query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Add vertex to DCEL if it doesn't already exist, otherwise return the existing vertex. | def add_vertex(self, vertex):
try:
vertex_idx = self.vertices.index(vertex)
# print "{} already in {}".format(vertex, self.vertices)
return self.vertices[vertex_idx]
except Exception:
self.vertices.append(vertex)
# print "adding {} to {}".format(vertex, self.vertices)
return vertex | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_vertex(self, vertex):\n if vertex.id not in self.vertices.keys():\n self.vertices[vertex.id] = vertex",
"def _add_vertex(self, x, y):\n v = Vertex2(x, y)\n i = bisect(self.vertices, v)\n \n # if vertex at these coordinates exists just return it\n if len(self.vertices) > i and self.vertices[i] == v:\n return self.vertices[i]\n \n # otherwise add new vertex in sorted position and return it\n self.vertices.insert(i, v)\n return v",
"def add_vertex(self, v):\n v = {'x': v[0], 'y': v[1]}\n if v not in self:\n self.append(v)\n return len(self)-1\n return self.index(v)",
"def add_vertex(self, vertex):\n if vertex not in self.graph_dict:\n self.graph_dict[vertex] = []\n return vertex",
"def add_vertex(self, vertex):\n if self.contains(vertex):\n return None\n if self.is_weighted():\n self._graph[vertex] = dict()\n else:\n self._graph[vertex] = set()\n return True",
"def add_vertex(self, vertex_id): # O(1) time complexity\n self.vertices[vertex_id] = set() \n\n # additional options (class)\n '''\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = {}\n\n else:\n return \"Vertex is already in Graph\"\n '''",
"def add_vertex(self, key):\n self.vertCount += 1\n addedVertex = vertex.Vertex(key)\n self.vertList[key] = addedVertex\n return addedVertex",
"def add_vertex(self, vertex):\n raise NotImplementedError",
"def add_vertex(self, vertex_id):\n pass # TODO",
"def add_vertex(self, key):\n #increments the number of vertices\n #creates a new vertex\n #adds the new vertex to the vertex list\n #returns the new vertex\n if key != None:\n self.num_vertices += 1\n new_vertex = Vertex(key)\n self.vert_list[key] = new_vertex\n return new_vertex\n raise KeyError(\"There's no key here\")",
"def addVertex(self, key):\n if key not in self.vertList:\n self.numVertices += 1\n vtx = Vertex(key)\n self.verList[key] = vtx\n return vtx",
"def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []",
"def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []",
"def add_vertex(self, key):\n vertex = Vertex(key)\n self.vertices += 1\n self.graph[key] = vertex\n\n return vertex",
"def add_vertex(self, vertex):\r\n if vertex not in self.__graph_dict:\r\n self.__graph_dict[vertex] = {}",
"def add_vertex(self, key):\n if key in self.vertices:\n raise ValueError('Key is already in use')\n \n # Create vertex\n self.vertices[key] = GraphVertex(key=key)",
"def add_vertex(self, vertex):\n if vertex not in self.graph_dict:\n self.graph_dict[vertex] = []",
"def add_vertex(self, vertex):\r\n if self.is_vertex_in_graph(vertex):\r\n raise GraphException(\"The vertex already exists.\")\r\n self.__neighbours[vertex] = []",
"def add_vertex(self,vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n # logging.debug(\"vertex being initialized ..\", vertex)\n else:\n # logging.debug(\"vertex not added ..\", vertex)\n pass",
"def add_vertex(self, key):\n # increment the number of vertices\n self.num_vertices += 1\n # create a new vertex\n vertex = Vertex(key)\n # add the new vertex to the vertex dictionary with a list as the value\n # self.vert_dict[vertex] = []\n # add the new vertex to the vertex list\n self.vert_dict[key] = vertex\n # return the new vertex\n return vertex",
"def __add__(self, vertex):\n\n if isinstance(vertex, Vertex):\n vName = vertex.name\n self._vertices[vName] = vertex",
"def add_vertex(self, vertex_id):\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = set()",
"def add_vertex(self, vertex_id):\n if vertex_id not in self.vertices: self.vertices[vertex_id] = set()",
"def addVertex(self, v: Vertex):\n if v is not None:\n self._vertices.add(v)\n\n # Possibly need to recalculate genus/core/etc.\n self.invalidateCaches()",
"def add_vertex(self, key):\n vertex = Vertex(key)\n self.vertices[key] = vertex",
"def add_vertex(self, vertex_id):\n # creates a vertex with an empty list as their neighboring vertices\n self.vertices[vertex_id] = set()",
"def add_vertex(self, vertex_id):\n # just add new dict entry\n self.vertices[vertex_id] = set()\n\n pass # TODO",
"def add_vertex(self, v: str) -> None:\n if self.contains_vertex(v):\n return\n else:\n self.adj_list[v] = []",
"def add_vertex(self, vertex: Vertex) -> None:\n self._vertices.add(vertex)\n if not vertex.predicate:\n self._entities.add(vertex)",
"def add_vertex(self, label=None, properties=None, current_id=None):\n if current_id is None:\n done = False\n while not done:\n next_id = self.get_next_id()\n\n if next_id not in self.vertices:\n current_id = next_id\n done = True\n else:\n if current_id in self.vertices:\n raise Exception('Vertex with ID Already Exist')\n\n current_vertex = Vertex(self, current_id, label=label, properties=properties)\n self.vertices[current_vertex.id] = current_vertex\n return current_vertex"
] | [
"0.7698422",
"0.75747514",
"0.7562474",
"0.75239784",
"0.7504211",
"0.7403369",
"0.7356343",
"0.7279313",
"0.71772546",
"0.7124853",
"0.70904213",
"0.7066278",
"0.7066278",
"0.7043618",
"0.70346177",
"0.7016917",
"0.69948727",
"0.6946342",
"0.6899464",
"0.6882716",
"0.6859618",
"0.68538505",
"0.6830724",
"0.6830189",
"0.6787582",
"0.6782125",
"0.676277",
"0.6751066",
"0.6738139",
"0.6733741"
] | 0.8166046 | 0 |
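The positive `document` in the row above depends on `list.index`, which only finds a duplicate if `Vertex` defines value-based equality. Below is a minimal sketch of that assumption; the `Vertex` fields and the coordinate-based `__eq__` are hypothetical, since the class definition is not included in the row:

```python
class Vertex:
    """Minimal stand-in for a DCEL vertex with value-based equality (assumed, not from the dataset)."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def __eq__(self, other):
        # list.index() inside add_vertex() compares vertices by coordinates
        return isinstance(other, Vertex) and (self.x, self.y) == (other.x, other.y)


class DCEL:
    def __init__(self):
        self.vertices = []

    def add_vertex(self, vertex):
        # Same logic as the positive document: return the stored vertex if an
        # equal one already exists, otherwise store and return the new one.
        try:
            return self.vertices[self.vertices.index(vertex)]
        except ValueError:
            self.vertices.append(vertex)
            return vertex


dcel = DCEL()
a = dcel.add_vertex(Vertex(0.0, 1.0))
b = dcel.add_vertex(Vertex(0.0, 1.0))  # same coordinates, so the first vertex is reused
assert a is b and len(dcel.vertices) == 1
```

Catching `ValueError` (what `list.index` actually raises) is slightly tighter than the bare `except Exception` used in the row, but the behaviour is the same.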
Add a face to DCEL if it doesn't already exist, otherwise return the existing face. | def add_face(self, face):
try:
face_idx = self.faces.index(face)
return self.faces[face_idx]
except Exception:
self.faces.append(face)
return face | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_face(self, face):\n\n if face.uuid is None:\n face.uuid = self._generate_uuid()\n\n if face.uuid in self._faces:\n error_str = \"Trying to add an already existing face with uuid: \"\\\n + str(face.uuid)\n raise KeyError(error_str)\n\n self._faces[face.uuid] = Face.from_face(face)\n\n return face.uuid",
"def update_face(self, face):\n\n if face.uuid not in self._faces:\n error_str = \"Trying to update a non-existing face with uuid: \"\\\n + str(face.uuid)\n raise KeyError(error_str)\n\n if not isinstance(face, Face):\n error_str = \"Trying to update an object with the wrong type. \"\\\n + \"Face expected.\"\n raise TypeError(error_str)\n\n face_to_update = self._faces[face.uuid]\n\n face_to_update.data = face.data\n face_to_update.points = face.points",
"def addFace(self, vertices, bypassCheck=False):\n try:\n if bypassCheck:\n raise ValueError\n return self.getFace(vertices)\n except ValueError:\n if any(vertices.count(v) > 1 for v in vertices):\n raise ValueError('The face given is invalid: '\n 'two or more vertices are identical')\n newF = face(vertices)\n self.faces.append(newF)\n for i in range(len(vertices)):\n try:\n e = self.getEdge(vertices[i],\n vertices[(i + 1) % len(vertices)])\n except ValueError:\n e = self.addEdge(vertices[i],\n vertices[(i + 1) % len(vertices)])\n e.linkFace(newF)\n return newF",
"def _add_facet(self, ea, eb, ec):\n f = Facet2(ea, eb, ec)\n i = bisect(self.edges, f)\n if len(self.facets) > i and self.facets[i] == f:\n return self.facets[i]\n \n self.facets.insert(i, f)\n return f",
"def change_face(self, face):\n if self.face is not None:\n self.face.remove_point(self)\n\n self.face = face\n self.face.add_point(self)",
"def get_face(self, uuid):\n\n try:\n return Face.from_face(self._faces[uuid])\n except KeyError:\n error_str = \"Trying to get an non-existing face with uuid: {}\"\n raise ValueError(error_str.format(uuid))",
"def insertFace(bm, v):\n a = []\n for k in range(len(v)):\n a.append(bm.verts[v[k]])\n f = bm.faces.new(a)\n bm.faces.ensure_lookup_table()\n return f",
"def add_face(self, vertices: Iterable[\"Vertex\"]) -> None:\n self.faces.append(self.add_vertices(vertices))",
"def getFace(self, vertices):\n for f in self.faces:\n if f.vertices == vertices:\n return f\n raise ValueError('No face found')",
"def add(self, cell, overwrite_duplicate=False):\n if isinstance(cell, Cell):\n if (not overwrite_duplicate and cell.name in self.cell_dict and\n self.cell_dict[cell.name] is not cell):\n raise ValueError(\"[GDSPY] cell named {0} already present in \"\n \"library.\".format(cell.name))\n self.cell_dict[cell.name] = cell\n else:\n for c in cell:\n if (not overwrite_duplicate and c.name in self.cell_dict and\n self.cell_dict[c.name] is not c):\n raise ValueError(\"[GDSPY] cell named {0} already present \"\n \"in library.\".format(c.name))\n self.cell_dict[c.name] = c\n return self",
"def selectmeshface(self):#Not used yet\n go = Rhino.Input.Custom.GetObject()\n go.GeometryFilter=Rhino.DocObjects.ObjectType.MeshFace\n go.SetCommandPrompt(\"Get mesh Face\")\n go.Get()\n objref=go.Object(0)\n face_guid = objref.ObjectId\n go.Dispose()\n \n return face_guid",
"def addPhoto(fileName, personName):\n\n #Check if image is a jpg\n if (fileName[-4:] != \".jpg\"):\n print(\"\\n[!] File extenstion must be .jpg!\\n\")\n return\n\n #Check image exists\n if (not os.path.isfile(fileName)):\n print(\"\\n[!] File does not exist!\\n\")\n return\n\n #Check no illegal characters in file name\n for c in ILLEGAL_FILE_NAMES:\n if (c in personName):\n print(\"\\n[!] Provided name contains an illegal argument\\n\")\n return\n\n #Load image\n image = face_recognition.load_image_file(fileName)\n\n #Use the name in the filename as the identity key\n identity = os.path.splitext(os.path.basename(fileName))[0]\n\n #Get the face location\n locationsHog = hogDetectFaceLocations(image)\n\n locationsHaar = haarDetectFaceLocations(image)\n\n #Get the face encoding\n encodingsHaar = face_recognition.face_encodings(image, locationsHaar)\n encodingsHog = face_recognition.face_encodings(image, locationsHog)\n\n #check if exactly one face is in the photo\n if ((len(encodingsHaar) == 0) or (len(encodingsHog) == 0)):\n print(\"\\n[!] No face detected in the provided photo\\n\")\n return\n\n elif ((len(encodingsHaar) > 1) or (len(encodingsHog) > 1)):\n print(\"\\n[!] More than one face detected in the provided photo\\n\")\n return\n\n #Set path to respective dataset\n directoryToAddTo = DATABASE_PATH + personName\n\n #Look for directory\n exists = False\n for subdir, dirs, files in os.walk(DATABASE_PATH):\n if (subdir == directoryToAddTo):\n exists = True\n\n #If directory doesnt exist, make it\n if (not exists):\n os.mkdir(directoryToAddTo)\n\n #Save data to file\n np.savetxt((directoryToAddTo + \"/\" + identity + \"Haar.txt\"),\n encodingsHaar[0])\n np.savetxt((directoryToAddTo + \"/\" + identity + \"Hog.txt\"),\n encodingsHog[0])\n\n print(\"\\n[*] Face successfully added!\\n\")",
"def add_new_known_face(new_file_name, known_face_encodings, known_face_names):\n face_encoding = read_face_encoding(new_file_name)\n known_face_encodings.append(face_encoding)\n\n known_face_names.append(new_file_name)\n\n return known_face_encodings, known_face_names",
"def setFace(self, value):\n self.face = value",
"def setFace(self, value):\n self.face = value",
"def face(self):\n\n return self.faceup",
"def log_in_database(name, face):\n #FIX: Curtain name array and face array to the first element of each respective array.\n if len(name) != 0:\n faces_present = 1\n name = name[0]\n face = face[0]\n\n #Check if the names and faces pickle exists.\n if (file_path/\"names_and_faces.pkl\").exists():\n\n #Load the pickled dictionary.\n with open(file_path/\"names_and_faces.pkl\", mode = \"rb\") as opened_file:\n names_and_faces = pickle.load(opened_file)\n\n #If the person's name is already in the dictionary, then append the descriptor to the end of the value as part of a list.\n if name in names_and_faces.keys():\n names_and_faces[name].append(face)\n\n #If the person's name is not in the dictionary, make a new dictionary entry.\n else:\n names_and_faces[name] = [face]\n #If there is no dictionary, make a new dictionary.\n else:\n names_and_faces = {}\n names_and_faces[name] = [face]\n\n #Save the dictionary.\n with open(file_path/\"names_and_faces.pkl\", mode = \"wb\") as opened_file:\n pickle.dump(names_and_faces, opened_file)\n else:\n faces_present = 0\n return faces_present",
"def markIntersectedWith(self, face):\n try:\n self.hasIntersected.add(face)\n except AttributeError:\n self.hasIntersected = set()\n self.hasIntersected.add(face)",
"def is_existing_face(image, trackers, face):\n\n x1, y1, w1, h1 = face\n face_mask = np.zeros_like(image)\n face_mask[y1:y1+h1, x1:x1+w1] = 1\n for t in trackers:\n try:\n x,y,w,h = t.bounding_box\n t_mask = np.zeros_like(image)\n t_mask[y:y+h, x:x+w] = 1\n\n union = np.sum(np.bitwise_or(face_mask, t_mask))\n intersection = np.bitwise_and(face_mask, t_mask)\n if float(np.sum(intersection))/union > 0.3 or float(np.sum(intersection))/np.sum(t_mask+1) > 0.7:\n return (t, True)\n except Exception:\n pass\n \n return (None, False)",
"def linkFace(self, f):\n if self.pFace is None:\n self.pFace = f\n elif self.nFace is None:\n self.nFace = f\n else:\n raise ValueError('Edge is already linked to two faces')",
"def add_vertex(self, vertex):\n try:\n vertex_idx = self.vertices.index(vertex)\n # print \"{} already in {}\".format(vertex, self.vertices)\n return self.vertices[vertex_idx]\n except Exception:\n self.vertices.append(vertex)\n # print \"adding {} to {}\".format(vertex, self.vertices)\n return vertex",
"def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. get the embedder result, insert to a pickle object --> can be section ID, or whatever",
"def _merge_face(merge, image, face):\n scaled = merge.image.resize(face.as_dimension()).convert(\"RGBA\")\n image.image = image.image.convert(\"RGBA\")\n image.image.paste(scaled, face.as_box(), mask=scaled)\n return image",
"def draw_face_box(data):\n head = extract_head(data);\n face_box = data['position_data']['face_box'][data['i']];\n sefs = data['sefs'][data['i']];\n if face_box is not None:\n cv2.rectangle(head, *face_box, (0, 255, 0));\n else:\n cv2.rectangle(head, *sefs, (0, 0, 255));\n return head;",
"def test_face_in_face(self):\n w = mt.createCube(marker=1, boundaryMarker=1)\n b = w.boundary(2)\n\n pad = mt.createFacet(mt.createCircle(radius=0.2, segments=12,\n isHole=True))\n b2 = pad.boundary(0)\n\n # rotate to match target norm and pos\n rot = pg.core.getRotation(b2.norm(), b.norm())\n pad.transform(rot)\n pad.translate(b.center())\n\n # create a boundary with new marker match the hole\n w.copyBoundary(b2)\n\n w.createBoundary(w.nodes([w.createNode(n.pos()).id() for n in b2.nodes()]),\n marker=2)\n\n #print(w.boundaryMarkers())\n\n mesh = mt.createMesh(w)\n\n #pg.show(mesh)\n # w.exportPLC('pad.poly')\n # mesh.exportBoundaryVTU('b.vtu')\n np.testing.assert_array_equal(pg.unique(pg.sort(mesh.boundaryMarkers())),\n [0, 1, 2])\n\n # print(mesh)\n # mesh.exportBoundaryVTU('b.vtu')\n pg.show(mesh)",
"def paint_faces_data(frame, faces_data):\n for face in faces_data:\n (top, right, bottom, left) = face['location']\n\n if face['identity'] is None:\n name = 'Unknown'\n color = (0, 0, 255) # red\n else:\n name = face['identity']\n color = (0, 128, 0) # dark green\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)",
"def _load_known_face(self):\n faces_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'faces')\n faces = [os.path.join(faces_dir, f) for f in os.listdir(faces_dir) if f.endswith('.jpeg')]\n known_images = [face_recognition.load_image_file(i) for i in faces]\n self.known_faces = []\n for image in known_images:\n encoding = face_recognition.face_encodings(image)\n if len(encoding) > 0:\n logging.debug('Adding known face')\n self.known_faces.append(encoding[0])",
"def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]",
"def face_with_verts(bm, verts, default=None):\n for face in bm.faces:\n if len(set(list(face.verts) + verts)) == len(verts):\n return face\n return default",
"def loginWithFace(self):\n #store, detector, recognizer, pca\n if self.store[\"face_added\"] == True:\n success = loginWithFace(self.store, self.detector, self.recognizer, self.pca)\n if success:\n self.clearScreen()\n from screen3 import Screen3\n Screen3(self.parent, self.store)\n else:\n self.temporaryLabel(self.frame2, \"Login failed. Try again\",\n x=220, y=420, fg=\"#F00\", second=1)\n else:\n self.temporaryLabel(self.frame2, \"You haven't added face recognition\",\n x=220, y=420, fg=\"#F00\", second=1)"
] | [
"0.7516604",
"0.62119764",
"0.61790365",
"0.5979341",
"0.5872748",
"0.5751965",
"0.5587595",
"0.539488",
"0.5394591",
"0.5375931",
"0.53083056",
"0.5234154",
"0.52226603",
"0.5124483",
"0.5124483",
"0.5102945",
"0.50553864",
"0.5042418",
"0.5035193",
"0.502191",
"0.5008115",
"0.4987875",
"0.49856907",
"0.49489287",
"0.4946597",
"0.49424168",
"0.49255627",
"0.49093345",
"0.48817888",
"0.48489112"
] | 0.8184843 | 0 |
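The first two positive documents deduplicate with `list.index` wrapped in try/except, while many of the higher-scored negatives do the same job with a dictionary lookup. A small sketch of that alternative, assuming the face's circumcentre is hashable (a hypothetical key choice, not taken from the dataset):

```python
class DCEL:
    def __init__(self):
        # Faces keyed by circumcentre; a dict lookup replaces the linear list scan.
        self.faces = {}

    def add_face(self, circumcentre, face):
        # setdefault returns the stored face if the key already exists,
        # otherwise it inserts and returns the new face.
        return self.faces.setdefault(tuple(circumcentre), face)


d = DCEL()
f1 = d.add_face((0.5, 0.5), object())
f2 = d.add_face((0.5, 0.5), object())  # same circumcentre, so the first face is reused
assert f1 is f2
```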
Return a list of vertices that form the outer boundary of finite faces of the DCEL. | def get_outer_boundary_of_voronoi(self):
edge = [edge for edge in self.edges if not edge.nxt][0]
# next(obj for obj in objs if obj.val==5)
first_vertex = edge.origin
outer_boundary = []
while (not edge.get_destination() == first_vertex):
if(edge.get_destination().is_infinity()):
edge = edge.twin.nxt
else:
outer_boundary.append(edge)
edge = edge.nxt
outer_boundary.append(edge)
return outer_boundary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetInteriorEdgesQuad(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesQuad()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesQuad()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags",
"def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices",
"def get_outer_vertices(feature):\n return [\n point\n for part in Geometry.get_multipolygon(feature)\n for point in part[0][0:-1]\n ]",
"def bounded_edges(self):\n obj = self.Vrepresentation()\n edges = []\n for i in range(len(obj)):\n if not obj[i].is_vertex(): continue\n for j in range(i+1,len(obj)):\n if not obj[j].is_vertex(): continue\n if self.vertex_adjacency_matrix()[i,j] == 0: continue\n yield (obj[i], obj[j])",
"def _interiorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n interiorIDs = numerix.concatenate((numerix.ravel(XYids[ ..., 1:-1]),\n numerix.ravel(XZids[:, 1:-1,:]),\n numerix.ravel(YZids[1:-1, ...].swapaxes(0, 1))))\n\n from fipy.variables.faceVariable import FaceVariable\n interiorFaces = FaceVariable(mesh=self, value=False)\n interiorFaces[interiorIDs] = True\n return interiorFaces",
"def GetInteriorEdgesPent(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesPent()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesPent()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags",
"def faces_as_vertices(self) -> Iterable[List[Vec3]]:\n v = self.vertices\n for face in self.faces:\n yield [v[index] for index in face]",
"def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d",
"def GetInteriorEdgesTri(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesTri()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesTri()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags",
"def vertices(self):\n return self.pointlist",
"def get_bounded_faces(self):\n return [face for face in self.faces if face.is_bounded()]",
"def GetBoundaryEdgesHex(self):\n\n p = self.InferPolynomialDegree()\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n tmesh.element_type = \"quad\"\n tmesh.elements = self.faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES\n self.edges = tmesh.GetEdgesQuad()",
"def GetBoundaryFacesHex(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.faces,np.ndarray):\n if self.faces.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.faces.shape[1] == 4 and p > 1:\n pass\n else:\n return\n\n node_arranger = NodeArrangementHex(p-1)[0]\n\n # CONCATENATE ALL THE FACES MADE FROM ELEMENTS\n all_faces = np.concatenate((np.concatenate((\n np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],\n self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),\n self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),\n self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES\n freqs_inv = itemfreq(inv)\n faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.faces = uniques[faces_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF FACES\n all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)\n all_faces_in_faces = np.where(all_faces_in_faces==True)[0]\n\n # boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)\n boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]\n boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]\n self.faces = self.faces.astype(np.uint64)\n self.boundary_face_to_element = boundary_face_to_element",
"def vertices(self):\n return map(Vertex, self._top_exp.vertices())",
"def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()",
"def cw_face_edges(self,face):\n\n l0 = self.region_link[face]\n if face == self.left_region[l0]:\n l0 = (l0[1], l0[0])\n l = l0\n\n traversing = True\n edges = []\n while traversing:\n edges.append(l)\n r = self.right_region[l]\n if r == face:\n l = self.succ_right[l]\n else:\n l = self.succ_left[l]\n if l == l0:\n traversing = False\n return edges",
"def vertices(self) -> list[Point]:\n first_polygon_index = self.rank - max(self.pdim - 1, 1) - 1\n new_shape = self.shape[:first_polygon_index] + (-1, self.shape[-1])\n array = self.array.reshape(new_shape)\n return list(distinct(Point(x, copy=False) for x in np.moveaxis(array, -2, 0)))",
"def vertices(self):\n return list(self._graph)",
"def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]",
"def get_vertices(self) -> []:\n return [i for i in self.adj_list]",
"def calculateMeshInv(mesh_face_vertices):\n mesh_inv = []\n for mesh in mesh_face_vertices:\n U = np.array([\n [mesh[0, 0], mesh[1, 0], mesh[2, 0]],\n [mesh[0, 1], mesh[1, 1], mesh[2, 1]],\n [1, 1, 1],\n ])\n mesh_inv.append(np.linalg.inv(U))\n return np.array(mesh_inv)",
"def find_isolated_vertices(self):\n graph = self.__graph_dict\n isolated = []\n for vertex in graph:\n # print(isolated,vertex)\n if not graph[vertex]:\n isolated += [vertex]\n return isolated",
"def mesh_boundary(mesh):\n adja = edges_to_adjacency_matrix(mesh)\n r = sparse.extract.find(adja)\n li = r[0][np.where(r[2] == 1)]\n lj = r[1][np.where(r[2] == 1)]\n edges_boundary = np.vstack([li, lj]).T\n \"\"\"\n # alternative implementation based on edges and grouping from trimesh\n # instead of adjacency matrix\n from trimesh import grouping\n groups = grouping.group_rows(mesh.edges_sorted, require_count=1)\n # vertex_boundary = np.unique(open_mesh.edges_sorted[groups])\n edges_boundary = mesh.edges_sorted[groups]\n \"\"\"\n if li.size == 0:\n print('No holes in the surface !!!!')\n return np.array()\n else:\n return edges_to_boundary(edges_boundary)",
"def GetElementsWithBoundaryFacesHex(self):\n\n # DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES\n assert self.elements is not None\n assert self.faces is not None\n\n if self.boundary_face_to_element is not None:\n return self.boundary_face_to_element\n\n # THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK\n # IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT THE FACES CAN COME\n # EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY\n # HENCE THIS MAPPING BECOMES NECESSARY\n\n C = self.InferPolynomialDegree() - 1\n node_arranger = NodeArrangementHex(C)[0]\n\n all_faces = np.concatenate((np.concatenate((\n np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],\n self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),\n self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),\n self.elements[:,node_arranger[5,:]]),axis=0).astype(self.faces.dtype)\n\n all_faces_in_faces = in2d(all_faces,self.faces[:,:4],consider_sort=True)\n all_faces_in_faces = np.where(all_faces_in_faces==True)[0]\n\n boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)\n boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]\n boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]\n\n\n # SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER\n # NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND\n # FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.\n # WE NEED TO FIND THIS MAPPING NOW\n\n # WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS\n faces = self.elements[boundary_face_to_element[:,0][:,None],\n node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)\n\n # CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED\n assert np.sum(faces[:,:4].astype(np.int64) - self.faces[:,:4].astype(np.int64)) == 0\n\n # NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES\n from Florence.Tensor import shuffle_along_axis\n row_mapper = shuffle_along_axis(faces[:,:4],self.faces[:,:4],consider_sort=True)\n\n # UPDATE THE MAP\n boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]\n self.boundary_face_to_element = boundary_face_to_element\n\n return self.boundary_face_to_element",
"def vertices(self):\n return self._vertices",
"def get_faces(ulist, vlist):\n width = len(ulist)\n faces = []\n for i in range(len(ulist) - 1):\n for j in range(len(vlist) - 1):\n topleft = j * width + i\n topright = topleft + 1\n bottomleft = ((j + 1) * width) + i\n bottomright = bottomleft + 1\n one = [topleft, topright, bottomleft]\n two = [bottomleft, topright, bottomright]\n faces.append(one)\n faces.append(two)\n\n return faces",
"def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)",
"def GetInteriorFacesHex(self):\n\n if not isinstance(self.all_faces,np.ndarray):\n self.GetFacesHex()\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n face_flags = in2d(self.all_faces.astype(self.faces.dtype),self.faces,consider_sort=True)\n face_flags[face_flags==True] = 1\n face_flags[face_flags==False] = 0\n interior_faces = self.all_faces[face_flags==False,:]\n\n return interior_faces, face_flags",
"def get_faces(self):\n faces = []\n for j in range(0, self.height - 1):\n for i in range(0, self.width - 1):\n # add the two triangle faces\n tl = (j * self.width) + i\n tr = (j * self.width) + i + 1\n bl = ((j+1) * self.width) + i\n br = ((j+1) * self.width) + i + 1\n\n face = [bl, tr, tl]\n faces.append(face)\n face = [bl, br, tr]\n faces.append(face)\n return faces",
"def vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices"
] | [
"0.6706478",
"0.6694034",
"0.6630309",
"0.6623699",
"0.66021216",
"0.65841985",
"0.6554914",
"0.64516014",
"0.6362444",
"0.63408965",
"0.6277447",
"0.62755173",
"0.61893046",
"0.6162313",
"0.6147987",
"0.6136316",
"0.612656",
"0.611977",
"0.61181563",
"0.610831",
"0.60700697",
"0.60514164",
"0.6050791",
"0.60379523",
"0.6012995",
"0.60123616",
"0.59998924",
"0.59840435",
"0.5983638",
"0.59547126"
] | 0.73313373 | 0 |
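The traversal in the row above follows the usual half-edge convention: walk `nxt` pointers around a face, detouring through `twin.nxt` whenever the walk would run into a vertex at infinity. A stripped-down sketch of the plain face walk, with a hypothetical `HalfEdge` holding only the fields needed here:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class HalfEdge:
    origin: tuple                      # origin vertex coordinates (illustration only)
    nxt: Optional["HalfEdge"] = None   # next half-edge around the same face


def walk_face(start: HalfEdge):
    """Collect the half-edges bounding one face by following nxt pointers."""
    edges, edge = [], start
    while True:
        edges.append(edge)
        edge = edge.nxt
        if edge is start:
            return edges


# A triangular face a -> b -> c -> a
a, b, c = HalfEdge((0, 0)), HalfEdge((1, 0)), HalfEdge((0, 1))
a.nxt, b.nxt, c.nxt = b, c, a
assert [e.origin for e in walk_face(a)] == [(0, 0), (1, 0), (0, 1)]
```

The infinite-vertex detour in the original code additionally requires `twin` pointers, which this sketch omits.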
Return the dual of the current DCEL. | def dual(self):
def set_twins():
for edge_idx in range(0, len(dual_dcel.edges), 2):
dual_dcel.edges[edge_idx].twin = dual_dcel.edges[edge_idx + 1]
dual_dcel.edges[edge_idx + 1].twin = dual_dcel.edges[edge_idx]
def set_next_and_previous():
for face in dual_dcel.faces:
face_edges = [edge for edge in dual_dcel.edges if edge.incident_face == face]
for edge in face_edges:
if(not edge.get_destination().is_infinity()):
edge.nxt = [e for e in face_edges if e.origin == edge.get_destination()][0]
if(not edge.origin.is_infinity()):
edge.prev = [e for e in face_edges if edge.origin == e.get_destination()][0]
dual_dcel = DCEL()
for edge in self.edges:
incident_face = dual_dcel.add_face(Face(circumcentre=edge.twin.origin.as_points()))
origin = dual_dcel.add_vertex(Vertex(coordinates=edge.incident_face.circumcentre))
dual_edge = HalfEdge(
origin=origin,
incident_face=incident_face
)
incident_face.outer_component = dual_edge
origin.incident_edge = dual_edge
dual_dcel.edges.append(dual_edge)
set_twins()
set_next_and_previous()
return dual_dcel | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dual(self):\n return dual_array(self)",
"def getdualobj(self,whichsol_):\n dualobj_ = ctypes.c_double()\n res = __library__.MSK_XX_getdualobj(self.__nativep,whichsol_,ctypes.byref(dualobj_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n dualobj_ = dualobj_.value\n _dualobj_return_value = dualobj_\n return (_dualobj_return_value)",
"def dual_objective(self, dual_coeffs):\n primal = self.model._sdca_primal_dual_relation(self.l_l2sq,\n dual_coeffs)\n prox_l2_value = 0.5 * self.l_l2sq * np.linalg.norm(primal) ** 2\n return self.model.dual_loss(dual_coeffs) - prox_l2_value",
"def dualGrid(self):\n return self._dual_grid( )",
"def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative",
"def double(self):\n return self._double",
"def gen_dual_func(self):\n if 0 in self.sig:\n # We are degenerate, use the right complement\n return self.right_complement_func\n else:\n Iinv = self.pseudoScalar.inv().value\n gmt_func = self.gmt_func\n @numba.njit\n def dual_func(Xval):\n return gmt_func(Xval, Iinv)\n return dual_func",
"def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))",
"def dual(self, I=None) -> 'MultiVector':\n if I is None:\n return self.layout.MultiVector(value=self.layout.dual_func(self.value))\n else:\n Iinv = I.inv()\n\n return self * Iinv",
"def getD(self):\r\n return self.D",
"def disagreement(self):\n return 0.5*(np.dot(np.dot(np.transpose(self.x),self.L),self.x)).item(0)",
"def derivative ( self ):\n return self.__derivative",
"def get_dual_val(self, var_name, pos):\n val = self.get_other_value(self.dual_var, var_name, pos)\n if not self.pdv_to_csv: # if not saved to csv file\n return val\n else: # otherwise, we should get the file path and read from the file to array or mat\n f_path = os.path.join(self.root_dir, 'dual_vars', var_name, str(val) + '.csv')\n df = pd.read_csv(f_path, header = None) # first read csv file into a pandas data frame and then transform\n return np.asmatrix(df.values)",
"def diffuse_coefficient(self):\n return self._diffuse_coefficient",
"def dual(self):\n letter = self.letter()\n # the self-dual cases\n if letter != 'BC' and letter[0] in ['B','C']:\n if letter == 'BB': letter = 'CC'\n elif letter == 'CC': letter = 'BB'\n elif letter[0] == 'B': letter = 'C' + letter[1:]\n elif letter[0] == 'C': letter = 'B' + letter[1:]\n rank = self._rank\n if self.is_affine():\n rank -= 1\n twist = self._twist\n return QuiverMutationType(letter,rank,twist)\n # the cases F and G have non-trivial duality in some cases\n elif letter in ['F','G']:\n if self.is_finite(): return self\n elif self.is_affine():\n rank = self._rank - 1\n twist = - self._twist\n elif self.is_elliptic():\n twist = self._twist\n rank = self._rank - 2\n if letter == 'F':\n if self._twist == [2,2]:\n twist == [1,1]\n if self._twist == [1,1]:\n twist == [2,2]\n if letter == 'G':\n if self._twist == [3,3]:\n twist = [1,1]\n elif self._twist == [1,1]:\n twist = [3,3]\n else: rank = self._rank\n return QuiverMutationType(letter,rank,twist)\n else:\n return self",
"def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5",
"def DOR(self):\n a, c, d, b = self.to_ccw()\n ad, bc = a * d, b * c\n return _div(ad, bc)",
"def getdualobj(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getdualobj(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _dualobj_return_value = resargs\n return _dualobj_return_value",
"def valuation(self):\n\t\tif self.__tete:\n\t\t\treturn self.__tete.plus_grand().get_coefficient()\n\t\telse:\n\t\t\treturn rationnel()",
"def getDouble(self, int: int, int2: int) -> float:\n ...",
"def D(self):\n if not hasattr(self, '_D'):\n self.logger.warning('The differential operator G.D is not '\n 'available, we need to compute it. Explicitly '\n 'call G.compute_differential_operator() '\n 'once beforehand to suppress the warning.')\n self.compute_differential_operator()\n return self._D",
"def dualy(self, arg, **kwargs):\n ax = self.alty(**kwargs)\n self._dualy_arg = arg\n self._dualy_overrides()\n return ax",
"def dual_k_Schur(self):\n return DualkSchurFunctions(self)",
"def ddalf(x):\n return dalf_spl.derivatives(x)[1]",
"def get_linear_dispersion(self):\n if \"DX\" not in self._results_df or \"DY\" not in self._results_df:\n self.calc_linear_dispersion()\n return self._results_df.loc[:, [\"S\", \"DX\", \"DY\"]]",
"def activate_der(self):\r\n\t\treturn self.value * (1 - self.value)",
"def _der(self, x):\n y, dydx = self._evalAndDer(x)\n return dydx # Sadly, this is the fastest / most convenient way...",
"def _der(self, x):\n y, dydx = self._evalAndDer(x)\n return dydx # Sadly, this is the fastest / most convenient way...",
"def get_bessel_derivative(self):\n return np.array([t.der_bessel for t in self._trc])",
"def getDensityLaw(self):\n return self.densityLaw"
] | [
"0.68338317",
"0.6423258",
"0.5941094",
"0.5921593",
"0.57853687",
"0.57633495",
"0.5762203",
"0.57469624",
"0.5682356",
"0.5682002",
"0.5639214",
"0.5613858",
"0.55889916",
"0.55681217",
"0.5535743",
"0.5530557",
"0.55077",
"0.54985017",
"0.5457592",
"0.54481125",
"0.5441906",
"0.54185843",
"0.54157156",
"0.5402935",
"0.5397527",
"0.5395019",
"0.53673464",
"0.53673464",
"0.5318552",
"0.5295986"
] | 0.6778644 | 1 |
Print-friendly representation of the DCEL object. | def __repr__(self):
return (
'<DCEL ('
'vertices:\n {obj.vertices},\n'
'edges:\n {obj.edges},\n'
'faces:\n {obj.faces}>'.format(obj=self)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __repr__(self):\n cls = self.__class__.__name__\n return '%s(%s)' % (cls, repr(self.d))",
"def __str__(self):\n result=\"curv %f d0 %f z0 %f ctheta %f phi %f barcode %d\"%(self.curv,self.d0,self.z0,self.ctheta,self.phi,self.barcode)\n return result",
"def printObj(self):\n return 'patient_id:{}, medication:{}, frequency:{}, start_dt:{},'\n 'end_dt:{}, noti_type:{}'.format(\n self.patients.data,\n self.medication.data,\n self.frequency.data,\n self.start_dt,\n self.end_dt.data,\n self.noti_type.data)",
"def output(self):\n to_write = 'C '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x'])+' '\n to_write += str(self.offset[1] + self.def_field['y'])+' '\n to_write += str(self.def_field['radius'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write",
"def __str__(self):\n from nodepy.utils import array2strings\n\n c = array2strings(self.c,printzeros=True)\n A = array2strings(self.A)\n b = array2strings(self.b,printzeros=True)\n lenmax, colmax = _get_column_widths([A,b,c])\n\n s=self.name+'\\n'+self.info+'\\n'\n for i in range(len(self)):\n s+=c[i].ljust(colmax+1)+'|'\n for j in range(len(self)):\n s+=A[i,j].ljust(colmax+1)\n s=s.rstrip()+'\\n'\n s+='_'*(colmax+1)+'|'+('_'*(colmax+1)*len(self))+'\\n'\n s+= ' '*(colmax+1)+'|'\n for j in range(len(self)):\n s+=b[j].ljust(colmax+1)\n return s.rstrip()",
"def __repr__(self):\n return self.pretty_print(self.__dict__)",
"def __repr__(self):\n values = ', '.join(f'{k}={v}' for k, v in self.variables.items())\n return f'D({values})'",
"def __str__(self):\n txt = \"%s:\\n\" % self.name\n txt += \" Charge: %.4f\\n\" % self.charge\n txt += \" Radius: %.4f\" % self.radius\n return txt",
"def __str__(self):\n txt = ''\n if self.PrintHeader:\n txt = \" |\" + \"|\".join(sorted(self.rows[0].keys())).expandtabs() + \"|\"\n txt += \"\\n\"\n txt += \"|-\"\n for r in self.rows:\n txt += \"\\n|\"\n txt += \"|\".join([str(uround(r[key] , 2) if isinstance(r[key], (int, long, float, complex , Variable,AffineScalarFunc )) else r[key]) for key in sorted(self.rows[0].keys())]) + \"|\"\n txt += \"\\n|-\"\n if self.PrintSum:\n txt += \"\\n\"\n sumRow = self.GetSumRow()\n txt += \"| |\" + \"|\".join( [str(uround(sumRow[key] , 2) if isinstance(sumRow[key], (int, long, float, complex , Variable ,AffineScalarFunc )) else sumRow[key]) for key in sorted(self.rows[0].keys())[1:]] ) + \"|\"\n\n return txt",
"def __repr__(self):\n\n return '%s(%r, %r, %s)' % (\n self, self.lod, self.cvs_path, self._format_entries(),\n )",
"def __str__(self):\n datastr = self.f_val_to_str()\n return_string = \"%s %s\" % (self.f_get_class_name(), self.v_full_name)\n if self.v_comment:\n return_string += \" (`%s`)\" % self.v_comment\n if datastr:\n return_string += \": \" + datastr\n\n return return_string",
"def __str__(self):\n\n string = \"values:\\n\\t\"\n string += \" x \".join(map(str, self.shape))\n\n string += \" {} ({})\\n\".format(type(self.values).__name__, self.values.dtype)\n\n if self.print_values is True:\n string += str(self.values) + \"\\n\"\n\n string += \"dims:\\n\\t\"\n\n string += \"{}\\n\".format(self.dims)\n\n string += \"coords:\\n\\t\"\n string += \"\\n\\t\".join(map(repr, self.coords))\n\n string += \"\\n\"\n\n string += \"attrs:\\n\"\n\n for ix, key in enumerate(self.attrs.keys()):\n if ix == self.max_print_attrs:\n string += \"\\t+%i attrs\" % (len(self.attrs) - self.max_print_attrs)\n break\n string += \"\\t{!r}: {!r}\\n\".format(key, self.attrs[key])\n\n return string",
"def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )",
"def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info",
"def __str__(self):\n return f\"{self.__class__.__name__}:\\n{self._axl_data}\"",
"def __str__(self):\n s = \"--\\n\"\n for element in self:\n s += element.__str__() + \"\\n\"\n s += \"--\"\n \"\"\"\n # Uncomment if you want to see the internal structure\n s = \"\\n--\\n\"\n for i in xrange(self.size):\n s += \"%d [%s, %s]\\n\" % ( i, self.slot[i], self.data[i] )\n s += \"--\"\n \"\"\"\n return s",
"def __str__(self):\n return self.printable()",
"def __str__(self):\n\n outstr = 'gear wheel data:\\n'\n # output gear data\n for date in self.data:\n outstr += date.ljust(10) + ':\\t' + str(self.data.get(date)) + '\\n'\n\n # output modification data\n if self.modifications:\n outstr += '\\nflank modifications:\\n'\n for date in self.modifications:\n outstr += date.ljust(10) + ':\\t' + str(self.modifications.get(date)) + '\\n'\n\n # output tooth form coordinates\n if self.formcoords:\n # upper and lower index of point-array\n outstr += '\\ntooth form coordinates:\\n'\n for coord in self.formcoords:\n outstr += str(coord[0]) + '\\t' + str(coord[1]) + '\\n'\n\n return outstr",
"def __repr__(self):\n s = self.print_bfs()\n return s",
"def __repr__(self):\n str(self)",
"def __repr__(self):\n return f\"{self.number} {self.name}: {self.desc}\"",
"def __str__(self):\r\n\r\n retval = self.__class__.__name__ + ' ('\r\n for val in self.VALUES:\r\n value = getattr(self, val, None)\r\n if value is not None:\r\n retval += '%s:%.4f ' % (val, getattr(self, val))\r\n return retval.strip() + ')'",
"def __repr__(self):\r\n\t\treturn str(self)",
"def __repr__(self):\n return ''.join(f'\\ncompany: {self.company_name}\\nsize: {self.company_size}\\ncompany_founded: '\n f'{self.company_founded}\\ncompany_industry: {self.company_industry}\\ncompany_sector: '\n f'{self.company_sector}\\ncompany_type: {self.company_type}\\ncompany_rating: '\n f'{self.company_rating}\\ncompany_competitors: {self.company_competitors}\\ncompany_revenue: '\n f'{self.company_revenue}\\ncompany_headquarters: {self.company_headquarters}')",
"def __repr__ (self):\n\t\tStr = \"\"\n\t\tfor i in self.structref:\n\t\t\tStr = Str + \"%-15s = \"%(i[self.NAME])\n\t\t\tvalue = self.value [i[self.NAME]]\n\t\t\tif isInteger(value):\n\t\t\t\tStr = Str + \"%d, 0x%X\"%(value,value)\n\t\t\t\tif value >= 0x20 and value <= 0xFF:\n\t\t\t\t\tStr = Str + \" '\" + chr (value) + \"'\"\n\t\t\telse:\n\t\t\t\tif type(value) == type(bytes(0)):\n\t\t\t\t\tStr = Str + value.decode(\"utf8\",\"ignore\")\n\t\t\t\telse:\n\t\t\t\t\tStr = Str + str(value) \n\t\t\t\t\t\n\t\t\tStr = Str + \"\\n\"\n\t\treturn Str",
"def __repr__(self):\r\n return self.to_str()",
"def __repr__(self):\r\n return self.to_str()",
"def __repr__(self):\n\t\treturn repr( (self.name, self.position, self.cost, self.vorp) )",
"def __str__(self):\n return repr(self)",
"def __repr__(self):\r\n return str(self)"
] | [
"0.71572053",
"0.70599174",
"0.70522857",
"0.69488704",
"0.69239604",
"0.6836956",
"0.6819858",
"0.6812137",
"0.6802802",
"0.6789431",
"0.6776764",
"0.6766797",
"0.6753627",
"0.6751448",
"0.6745186",
"0.67189395",
"0.6699513",
"0.6692117",
"0.6681709",
"0.6676318",
"0.6663971",
"0.6663641",
"0.6663305",
"0.6659223",
"0.66523874",
"0.6641484",
"0.6641484",
"0.6636226",
"0.6630794",
"0.66239804"
] | 0.74030674 | 0 |
Store the camera intrinsics. We need this for the calibration matrices from the Tango. | def new_camera_intrinsics_callback(self, new_camera_info):
self.camera_intrinsics = new_camera_info
self.k_mat = np.matrix(
np.array(self.camera_intrinsics.K).reshape((3, 3))
)
self.k_inv = self.k_mat.I | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_intrinsics(self, save_dir):\n if not osp.isfile(\n osp.join(save_dir, 'intrinsics', 'intrinsics.npy')):\n np.save(osp.join(\n save_dir, 'intrinsics', 'intrinsics'), self.camera_model.K)",
"def load_extrinsics(self):\n return self.um.read_json(\"extrinsics.json\")",
"def load_calib(self):\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the calibration file\n calib_filepath = os.path.join(self.sequence_path, 'calib.txt')\n filedata = utils.read_calib_file(calib_filepath)\n\n # Create 3x4 projection matrices\n P_rect_00 = np.reshape(filedata['P0'], (3, 4))\n P_rect_10 = np.reshape(filedata['P1'], (3, 4))\n P_rect_20 = np.reshape(filedata['P2'], (3, 4))\n P_rect_30 = np.reshape(filedata['P3'], (3, 4))\n\n # Compute the rectified extrinsics from cam0 to camN\n T1 = np.eye(4)\n T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]\n T2 = np.eye(4)\n T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]\n T3 = np.eye(4)\n T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]\n\n # Compute the velodyne to rectified camera coordinate transforms\n data['T_cam0_velo'] = np.reshape(filedata['Tr'], (3, 4))\n data['T_cam0_velo'] = np.vstack([data['T_cam0_velo'], [0, 0, 0, 1]])\n data['T_cam1_velo'] = T1.dot(data['T_cam0_velo'])\n data['T_cam2_velo'] = T2.dot(data['T_cam0_velo'])\n data['T_cam3_velo'] = T3.dot(data['T_cam0_velo'])\n\n # Compute the camera intrinsics\n data['K_cam0'] = P_rect_00[0:3, 0:3]\n data['K_cam1'] = P_rect_10[0:3, 0:3]\n data['K_cam2'] = P_rect_20[0:3, 0:3]\n data['K_cam3'] = P_rect_30[0:3, 0:3]\n\n # Compute the stereo baselines in meters by projecting the origin of\n # each camera frame into the velodyne frame and computing the distances\n # between them\n p_cam = np.array([0, 0, 0, 1])\n p_velo0 = np.linalg.inv(data['T_cam0_velo']).dot(p_cam)\n p_velo1 = np.linalg.inv(data['T_cam1_velo']).dot(p_cam)\n p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)\n p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)\n\n data['b_gray'] = np.linalg.norm(p_velo1 - p_velo0) # gray baseline\n data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline\n\n self.calib = namedtuple('CalibData', data.keys())(*data.values())",
"def intrinsics(self) -> 'Intrinsics':\n return self._intrinsics",
"def intrinsics_json(json_path):\n with open(json_path) as json_file:\n # Camera Intrinsic Matrix\n k_mat = np.eye(4, dtype=np.float32) #Idk why size 4? (To match translation?)\n json_data = json.load(json_file)\n k_mat[0, 0] = json_data[\"intrinsic\"][\"fx\"]\n k_mat[1, 1] = json_data[\"intrinsic\"][\"fy\"]\n k_mat[0, 2] = json_data[\"intrinsic\"][\"u0\"]\n k_mat[1, 2] = json_data[\"intrinsic\"][\"v0\"]\n\n # Transformation Mat between cameras\n stereo_t = np.eye(4, dtype=np.float32)\n stereo_t[0, 3] = json_data[\"extrinsic\"][\"baseline\"]\n\n return {\"K\":k_mat, \"inv_K\":np.linalg.pinv(k_mat), \"baseline_T\":stereo_t}",
"def save(self, filename):\n file_root, file_ext = os.path.splitext(filename)\n if file_ext.lower() != INTR_EXTENSION:\n raise ValueError('Extension %s not supported for OrhtographicIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION))\n\n camera_intr_dict = copy.deepcopy(self.__dict__)\n f = open(filename, 'w')\n json.dump(camera_intr_dict, f)\n f.close()",
"def saveCameraIntrinsics(cameraList, imageTopics, resultFile):\n cameraModelNames = {acvb.DistortedPinhole: 'pinhole',\n acvb.EquidistantPinhole: 'pinhole',\n acvb.FovPinhole: 'pinhole',\n acvb.Omni: 'omni',\n acvb.DistortedOmni: 'omni',\n acvb.ExtendedUnified: 'eucm',\n acvb.DoubleSphere: 'ds'}\n distortionModels = {acvb.DistortedPinhole: 'radtan',\n acvb.EquidistantPinhole: 'equidistant',\n acvb.FovPinhole: 'fov',\n acvb.Omni: 'none',\n acvb.DistortedOmni: 'radtan',\n acvb.ExtendedUnified: 'none',\n acvb.DoubleSphere: 'none'}\n\n chain = cr.CameraChainParameters(resultFile, createYaml=True)\n for cam_id, cam in enumerate(cameraList):\n cameraModel = cameraModelNames[cam.model]\n distortionModel = distortionModels[cam.model]\n\n # create new config file\n camParams = cr.CameraParameters(resultFile, createYaml=True)\n camParams.setRosTopic(imageTopics[cam_id])\n\n # set the data\n P = cam.geometry.projection()\n if cameraModel == 'omni':\n camParams.setIntrinsics(cameraModel, [P.xi(), P.fu(), P.fv(), P.cu(), P.cv()])\n elif cameraModel == 'pinhole':\n camParams.setIntrinsics(cameraModel, [P.fu(), P.fv(), P.cu(), P.cv()])\n elif cameraModel == 'eucm':\n camParams.setIntrinsics(cameraModel, [P.alpha(), P.beta(), P.fu(), P.fv(), P.cu(), P.cv()])\n elif cameraModel == 'ds':\n camParams.setIntrinsics(cameraModel, [P.xi(), P.alpha(), P.fu(), P.fv(), P.cu(), P.cv()])\n else:\n raise RuntimeError(\"Invalid camera model {}.\".format(cameraModel))\n camParams.setResolution([P.ru(), P.rv()])\n dist_coeffs = P.distortion().getParameters().flatten(1)\n camParams.setDistortion(distortionModel, dist_coeffs)\n\n chain.addCameraAtEnd(camParams)\n\n chain.writeYaml()",
"def estimate_extrinsics(dataset):\n # extrinsics are matrices M of shape (3,4) for every datapoint --> M = [R,t] where R=rotation matrix and t = translation vector\n camera_extrinsics_univ = np.zeros(\n (dataset.datadict[\"keypoints_3d_univ\"].shape[0], 3, 4), dtype=np.float\n )\n camera_extrinsics = np.zeros(\n (dataset.datadict[\"keypoints_3d\"].shape[0], 3, 4), dtype=np.float\n )\n\n for i, vid in enumerate(\n tqdm(\n np.unique(dataset.datadict[\"v_ids\"]),\n desc=\"Estimate extrinsics per video\",\n )\n ):\n ids = dataset.datadict[\"v_ids\"] == vid\n kps3d_c = dataset.datadict[\"keypoints_3d\"][ids]\n kps3d_c_univ = dataset.datadict[\"keypoints_3d_univ\"][ids]\n kps3d_w = dataset.datadict[\"keypoints_3d_world\"][ids]\n kps3d_c = np.reshape(kps3d_c, (-1, 3))\n kps3d_c_univ = np.reshape(kps3d_c_univ, (-1, 3))\n kps3d_w = np.reshape(kps3d_w, (-1, 3))\n\n _, M, _ = cv2.estimateAffine3D(\n kps3d_w, kps3d_c, ransacThreshold=10, confidence=0.999\n )\n _, M_univ, _ = cv2.estimateAffine3D(\n kps3d_w, kps3d_c_univ, ransacThreshold=10, confidence=0.999\n )\n\n # returned values correspond to [R,t]^T\n camera_extrinsics[ids] = M\n camera_extrinsics_univ[ids] = M_univ\n\n return camera_extrinsics_univ, camera_extrinsics",
"def get_intrinsics(self):\n if self._K is None:\n K = self.original_intrinsics.clone()\n if self.crop is not None:\n K[:2,2] -= torch.tensor(self.crop[:,0], device=K.device, dtype=K.dtype)\n K[:2] *= self.reup_sample / self.down_sample\n self._K = K\n return self._K",
"def save_mem_load(self):\n if len(self.get_data_shape())==4 and self._img:\n data = np.zeros(self.get_data_shape())\n self._data = np.rot90(data)\n self._loaded_time_list = [0]\n self._data[..., 0] = np.rot90(self._img.dataobj[..., 0])\n else:\n self._loaded_time_list = [0]\n data = self._img.get_data(caching='unchanged')\n self._data = np.rot90(data)",
"def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass",
"def get_calibration_info():\n mjpeg_info_dict = redis_tools.get_dict(db,'mjpeg_info_dict')\n calibration_info = mct_introspection.get_homography_calibration_info()\n for camera in mjpeg_info_dict:\n if not camera in calibration_info:\n calibration_info[camera] = {'modified': ''}\n return calibration_info",
"def load_camera_data(file_name):\n assert os.path.isfile(file_name), \"Invalid file {}\".format(file_name)\n import sintel_io\n\n intrinsic, extrinsic = sintel_io.cam_read(file_name)\n return intrinsic, extrinsic",
"def cam_calibration():\n # read all calibration images in a folder with similar names\n images = glob.glob('./camera_cal/calibration*.jpg')\n\n # calibrate camera and read object-points (3D), image points (2D) and image shape\n objpoints, imgpoints, img_shape = calibrate_camera(images)\n print(\"DONE: Camera calibration\")\n # save calibration parameters' pickle file\n save_calib_params(objpoints, imgpoints, img_shape)\n print(\"Calibration parameters pickle file saved \")",
"def rebuildMatrixCache(self):\n self.converterYUR = Mat4.convertMat(CSYupRight, self.lens.getCoordinateSystem()) * self.lens.getProjectionMat()",
"def load(filename):\n file_root, file_ext = os.path.splitext(filename)\n if file_ext.lower() != INTR_EXTENSION:\n raise ValueError('Extension %s not supported for CameraIntrinsics. Must be stored with extension %s' %(file_ext, INTR_EXTENSION))\n\n f = open(filename, 'r')\n ci = json.load(f)\n f.close()\n return OrthographicIntrinsics(frame=ci['_frame'],\n vol_height=ci['_vol_height'],\n vol_width=ci['_vol_width'],\n vol_depth=ci['_vol_depth'],\n plane_height=ci['_plane_height'],\n plane_width=ci['_plane_width'],\n depth_scale=ci['_depth_scale'])",
"def persistent_image_features(images, toStoreFile):\n image_features = extract_features(images)\n\n np.save(toStoreFile, image_features)",
"def extract_calibration(self):\n #TODO add function to check if the folder exists because opencv points to other error rather than saying it doesnt exist\n cv_file = cv2.FileStorage(\"calib_images/calibration.yaml\", cv2.FILE_STORAGE_READ)\n camera_matrix = cv_file.getNode(\"camera_matrix\").mat()\n dist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n print(\"[INFO]: Extracted camera parameters.\")\n cv_file.release()\n return camera_matrix, dist_matrix",
"def copy(self):\n return CameraExtrinsic(self.position, self.direction, self.up)",
"def estimate_pose(self, corners, intrinsics: CameraIntrinsics):\n raise NotImplementedError()",
"def writeCameraSettings(self):\n pass",
"def loadCameraCalibration(self):\n\n # Read calibration.csv\n with open(\"util/calibration.csv\", 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\", quotechar=\"|\")\n tmp = []\n intrinsic_matrix = []\n distort_coef = []\n i = 0\n for row in csvreader:\n for col in row:\n try:\n tmp.append(float(col))\n except:\n print(\"ERROR in calibration.csv intrinsic matrix\")\n if(i!=3):\n intrinsic_matrix.append(tmp)\n i += 1\n tmp = []\n if(i==3):\n distort_coef = tmp\n tmp = []\n \n return intrinsic_matrix, distort_coef",
"def data(self):\n image = transform.warp(self._raw_image_data, self._corrective_transform)\n image_data = transform.rotate(image, self._rotation_offset)\n return image_data",
"def intrinsic_matrix_from_camera(w, h, fov):\n (cx, cy), f = calc_focal_values(w, h, fov)\n return np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])",
"def save(self,filepath):\n d = self.X.tocoo(copy=False)\n v = self.col_view.tocoo(copy=False)\n np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,\n v_row=v.row,v_col=v.col,v_data=v.data,v_shape=v.shape)",
"def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img",
"def create_xyz_basic(self):\n if not self.__check_array__():\n return\n\n if self.resize:\n # Assign to new variable as we may want to revert back to using self.data_array with different resizing\n self.data_array_resize = cv2.resize(self.data_array, tuple(self.resize_dims), interpolation=cv2.INTER_CUBIC)\n\n # If we resize te image we need to update the dimensions\n self.num_scans = self.data_array_resize.shape[0]\n self._num_pts = self.data_array_resize.shape[1]\n print(self.num_scans, self._num_pts)\n\n # Create empty matrix to hold all fo data\n self.xyz_array = np.zeros([self.num_scans, self._num_pts, self._len_z])\n\n # assign temperature, distance and angle data to arrays\n self.xyz_array[:, :, :3] = self.data_array_resize\n\n # Iterate through each scan and assign it an arbitrary x coordinate (1st scan is 0, 2nd is 1 etc)\n for x in range(self._num_pts):\n self.xyz_array[:, x, self.x_idx] = x\n\n # Iterate through each scan angle and give an arbitrary y coordinate\n for y in range(self.num_scans):\n # Reverse indices so that we start with bottom of array\n # > np index starts top left as 0,0 but we want to set 0,0 as bottom left so that y increase up the rows\n idx = self.num_scans - (y + 1)\n self.xyz_array[idx, :, self.y_idx] = y\n\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('XYZ array created successfully!!!')\n else:\n print('XYZ array created successfully!!!')\n\n self.flatten_array()",
"def store_image(self):\n cv2.imwrite(self.__diff_filename(), self.__diff_image())",
"def camera(self):\n self.spectrum = self.spectrum",
"def _derive_transformation_matrices(self):\n\n if hasattr(self, '_primaries') and hasattr(self, '_whitepoint'):\n if self._primaries is not None and self._whitepoint is not None:\n npm = normalised_primary_matrix(self._primaries,\n self._whitepoint)\n\n self._derived_RGB_to_XYZ_matrix = npm\n self._derived_XYZ_to_RGB_matrix = np.linalg.inv(npm)"
] | [
"0.6322945",
"0.597689",
"0.58319306",
"0.5828599",
"0.57685983",
"0.5751193",
"0.57245207",
"0.5624981",
"0.55312103",
"0.550769",
"0.55075365",
"0.5502988",
"0.54702735",
"0.54578793",
"0.53877527",
"0.5327595",
"0.53023946",
"0.5278144",
"0.5220536",
"0.5179092",
"0.516386",
"0.5151776",
"0.5142039",
"0.5104245",
"0.5093749",
"0.5082651",
"0.5080736",
"0.5079747",
"0.5076021",
"0.50640017"
] | 0.6199243 | 1 |
Add padding for unet of given depth | def _pad(x, depth=4):
divisor = np.power(2, depth)
remainder = x.shape[0] % divisor
# no padding because already of even shape
if remainder == 0:
return x
# add zero rows after 1D feature
elif len(x.shape) == 2:
return np.pad(x, [(0, divisor - remainder), (0, 0)], "constant")
# add zero columns and rows after 2D feature
elif len(x.shape) == 3:
return np.pad(x, [(0, divisor - remainder), (0, divisor - remainder),
(0, 0)], "constant") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def space_to_depth_fixed_padding(inputs, kernel_size,\n data_format='channels_last', block_size=2):\n pad_total = kernel_size - 1\n pad_beg = (pad_total // 2 + 1) // block_size\n pad_end = (pad_total // 2) // block_size\n return _padding(inputs, (pad_beg, pad_end), data_format)",
"def padding_depth(self):\n\t\treturn self.paddings_shape_param('D')",
"def zero_pad_features(features, depth):\n\n n = int(features.get_shape().dims[-1])\n extra_feature_count = depth - n\n assert n >= 0\n if n > 0:\n padding = tf.tile(features[:, :, :, :1] * 0,\n [1, 1, 1, extra_feature_count])\n features = tf.concat([features, padding], 3)\n return features",
"def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2",
"def temporal_padding(x, padding=(1, 1)):\n assert len(padding) == 2\n pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]\n return tf.pad(x, pattern)",
"def pad_upper(self, data, options, padding):\n # data, options = nrrd.read(input_file_name)\n rows, columns, depths = data.shape\n\n # numpy.fill\n for i in range(padding):\n padding_layer = [[self.AIR] * columns for j in range(rows)]\n data = self.concatenate_layers(data, padding_layer)\n\n options['sizes'][2] += padding # update depths\n return (data, options)",
"def pad(x, system_shape, pad_size):\n res = unpad(tf.tile(x, (1,)+(3,)*len(pad_size)),\n tuple(s-p for s, p in zip(system_shape, pad_size)))\n return res",
"def _zero_pad(self, kernel, size):\n if len(size) != kernel.ndim:\n size = kernel.shape[:1] + tuple(size) + kernel.shape[-1:]\n padsize = np.array(size) - np.array(kernel.shape)\n paddown = padsize // 2\n padup = padsize - paddown\n padarray = np.concatenate((padup[..., None],\n paddown[..., None]), axis=1)\n pads = tuple([tuple(p) for p in padarray])\n kernel_pad = np.pad(kernel, pads, 'constant', constant_values=0)\n return kernel_pad",
"def pad(x, padding, fill_value=0):\n input_shape = x.shape\n output_shape = []\n indices = []\n\n for dim, pad in enumerate(padding):\n try:\n left_pad, right_pad = pad\n except TypeError:\n left_pad = right_pad = pad\n output_shape.append(left_pad + input_shape[dim] + right_pad)\n indices.append(slice(left_pad, left_pad + input_shape[dim]))\n\n if fill_value:\n out = T.ones(output_shape) * fill_value\n else:\n out = T.zeros(output_shape)\n return T.set_subtensor(out[tuple(indices)], x)",
"def _prepare_onnx_paddings__tensorrt(g, input, pad):\n ctx = FUNCTION_REWRITER.get_context()\n torch_version = version_parse(torch.__version__)\n if torch_version.major == 1 and torch_version.minor < 10:\n return ctx.origin_func(g, input, pad)\n # The desired order of paddings is\n # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.\n # n is the dimension of input.\n # Assume zero-dimensions in the beginning, pad the \"pad\" sequence with\n # zeros in the beginning\n pad_len = torch.onnx.symbolic_opset9.size(\n g, pad, g.op('Constant', value_t=torch.tensor([0])))\n # Set extension = [0] * (dim * 2 - len(pad))\n rank = sym_help._get_tensor_rank(input)\n if rank is None:\n rank = g.op('Size', g.op('Shape', input))\n else:\n rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64))\n extension = g.op(\n 'Sub',\n g.op('Mul', rank,\n g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))),\n pad_len)\n # Concat pad with extension: paddings = [dim_n_begin, dim_n_end,\n # dim_n-1_begin, dim_n-1_end, 0, 0, ... ]\n # Currently ONNX only supports int64 type for Pad\n pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n paddings = g.op(\n 'Concat',\n pad,\n g.op(\n 'ConstantOfShape',\n extension,\n value_t=torch.tensor([0], dtype=torch.int64)),\n axis_i=0)\n # Reshape and reverse order and collate first beginnings and then ends\n # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],\n # [..., 0, dim_n-1_end, dim_n_end]]\n # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin,\n # ..., 0, dim_n - 1_end, dim_n_end]\n\n # replace original Constant-Transpose-Constant with Slices and Concat.\n paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0])\n begins = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2])\n ends = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2])\n paddings = g.op('Concat', begins, ends, axis_i=0)\n padding_c = g.op(\n 'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n return padding_c",
"def convert_padding(g, op, block):\n\n input_x = g.get_node(op.input(\"X\")[0])\n input_padding = op.input(\"Paddings\")\n if input_padding:\n padding = g.get_node(input_padding[0])\n padding = infer_value(padding, g.get_params()).numpy().tolist()\n else:\n padding = op.attr(\"paddings\")\n padding = op.attr(\"paddings\")\n value = op.attr(\"value\")\n data_format = op.attr(\"data_format\")\n mode = op.attr(\"mode\")\n assert mode != \"circular\", \"Don't support mod='circular' for PaddlePaddle's padding\"\n if mode == \"replicate\":\n mode = \"edge\"\n\n pad_len = len(padding)\n new_paddings = [0] * (pad_len + 4)\n for i in range(0, pad_len, 2):\n index = -1 - i\n if data_format[:2] != \"NC\":\n index = -3 - i\n new_paddings[index] = padding[i + 1]\n new_paddings[index - 1] = padding[i]\n\n new_paddings = [new_paddings[i : i + 2] for i in range(0, len(new_paddings), 2)]\n\n out = _op.nn.pad(input_x, new_paddings, pad_value=value, pad_mode=mode)\n g.add_node(op.output(\"Out\")[0], out)",
"def pad(input, pad_size):\n if not pad_size:\n return input\n return tf.pad(input, [[0,0],[pad_size, pad_size],[pad_size, pad_size],[0,0]], 'REFLECT')",
"def fixed_padding(inputs, kernel_size, data_format='channels_last'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n return _padding(inputs, (pad_beg, pad_end), data_format)",
"def fixed_padding_2d3d(self, inputs, kernel_size):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if self.data_format == 'channels_first':\n if len(inputs.shape)==4:\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n elif len(inputs.shape)==5:\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n if len(inputs.shape)==4:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n elif len(inputs.shape)==5:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n return padded_inputs",
"def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs",
"def _build(layer, height):\n if len(layer) == 1:\n return layer\n odd = None\n if len(layer) % 2:\n # promote to higher level\n odd = layer.pop(-1)\n # layer.append(layer[-1])\n new_layer = []\n for idx in range(0, len(layer), 2):\n node = Node(layer[idx].val + layer[idx + 1].val)\n node.h = height + 1\n node.l, node.r = layer[idx], layer[idx + 1]\n layer[idx].p, layer[idx + 1].p = node, node\n new_layer.append(node)\n if odd:\n odd.h += 1\n new_layer.append(odd)\n return new_layer",
"def _pad1d(self, x: torch.Tensor, padding_left: int, padding_right: int, mode: str = \"zero\", value: float = 0.0):\n length = x.shape[-1]\n if mode == \"reflect\":\n max_pad = max(padding_left, padding_right)\n if length <= max_pad:\n x = F.pad(x, (0, max_pad - length + 1))\n return F.pad(x, (padding_left, padding_right), mode, value)",
"def pad_graph(graph_dict, n_graphs_post_padding, n_nodes_post_padding, n_edges_post_padding):\n node_graph_idx = np.zeros(n_nodes_post_padding)\n node_graph_idx[:len(graph_dict['node_graph_idx'])] = graph_dict['node_graph_idx']\n graph_dict['node_graph_idx'] = node_graph_idx\n\n node_features = np.concatenate(graph_dict['node_features'])\n padded_node_features = np.zeros([n_nodes_post_padding, node_features.shape[1]],\n dtype=node_features.dtype)\n padded_node_features[:len(node_features), :] = node_features\n graph_dict['node_features'] = padded_node_features\n\n edge_graph_idx = np.zeros(n_edges_post_padding)\n edge_graph_idx[:len(graph_dict['edge_graph_idx'])] = graph_dict['edge_graph_idx']\n graph_dict['edge_graph_idx'] = edge_graph_idx\n\n edge_features = np.concatenate(graph_dict['edge_features'])\n padded_edge_features = np.zeros([n_edges_post_padding, edge_features.shape[1]],\n dtype=edge_features.dtype)\n padded_edge_features[:len(edge_features), :] = edge_features\n graph_dict['edge_features'] = padded_edge_features\n\n edge_idx_padding = np.zeros(shape=[2, n_edges_post_padding - len(edge_features)], dtype=np.int32)\n # transpose so shape is [n_edge, 2]\n graph_dict['edge_idx'] = np.concatenate(graph_dict['edge_idx'] + [edge_idx_padding], axis=1).T\n\n labels_array = -np.ones([n_graphs_post_padding], dtype=np.int32)\n labels_array[:len(graph_dict['labels'])] = graph_dict['labels']\n graph_dict['labels'] = labels_array\n return graph_dict",
"def pad( padNumber, ant, subarray=DEFAULT):\n multiSubarray('pad', subarray, padNumber, ant)",
"def insert_padding(img, pad_h, pad_w):\n global frame_height, frame_width\n padding_3_dims = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))\n # apply padding in the above dimensions with values 0\n padded_img = numpy.pad(img, padding_3_dims, 'constant', constant_values=0)\n return padded_img",
"def pad_edges(self, pad):\n weights=[]\n for dim, xy in zip([0, 1], [self.x, self.y]):\n xy0 = np.mean(xy)\n W = xy[-1]-xy[0]\n dist = np.abs(xy-xy0)\n wt=np.ones_like(dist)\n wt[ dist >= W/2 - pad] = 0\n weights += [wt]\n self.weight *= weights[0][:,None].dot(weights[1][None,:])",
"def _padding(inputs, paddings, data_format):\n if data_format == 'channels_first':\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], paddings, paddings])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], paddings, paddings, [0, 0]])\n return padded_inputs",
"def spatial_reflection_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format ' + str(data_format))\n\n if data_format == 'channels_first':\n pattern = [[0, 0],\n [0, 0],\n list(padding[0]),\n list(padding[1])]\n else:\n pattern = [[0, 0],\n list(padding[0]), list(padding[1]),\n [0, 0]]\n return tf.pad(x, pattern, \"REFLECT\")",
"def padding(old, l):\n new = deepcopy(old)\n for i, j in enumerate(new):\n new[i] += [0] * (l - len(j))\n new[i] = j[:l]\n return new",
"def add_padding(img, x_padding):\n w = img.shape[1] + x_padding * 2\n img_with_padding = np.zeros((img.shape[0], w, 3), dtype=img.dtype)\n img_with_padding[:, x_padding:img.shape[1] + x_padding] = img\n return img_with_padding",
"def get_paddings(self):\n return tf.constant([[0, 0,],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [0, 0]])",
"def spatial_reflection_2d_padding(x, padding=((1, 1), (1, 1)),\n data_format=None):\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format ' + str(data_format))\n\n if data_format == 'channels_first':\n pattern = [[0, 0],\n [0, 0],\n list(padding[0]),\n list(padding[1])]\n else:\n pattern = [[0, 0],\n list(padding[0]), list(padding[1]),\n [0, 0]]\n return tf.pad(x, pattern, \"REFLECT\")",
"def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs",
"def proper_padding(self, prediction, k_space_slice):\n h = prediction.shape[-3]\n w = prediction.shape[-2]\n w_pad = (k_space_slice.shape[-2] - w) // 2\n h_pad = (k_space_slice.shape[-3]-h) // 2\n return torch.nn.functional.pad(prediction, (0,0,w_pad,w_pad,h_pad,h_pad), \"constant\", 0)",
"def padding(img, n):\n img = np.pad(img, [(n, n), (n, n)], mode='constant', constant_values=0)\n\n return img"
] | [
"0.66942734",
"0.6406283",
"0.61821586",
"0.6131536",
"0.5901226",
"0.5837195",
"0.57383364",
"0.56973714",
"0.56866306",
"0.56743133",
"0.56252444",
"0.551921",
"0.55042475",
"0.5423706",
"0.5403531",
"0.54034203",
"0.5395218",
"0.53895396",
"0.538164",
"0.5379684",
"0.5374347",
"0.53632003",
"0.5352448",
"0.53519785",
"0.53474075",
"0.5346369",
"0.5335501",
"0.53249127",
"0.5321084",
"0.53206474"
] | 0.69494796 | 0 |
returns a normalized url to path relative from root | def relative_url(path, root):
try:
url = os.path.relpath(path, root)
except:
error('Unable to make a relative url:', url, root)
url = url.replace('\\', '/') if os.sep == '\\' else url
return urllib.parse.quote(url) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def full_url(self, path):\n if path[0] == '/':\n path = path[1:]\n return urljoin(self.absolute_root, path)",
"def getRootURL():",
"def relative_base(base):\n return as_base(base).lstrip('/')",
"def _graceful_relative_url(base_url, url):\n if url == base_url:\n return ''\n base_prefix = '%s://%s' % urlparse.urlparse(base_url or '')[0:2]\n url_prefix = '%s://%s' % urlparse.urlparse(url or '')[0:2]\n if base_prefix == url_prefix and url_prefix != '://':\n return url[len(url_prefix):]\n return url",
"def get_base_url(self):\n return urlparse.urljoin(self.domain, self.root_path)",
"def full_url(self):\r\n\r\n url = '/' + '/'.join(p.slug for p in list(self.get_ancestors()) + [self] if p.slug)\r\n\r\n # Make sure the URL ends with a slash, as god intended.\r\n # This little endswith dance is done to handle the root url ('/') correctly.\r\n if not url.endswith('/'):\r\n url = url + '/'\r\n\r\n return url",
"def _absurl(fragment):\r\n root = settings.MEDIA_URL\r\n root += root[-1:] != '/' and '/' or ''\r\n return urlparse.urljoin(root, fragment)",
"def normalize_cdmi_url(self, path):\n # Turn URL path into OS path for manipulation\n mypath = url2pathname(path)\n if not os.path.isabs(mypath):\n mypath = os.path.join(url2pathname(self.pwd()), mypath)\n # normalize path\n mypath = os.path.normpath(mypath)\n if path.endswith(\"/\") and not mypath.endswith(\"/\"):\n mypath += \"/\"\n url = self.cdmi_url + pathname2url(mypath)\n return url",
"def relative_uri(base, to):\n if to.startswith(SEP):\n return to\n b2 = base.split(SEP)\n t2 = to.split(SEP)\n # remove common segments (except the last segment)\n for x, y in zip(b2[:-1], t2[:-1]):\n if x != y:\n break\n b2.pop(0)\n t2.pop(0)\n if b2 == t2:\n # Special case: relative_uri('f/index.html','f/index.html')\n # returns '', not 'index.html'\n return ''\n if len(b2) == 1 and t2 == ['']:\n # Special case: relative_uri('f/index.html','f/') should\n # return './', not ''\n return '.' + SEP\n return ('..' + SEP) * (len(b2)-1) + SEP.join(t2)",
"def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])",
"def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])",
"def base_url_path(self):\n path = urlsplit(self.base_url())[2]\n if path.endswith(\"/\"):\n path = path[:-1]\n return path",
"def urlpath(self, url):\n\t\t# remove schema + hostname\n\t\turl = re.sub('^[^:]*://[^/]+', '/', url)\n\n\t\treturn self.canonicalize(url)",
"def get_relative_url(current, target):\n rel = os.path.relpath(target, current)\n\n if rel[-1] != \"/\":\n if \".\" not in rel.split(\"/\")[-1]:\n rel += \"/\"\n\n if not rel.startswith(\"../\") and rel != \"./\":\n rel = f\"./{rel}\"\n\n return rel",
"def buildpath(self):\n basepath = urlutil.href_settings.root + (self.relpath if self.relpath else cherrypy.request.path_info)\n if basepath.find('~') < 0:\n basepath += ('' if basepath.endswith('/') else '/') + '~'\n if cherrypy.request.query_string:\n basepath += ('&' if basepath.find('?') >= 0 else '?') + cherrypy.request.query_string\n return basepath",
"def get_path(self, normalize = False):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n path = split[0]\r\n if not normalize: return path\r\n if not path.startswith((\"http://\", \"https://\")): return path\r\n return netius.legacy.urlparse(path).path",
"def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)",
"def clean_url(app_server, base_path) -> str:\n if app_server.endswith('/'):\n base_url = f\"{app_server[:-1]}{base_path}\"\n else:\n base_url = f\"{app_server}/{base_path}\"\n return base_url",
"def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url",
"def absolute(self):\n if self.relative == '':\n return self.root # don't join in this case as that appends trailing '/'\n return os.path.join(self.root, self.relative)",
"def get_short_url_base():",
"def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"",
"def _absPath(self, relpath):\n\n # Pass through URIs and absolute paths.\n if self.isUrl(relpath) or relpath[0] == '/':\n return relpath\n\n # This won't deal with ~user/ syntax, but it's much less\n # common anyway.\n if relpath.startswith('~/') and 'HOME' in os.environ:\n return os.path.join(os.environ['HOME'], relpath[2:])\n\n if self._configFileStack:\n relativeTo = os.path.dirname(self._configFileStack[-1])\n else:\n relativeTo = os.getcwd()\n\n if self.isUrl(relativeTo):\n parts = urlparse.urlsplit(relativeTo)\n return urlparse.urlunsplit((parts.scheme, parts.netloc, os.path.normpath(os.path.join(parts.path, relpath)), parts.query, parts.fragment))\n return os.path.normpath(os.path.join(relativeTo, relpath))",
"def normalize_url(url: str) -> str:\n parts = urlparse(url)\n\n path = quote(parts.path)\n while '//' in path:\n path = path.replace(\"//\", \"/\")\n\n return urlunparse(parts._replace(path=path))",
"def _fullpath(self, path):\n splitpath = path.split(self._baseurl, 2)\n if len(splitpath) == 1:\n result = os.path.join(self._baseurl, path)\n else:\n result = path # path contains baseurl already\n return result",
"def url_abs(name, *args):\n\tprotocol = settings.PROTOCOL\n\tdomain = settings.DOMAIN\n\turl = reverse(name, args=args)\n\tabs_path = '{}://{}{}'.format(protocol, domain, url)\n\t\n\treturn abs_path",
"def fix_apiroot(root):\n if '://' in root:\n return root\n if ('/' not in root) or ('.' not in root.split('/')[0]):\n root = \"www.pennapps.com/\" + root\n return \"http://%s\" % root",
"def build_absolute_url(self, path_or_url):\n return urllib.parse.urljoin(self.parsed_url.geturl(), path_or_url)",
"def resolve_url(url, redirects):\n s = url.find(':')\n if s < 0:\n return url\n scheme, rest = url[:s], url[s+1:]\n if scheme in redirects:\n root = redirects[scheme]\n elif scheme in REPO_ROOTS:\n root = REPO_ROOTS[scheme]\n else:\n return url\n root = root.rstrip('/')\n rest = rest.lstrip('/')\n return '/'.join([root, rest])",
"def base_uri(relative_path=''):\n base_path = get_app_root()\n if not os.path.exists(base_path):\n raise ValueError('Path %s does not exist' % base_path)\n\n return 'file://%s' % os.path.join(base_path, relative_path)"
] | [
"0.75811696",
"0.7120959",
"0.7007499",
"0.6957676",
"0.6937742",
"0.6913981",
"0.6850913",
"0.67784756",
"0.6737724",
"0.67350227",
"0.67350227",
"0.6698321",
"0.6689216",
"0.6677582",
"0.6671769",
"0.6658958",
"0.66039646",
"0.6588936",
"0.65848976",
"0.65747464",
"0.65624976",
"0.65560746",
"0.6540153",
"0.6529589",
"0.6506639",
"0.64965904",
"0.64825475",
"0.6440993",
"0.64391786",
"0.6414008"
] | 0.7165835 | 1 |
Generate Post objects from markdown. Date must be present in each post and posts must be ordered by date. | def parse_markdown(filename):
if not os.path.exists(filename):
error('File not found', filename)
posts = list()
with open(filename, encoding='utf-8') as f:
line = next(f)
if line.startswith('# '):
title = line[2:].strip()
record = []
next(f)
else:
title = None
record = [line]
for line in f:
if not line.startswith('___'):
record.append(line)
else:
posts.append(Post.from_markdown(record))
record = []
# set rank of posts in date
daterank = defaultdict(int)
for post in posts:
daterank[post.date] += 1
post.daterank = daterank[post.date]
# check post order
for post1, post2 in zip(posts[:-1], posts[1:]):
if post1.date > post2.date:
error('Posts are not ordered', f'{post1.date} > {post2.date}')
return title, posts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def markdown_post(post):\n post['entry'] = markdown(post['entry'].replace(\"\\n\",\" \\n\"), output=\"html5\")\n return post",
"def parse_post_text(formatted_content):\n post = {}\n # Parse Mod comments and remove them from the text.\n potential_comments = re.finditer(\"\\[.+?\\]\", formatted_content, re.DOTALL)\n comments = []\n for comment_match in potential_comments:\n comment = comment_match.group()\n mod = re.search(r\"\\-\\s?Mod\\.\\s?(?P<mod>\\w+\\b)\", comment)\n if mod:\n comments.append({\n \"comment\" : comment,\n \"mod\" : mod.group(\"mod\")\n })\n post[\"modComments\"] = comments\n \n # Comments are removed from the post test so that\n # links, reports, etc. mentioned by mods are not extracted.\n no_comment_txt = formatted_content\n for comment in comments:\n no_comment_txt = no_comment_txt.replace(comment[\"comment\"], \"\")\n \n metadata, header_end = parse_post_metadata(no_comment_txt)\n post.update(metadata)\n \n sections = re.split(r\"^[\\*#]{3,}\\s*$\", no_comment_txt[header_end:], flags=re.M)\n articles = []\n \n # Some posts have articles which are parsed into multiple sections:\n # Ex: http://www.promedmail.org/direct.php?id=2194235\n # The section parsing code tries to recombine these by concatenating\n # unrecognized sections onto the previous sections if they form an article.\n # article_start_idx keeps track of the first section in the article.\n article_start_idx = None\n \n for idx, section in enumerate(sections):\n section = section.strip()\n article = parse_article_text(section, post_date=post['promedDate'])\n # Check if the section contains an actual article by seeing which\n # properties could be parsed.\n if article.get('source') or article.get('date'):\n articles.append(article)\n article_start_idx = idx\n else:\n # When a section cannot be parsed as an article the following code\n # tries to determine what it is. If the type cannot be determined\n # an error or warning is thrown.\n # These warnings can be used to find sections which are not being\n # correctly parsed.\n # Posts with known issues:\n # http://www.promedmail.org/direct.php?id=19990512.0773\n if re.search(r\"Visit ProMED-mail\\'s web site at|\"\n r\"Please support (the \\d{4}\\s)?ProMED\\-mail|\"\n r\"Donate to ProMED\\-mail. Details available at|\"\n r\"ProMED\\-mail makes every effort to verify the reports|\"\n r\"PROMED\\-MAIL FREQUENTLY ASKED QUESTIONS|\"\n r\"Become a ProMED\\-mail Premium Subscriber|\"\n r\"A ProMED\\-mail post\",\n section, re.I):\n # boilerplate promed notice section\n pass\n elif re.search(r\"In this (update|post(ing)?)\", section):\n # table of contents section\n pass\n elif re.search(r\"Cases in various countries\", section):\n # This type of post typically has links to several articles\n # with single sentence summaries.\n # Ex: http://www.promedmail.org/direct.php?id=20131125.2073661\n pass\n elif section == \"\":\n # empty section\n pass\n elif idx == 0 and section.count(\"\\n\") < 2:\n # probably the article title\n pass\n else:\n if article_start_idx != None:\n article = parse_article_text(\n \"\\n#####\\n\".join(\n sections[article_start_idx:idx]).strip(),\n post_date=post['promedDate'])\n assert article.get('source') or article.get('date')\n articles[-1] = article\n continue\n else:\n print \"Unexpected Section (%s):\" % post['archiveNumber'], [section[0:50] + \"...\"]\n article_start_idx = None\n post['articles'] = articles\n return post",
"def rebuild_from_yaml(args):\n\n git_checkout_branch('gh-pages')\n\n posts = []\n for fname in glob('_posts/*.html'):\n with codecs.open(fname, 'r', 'utf-8') as f:\n c = f.read()\n # we only want the yaml frontmatter\n start = c.index('---') + 3\n end = c.rindex('---')\n frontmatter = yaml.safe_load(c[start:end])\n\n posts.append(Post(**frontmatter['api_data']['post']))\n\n _write_out(posts, yaml=False, supporting=True)",
"def parse_article_text(article_text, post_date=datetime.datetime.now()):\n result = {}\n\n metadata_start = 0\n main_content_start = 0\n main_content_end = len(article_text)\n \n article_date_match = re.search(r\"^Date:\\s(?P<date>[^\\(\\[\\n]+)\", article_text, re.M)\n if article_date_match:\n # There may be more than one source date in summary articles.\n # Example: http://promedmail.org/direct.php?id=1073176\n # Summary articles are not a focus so currently only the first date\n # is recorded.\n source_date = parse_datetime(\n article_date_match.group(\"date\")\n )\n\n if source_date:\n result[\"date\"] = datetime_to_utc(source_date)\n metadata_start = min(article_date_match.start(), metadata_start)\n main_content_start = max(article_date_match.end(), main_content_start)\n # The year is checked to avoid typos like 200_ that throw\n # the date off by a large factor.\n # Example: http://www.promedmail.org/direct.php?id=45850 (article 2)\n if result[\"date\"].year < 1900:\n result[\"date\"] = None\n # Some articles have timestamps that are incorrectly parsed.\n # Current examples:\n # http://www.promedmail.org/direct.php?id=43918\n # http://www.promedmail.org/direct.php?id=2200173\n # Some of these incorrect timestamps can be removed by verifying that\n # they preceed the time of the posting. A day of slop time is allowed\n # to account for variations due to incorrect timezones.\n elif result[\"date\"] > post_date + datetime.timedelta(1):\n result[\"date\"] = None\n else:\n result[\"date\"] = None\n \n source_match = re.search(r\"Source:\\s(?P<name>[^\\[\\n]+)\" +\\\n r\"(\\s(?P<edits>\\[.*))?\" +\\\n r\"\\n\" +\\\n r\"(?P<url>http.+)?\", article_text)\n if source_match:\n result[\"source\"] = source_match.groupdict()\n metadata_start = min(source_match.start(), metadata_start)\n main_content_start = max(source_match.end(), main_content_start)\n \n heading_match = re.search(r\"^(?P<idx>\\[\\d\\]\\s)?\" +\\\n r\"(?P<heading>\\S+.*)\\n\",\n article_text[0:metadata_start], re.M)\n if heading_match:\n result[\"heading\"] = heading_match.group(\"heading\")\n \n communicated_match = re.search(communicated_by_regex, article_text, re.M)\n if communicated_match:\n result[\"communicatedBy\"] = communicated_match.group(\"communicated_by\")\n main_content_end = min(communicated_match.start(), main_content_end)\n \n result[\"content\"] = article_text[main_content_start:main_content_end].strip()\n return result",
"def get_contents(\n self, post_ids: List[str], datetime_filter_fn: Optional[Callable[[datetime], bool]] = None\n ) -> List[str]:\n contents = []\n url = f\"http://blog.naver.com/PostView.nhn\"\n params = {\"blogId\": self.naver_id}\n for post_id in post_ids:\n params[\"logNo\"] = post_id\n\n # Get contents of a post\n response = self.session.get(url, params=params)\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Smart editor 3\n text = soup.select_one(f\"#post-view{post_id} > div > div > div.se-main-container\")\n # Smart editor 2\n if not text:\n text = soup.select_one(\n f\"#post-view{post_id} > div > div > div.se_component_wrap.sect_dsc.__se_component_area\"\n )\n\n if not text:\n text = soup.select_one(f\"#post-view{post_id}\")\n if text:\n text = text.get_text(\"\\n\").replace(\"\\xa0\", \" \") # Space unicode replace\n else:\n print(f\"[Error] cannot select content in {post_id}.\", file=sys.stderr)\n continue\n\n text = re.sub(\"\\s+\", \" \", text).strip()\n if datetime_filter_fn is None:\n contents.append(text)\n continue\n\n date_time = soup.select(\n f\"#post-view{post_id} > div > div > div > div > div > div.blog2_container > span.se_publishDate.pcol2\"\n )\n date_time += soup.select(\"#printPost1 > tr > td.bcc > table > tr > td > p.date.fil5\")\n\n if date_time:\n date_time = date_time[0].get_text()\n post_datetime = datetime.strptime(date_time, \"%Y. %m. %d. %H:%M\")\n if not datetime_filter_fn(post_datetime):\n continue\n else:\n print(f\"[Error] cannot select datetime in {post_id}, this post is not filtered\")\n\n contents.append(text)\n\n print(f\"Get contents: {len(contents)} found.\")\n return contents",
"def parse_post_metadata(post_text):\n result = {}\n \n header_end = 0\n \n promed_date_match = re.search(\n r\"Published Date:\\s(?P<date>.*)\", post_text)\n result[\"promedDate\"] = parse_promed_pub_datetime(\n promed_date_match.group(\"date\"))\n \n archive_match = re.search(r\"Archive Number: (?P<num>.*)\", post_text)\n result[\"archiveNumber\"] = archive_match.group(\"num\")\n header_end = archive_match.end()\n \n subject = re.search(r\"Subject:\\s(?P<subject>.*)\", post_text).group(\"subject\")\n result[\"subject\"] = parse_subject_line(subject)\n result[\"subject\"][\"raw\"] = subject\n \n # This will not find all linked reports.\n # Some older posts refrence posts using different indexes I do not know\n # how to interpret.\n # Example: http://promedmail.org/direct.php?id=2194235\n result[\"linkedReports\"] = [\n report_id for report_id in re.findall(r\"\\d{8}\\.\\d+\", post_text)]\n \n # Most links will be article source urls or links to promed.\n result[\"links\"] = list(set(\n re.findall(r\"http\\S+[^(\\.\\])(\\.\\)>\\s]\", post_text)))\n result[\"links\"].sort()\n \n communicated_match = re.search(communicated_by_regex, post_text, re.M)\n if communicated_match:\n result[\"communicatedBy\"] = communicated_match.group(\"communicated_by\")\n return result, header_end",
"def _parse_markdown(self):\n renderer = MyRenderer()\n md = mistune.Markdown(renderer=renderer)\n md.render(self._markdown_text)\n self._bash_commands = renderer._bash_commands",
"def markdown(text, *args, **kwargs):\n md = StMarkdown(*args, **kwargs)\n return md.convert(text)",
"def parse(text):\n md = markdown.Markdown(['codehilite', 'tables', ])\n\n for iref in re.findall(img_ref_re, text):\n img_id = iref[7]\n try:\n image = FlatPageImage.objects.get(pk=int(img_id))\n md.references[img_id] = (image.image_path.url, '')\n except ObjectDoesNotExist:\n pass\n\n for lref in re.findall(reference_re, text):\n doc_name = lref[7]\n try:\n doc = File.objects.get(name=doc_name)\n md.references[doc_name]= (doc.url, doc.name)\n except ObjectDoesNotExist:\n pass\n\n return md.convert(text)",
"def markdown(self, text):\n\n try:\n html = markdown.markdown(text)\n data = self.convert_content(html)\n return data\n except Exception as e:\n Utils.log(traceback.format_exc())\n Utils.send('markdown.error')\n Utils.error(e.args[0])",
"def _populate_posts(self, channel, url):\n import feedparser\n\n Post = get_model('articles', 'Post')\n Image = get_model('images', 'Image')\n\n parser = feedparser.parse(url)\n\n for entry in parser['entries']:\n # Some entries are incomplete and have only the title, need to\n # ignore these entries.\n if not entry.get('summary'):\n continue\n\n # The title may have only 140 characters\n title = self._truncate_string(entry['title'], 140)\n slug = slugify(title)\n headline = entry['summary']\n\n # Some entries do not have the 'content' field, in this case we\n # get the 'summary' field instead.\n if entry.get('content'):\n content = entry['content'][0]['value']\n else:\n content = entry['summary']\n\n # When we find a entry that already is registered we don't need\n # continue because the following registries already be registered.\n exists = Post.objects.filter(slug=slug).count()\n if exists:\n break\n\n # Check if has some image in the post content.\n # NOTE: For the best user experience we use only the posts that\n # have images.\n image_url = self._get_image_url_in_content(content)\n if image_url:\n main_image = Image.objects.create(\n title=title,\n slug=slug,\n archive_link=image_url,\n published=True,\n user=self._user\n )\n # Generate the 'short_title' based on 'content'\n short_title = re.sub('<[^<]+?>', '', content).encode('utf-8')\n short_title = self._truncate_string(short_title.strip(), 140)\n\n post = Post.objects.create(\n title=title,\n short_title=short_title,\n slug=slug,\n headline=headline,\n content=content,\n channel=channel,\n main_image=main_image,\n show_on_root_channel=True,\n published=True,\n hat='',\n user=self._user\n )",
"def process_md(text_md):\n\tprocessed_text_md = ( pre_proc.replace_br(text_md)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_false_titles)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_blank_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.replace_cid)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.replace_with_dash)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_by_hyphen)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_et_al)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_beta)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_vs)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.fix_enye)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_ellipsis)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_subtraction)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_by_colon)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_duplicated_dashes)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.fix_marks)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_title_questions)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_useless_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_duplicated_whitespaces)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_repeated_strings)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t)\n\treturn processed_text_md",
"def post_content(context, is_markdown=False, *args, **kwargs):\n obj = context['object']\n content = obj.parse_content()\n\n if not content:\n content = obj.content_rendered\n\n default_template = [\"djblog/includes/post_content.html\"]\n\n if obj.custom_template:\n tpl = Template(obj.custom_template)\n\n #elif obj.template_name:\n # default_template.append(obj.template_name)\n # tpl = loader.select_template(default_template)\n\n else:\n tpl = loader.select_template(default_template)\n\n custom_context = Context({\n 'content': mark_safe(markdown(content))\n })\n\n custom_context.update(context)\n\n return mark_safe(tpl.render(custom_context))",
"def format_posts(posts):\n formatted_posts = []\n\n for post in posts:\n post_data = post['data']\n formatted_post = {\n \"title\": post_data['title'],\n \"post_id\": post_data['id'],\n \"subreddit\": post_data['subreddit'],\n \"score\": post_data['score'],\n \"url\": post_data['url'],\n \"author\": post_data['author'],\n \"permalink\": format_post_permalink(post_data['permalink']),\n \"num_comments\": post_data['num_comments'],\n \"created\": post_data['created'],\n \"body\": post_data['selftext']\n }\n\n formatted_posts.append(formatted_post)\n\n return formatted_posts",
"def archive_parse_for_posts(page_html):\n # <div\\s+class=\"post.+data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\n post_info_regex = \"\"\"<div\\s+class=\"post.+?data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\"\"\"\n post_info = re.findall(post_info_regex, page_html, re.IGNORECASE|re.DOTALL)\n return post_info",
"def parse_posts(self):\n logger.info(\"Parsing posts\")\n\n self.df.title = self.df.title.str.strip()\n\n spam_companies = [\"Indeed Prime\"]\n self.df = self.df[~self.df[\"company\"].isin(spam_companies)]\n self.df = self.df.dropna(subset=[\"company\"])\n self.df = self.df.drop_duplicates(subset=[\"company\", \"date_posted\", \"title\"])",
"def generate_post(self):\n post = {'title': self.generate_title(), 'draft': False}\n for k in ('blog', 'id', 'labels', 'categories', 'draft'):\n if k not in self.header:\n continue\n if k == 'blog':\n post[k] = {'id': self.header[k]}\n else:\n post[k] = self.header[k]\n return post",
"def add_new_posts(last_updated=None):\n for blog in Blog.objects.all():\n try:\n document = feedparser.parse(blog.feed_url)\n except:\n print \"error parsing\"\n continue\n\n if last_updated is None:\n print(\"- Adding %i articles from %s\" % (len(document['entries']), blog.title))\n\n for entry in document['entries']:\n # now we create a new post\n post = Post()\n post.blog = blog\n post.title = entry['title']\n\n if 'summary' in entry:\n post.content = entry['summary']\n if 'content' in entry:\n post.content = entry['content']\n\n post.link = entry['link']\n post.save()\n else:\n # TODO: only parse from a date\n pass",
"def parse():\n G.go(SITE_URL)\n articles = []\n for article in G.doc.select(\"//li[@class='regularitem']\"):\n header = article.select('h4').text()\n text = article.select('div').text()\n url = article.select('h4/a/@href').text()\n dt_string = article.select('h5').text()\n # for date format \"1 Nov 2019 00:00:00\" or \"01 Nov 2019 00:00:00\"\n article_dt = re.search(r'\\d{1,2} [a-zA-Z]+ \\d{4} \\d{2}:\\d{2}:\\d{2}', dt_string)\n if article_dt is None:\n logging.exception('Datestring format is unknown: %s', dt_string)\n continue\n article_dt = article_dt.group(0)\n article_dt = datetime.datetime.strptime(article_dt, '%d %b %Y %H:%M:%S').strftime(\"%Y-%m-%d %H:%M:%S\")\n articles.append({'header': header, 'url': url, 'text': text, 'dt': article_dt})\n return articles",
"def markdown(text):\n text = gfm(text)\n text = markdown_lib.markdown(text)\n return text",
"def parse_post_content(self, response):\n post = Post()\n post['title'] = response.xpath('//h2/a/text()')[0].extract()\n post['image_url'] = response.xpath(\"//div[@class='cont group']//img/@src\")[0].extract()\n yield post",
"def convert(self, markdown: str) -> str:\n lines = markdown.split(NEWLINE)\n iterator = LineIterator(lines)\n\n while not iterator.is_done():\n for element in self.__elements:\n if element.is_relevant(iterator.value):\n element.replace(iterator)\n iterator.advance()\n return NEWLINE.join(iterator.lines)",
"def create_posts_df(post_filenames):\n posts_list = []\n n = 1\n for post in post_filenames:\n try:\n processed_post = xmlpost_to_dict(post)\n posts_list.append(processed_post)\n except AttributeError:\n print('Error parsing post:', post)\n n += 1\n\n print(\"Posts with trouble parsing (possibly missing messages):\" + str(n))\n df = pd.DataFrame(posts_list)\n df.post_time = pd.to_datetime(df.post_time)\n df.last_edit_time = pd.to_datetime(df.last_edit_time)\n # df.set_index(['post_id'])\n\n return df",
"def main(blog, date):\n template = front_matter({\n \"title\": blog,\n \"date\": get_date(\"%Y-%m-%d %H:%M:%S %z\"),\n })\n new_blog(date + '-' + blog + '.markdown', template)",
"def markdown(text, **kwargs):\n import markdown\n return markdown.markdown(text, **kwargs)",
"def convert_to_markdown(lines):\n # description = get_description(lines)\n blocks = get_blocks(lines)\n out = []\n for block in blocks:\n item = align_block(block)\n item = format_headings(item)\n item = format_lists(item)\n item = format_numb_list(item)\n out.append(item)\n return join_blocks(out)",
"def process_posts(app, doctree):\n env = app.builder.env\n if not hasattr(env, \"ablog_posts\"):\n env.ablog_posts = {}\n post_nodes = list(doctree.findall(PostNode))\n if not post_nodes:\n return\n post_date_format = app.config[\"post_date_format\"]\n should_auto_orphan = app.config[\"post_auto_orphan\"]\n docname = env.docname\n if should_auto_orphan:\n # mark the post as 'orphan' so that\n # \"document isn't included in any toctree\" warning is not issued\n # We do not simply assign to should_auto_orphan because if auto-orphan\n # is false, we still want to respect the per-post :rst:dir`orphan` setting\n app.env.metadata[docname][\"orphan\"] = True\n blog = Blog(app)\n auto_excerpt = blog.post_auto_excerpt\n multi_post = len(post_nodes) > 1 or blog.post_always_section\n for order, node in enumerate(post_nodes, start=1):\n if node[\"excerpt\"] is None:\n node[\"excerpt\"] = auto_excerpt\n if multi_post:\n # section title, and first few paragraphs of the section of post\n # are used when there are more than 1 posts\n section = node\n while True:\n if isinstance(section, nodes.section):\n break\n section = node.parent\n else:\n section = doctree\n # get updates here, in the section that post belongs to\n # Might there be orphan updates?\n update_dates = _get_update_dates(section, docname, post_date_format)\n # Making sure that post has a title because all post titles\n # are needed when resolving post lists in documents\n title = node[\"title\"] or _get_section_title(section)\n # creating a summary here, before references are resolved\n excerpt = []\n if node.children:\n if node[\"exclude\"]:\n node.replace_self([])\n else:\n node.replace_self(node.children)\n for child in node.children:\n excerpt.append(child.deepcopy())\n elif node[\"excerpt\"]:\n count = 0\n for nod in section.findall(nodes.paragraph):\n excerpt.append(nod.deepcopy())\n count += 1\n if count >= (node[\"excerpt\"] or 0):\n break\n node.replace_self([])\n else:\n node.replace_self([])\n nimg = node[\"image\"] or blog.post_auto_image\n if nimg:\n for img, nod in enumerate(section.findall(nodes.image), start=1):\n if img == nimg:\n excerpt.append(nod.deepcopy())\n break\n date = node[\"date\"]\n if date:\n try:\n date = datetime.strptime(date, post_date_format)\n except ValueError:\n if date_parser:\n try:\n date = date_parser(date)\n except ValueError:\n raise ValueError(\"invalid post date in: \" + docname)\n else:\n raise ValueError(\n f\"invalid post date ({date}) in \" + docname + f\". Expected format: {post_date_format}\"\n )\n else:\n date = None\n # if docname ends with `index` use folder name to reference the document\n # a potential problem here is that there may be files/folders with the\n # same name, so issuing a warning when that's the case may be a good idea\n folder, label = os.path.split(docname)\n if label == \"index\":\n folder, label = os.path.split(folder)\n if not label:\n label = slugify(title)\n section_name = \"\"\n if multi_post and section.parent is not doctree:\n section_name = section.attributes[\"ids\"][0]\n label += \"-\" + section_name\n else:\n # create a reference for the post\n # if it is posting the document\n # ! 
this does not work for sections\n app.env.domains[\"std\"].data[\"labels\"][label] = (docname, label, title)\n app.env.domains[\"std\"].data[\"anonlabels\"][label] = (docname, label)\n if section.parent is doctree:\n section_copy = section[0].deepcopy()\n else:\n section_copy = section.deepcopy()\n # multiple posting may result having post nodes\n for nn in section_copy.findall(PostNode):\n if nn[\"exclude\"]:\n nn.replace_self([])\n else:\n nn.replace_self(node.children)\n postinfo = {\n \"docname\": docname,\n \"section\": section_name,\n \"order\": order,\n \"date\": date,\n \"update\": max(update_dates + [date]),\n \"title\": title,\n \"excerpt\": excerpt,\n \"tags\": node[\"tags\"],\n \"author\": node[\"author\"],\n \"category\": node[\"category\"],\n \"location\": node[\"location\"],\n \"language\": node[\"language\"],\n \"redirect\": node[\"redirect\"],\n \"nocomments\": node[\"nocomments\"],\n \"image\": node[\"image\"],\n \"exclude\": node[\"exclude\"],\n \"external_link\": node[\"external_link\"],\n \"doctree\": section_copy,\n }\n if docname not in env.ablog_posts:\n env.ablog_posts[docname] = []\n env.ablog_posts[docname].append(postinfo)\n # instantiate catalogs and collections here\n # so that references are created and no warnings are issued\n if app.builder.format == \"html\":\n stdlabel = env.domains[\"std\"].data[\"labels\"] # NOQA\n else:\n if hasattr(env, \"intersphinx_inventory\"):\n stdlabel = env.intersphinx_inventory.setdefault(\"std:label\", {}) # NOQA\n baseurl = getattr(env.config, \"blog_baseurl\").rstrip(\"/\") + \"/\" # NOQA\n project, version = env.config.project, str(env.config.version) # NOQA\n for key in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n catalog = blog.catalogs[key]\n for label in postinfo[key]:\n coll = catalog[label] # NOQA\n if postinfo[\"date\"]:\n coll = blog.archive[postinfo[\"date\"].year] # NOQA",
"def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def posts_as_schemas(posts_from_vk: list[dict]) -> list[Post]:\n posts = []\n\n for post_from_vk in posts_from_vk:\n try:\n post = Post(\n date=post_from_vk[\"date\"],\n likes=post_from_vk[\"likes\"][\"count\"],\n text=post_from_vk[\"text\"],\n path=f\"wall{post_from_vk['owner_id']}_\" f\"{post_from_vk['id']}\",\n photos=[],\n videos=[],\n )\n except KeyError as exc:\n logger.error(\"No key %s for post: %s\", exc, post_from_vk)\n continue\n\n # Collect attachments (photos, videos etc.).\n if \"attachments\" in post_from_vk:\n attachments = post_from_vk[\"attachments\"]\n for attachment in attachments:\n if attachment[\"type\"] == \"photo\":\n try:\n photo = PostPhoto(url=\"\")\n photo.url = attachment[\"photo\"][\"sizes\"][-1][\"url\"]\n post.photos.append(photo)\n except KeyError as exc:\n logger.error(\"No key %s for photo: %s\", exc, post_from_vk)\n\n elif attachment[\"type\"] == \"video\":\n video = PostVideo(first_frame_url=\"\")\n video_from_vk = attachment[\"video\"]\n if \"first_frame\" in video_from_vk:\n video.first_frame_url = video_from_vk[\"first_frame\"][-1][\"url\"]\n elif \"image\" in video_from_vk:\n video.first_frame_url = video_from_vk[\"image\"][-1][\"url\"]\n else:\n logger.error(\"No video image found: %s\", post)\n continue\n post.videos.append(video)\n\n posts.append(post)\n\n return posts",
"def parse_article(self, response):\n\n raw_post = response.css(\"div.blog.post > div.inner > div.row > article\")\n\n post_loader = ItemLoader(item=BlogPostItem(), selector=raw_post)\n post_loader.default_output_processor = TakeFirst()\n\n post_title = raw_post.css(\"div#postcontent > h1::text\").extract_first()\n post_loader.add_value(\"title\", post_title)\n post_loader.add_value(\"url\", response.request.url)\n\n post_text_selector = raw_post.css(\"div#postcontent > div#mypost\")\n post_text = post_text_selector.xpath('string(.)').extract_first()\n post_loader.add_value(\"content\", post_text[:160])\n\n pub_date_text = raw_post.css(\"div#postcontent > div.no-mobile > div.posttag.right.nomobile > span::text\").extract_first()\n pub_date = parse_date(pub_date_text)\n post_loader.add_value(\"publication_date\", pub_date)\n\n initial_author_list = raw_post.css(\n \"div#postcontent > div.no-mobile > div.postauthor > span > a.goauthor > span::text\").extract()\n author_list = [name.strip() for name in initial_author_list]\n post_authors = \"::\".join(author_list)\n post_loader.add_value(\"author\", post_authors)\n\n post_tags = raw_post.css(\"div#postcontent > a.tag.secondary::attr(title)\").extract()\n post_tags_str = \"::\".join(post_tags)\n post_loader.add_value(\"tags\", post_tags_str)\n\n return post_loader.load_item()"
] | [
"0.6771389",
"0.62583774",
"0.59715056",
"0.59394884",
"0.59075785",
"0.58579963",
"0.5806396",
"0.58032244",
"0.57632285",
"0.5762416",
"0.575449",
"0.5736609",
"0.5716471",
"0.5630635",
"0.56303996",
"0.55910885",
"0.5574774",
"0.55405563",
"0.5535492",
"0.55039394",
"0.54988575",
"0.54572403",
"0.54377383",
"0.5424497",
"0.54201555",
"0.5406719",
"0.53972965",
"0.5394924",
"0.5387797",
"0.53724754"
] | 0.7360025 | 0 |
Purge root dir from irrelevant html files | def purge_htmlfiles(args, posts):
htmlist = list_of_htmlfiles(args, posts)
html_to_remove = list()
for fullname in glob.glob(os.path.join(args.root, '*.htm*')):
if fullname not in htmlist:
html_to_remove.append(fullname)
if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:
inpt = 'x'
while inpt not in 'yn':
inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? ').lower()
if inpt == 'n':
return
for name in html_to_remove:
print('Removing html files', name)
os.remove(name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(ctx):\n ctx.run(\"rm -rf build/html\")",
"def cleanup():\n download_dir = settings.DOWNLOAD_BASE_DIR\n\n for base, dirs, files in os.walk(download_dir):\n for dir in dirs:\n shutil.rmtree(download_dir + dir)",
"def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()",
"def html_clean(options):\r\n remake_directories(options.sphinx.doctrees, options.html.outdir)\r\n html(options)\r\n return",
"def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)",
"def clean(self):\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))",
"def tearDown(self):\n for root, dirs, files in os.walk(TEMPDIR, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n os.rmdir(root)",
"def clean():\n clean_files()",
"def purge_files(self):\n run_keyword(\"Purge Server Configuration\")\n run_keyword(\"Purge Cache Manager Configuration\")\n # TODO: Probably the only sane way to do this is to call\n # a helper script which runs as root.\n # run_keyword(\"Purge Cache\")\n valid = r'/vicep([a-z]|[a-h][a-z]|i[a-v])$'\n for vicep in glob.glob(\"/vicep*\"):\n if re.match(valid, vicep) and os.path.isdir(vicep):\n run_keyword(\"Purge Directory\", \"%s/AFSIDat\" % vicep)\n run_keyword(\"Purge Directory\", \"%s/Lock\" % vicep)\n for vheader in glob.glob(\"%s/V*.vol\" % vicep):\n run_keyword(\"Sudo\", \"rm -f %s\" % vheader)",
"def purge_cache():\n for (dir_path, dir_names, file_names) in os.walk(CACHE, topdown=False):\n for file_name in file_names:\n if is_json_file(file_name):\n path = os.path.join(dir_path, file_name)\n print(\"Removing file “%s”\" % path)\n os.remove(path)\n for directory in dir_names:\n path = os.path.join(dir_path, directory)\n if not os.listdir(path):\n print(\"Removing directory “%s”\" % path)\n os.rmdir(path)",
"def cleanDirecs(rootDir):\n for root, dirs, files in os.walk(rootDir, topdown=False):\n \n if not files:\n if not dirs:\n print(\"Removing {0}\".format(root))\n os.rmdir(os.path.join(rootDir, root))",
"def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])",
"def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)",
"def clean_home_subdir(self):\n\n self.log.debug(\"Cleaning up %s...\" % self.home_subdir_local)\n try:\n for tree in os.listdir(self.home_subdir_local):\n self.log.debug(\"... removing %s subtree\" % tree)\n path = os.path.join(self.home_subdir_local, tree)\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)\n except OSError, err:\n self.log.error(\"Cleaning up intel dir %s failed: %s\" % (self.home_subdir_local, err))",
"def _cleanPackageDir(self, *_):\r\n for _, path in self._pkgDir:\r\n os.rmdir(os.path.join(self._rootfs, path))\r\n\r\n assert len(self._containers) == 0",
"def clean():\n print(\"\\nCleaning the site from {}\\n\".format(_site_dest))\n rm(_site_dest)",
"def _clean_bins():\n rmtree(LIBS_DIR)\n rmtree(BINS_DIR)\n rmtree(HEADERS_DIR)",
"def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()",
"def clean_build(c):\n c.run(\"rm -fr build/\")\n c.run(\"rm -fr dist/\")\n c.run(\"rm -fr xmlstarlet/config.h \" \"xmlstarlet/Makefile \" \"xmlstarlet/config.status\")\n c.run(\"rm -fr .eggs/\")\n c.run(\"find . -name '*.egg-info' -exec rm -fr {} +\")\n c.run(\"find . -name '*.egg' -exec rm -f {} +\")",
"def clean(self):\n original_dir = os.getcwd()\n os.chdir(self.output)\n\n # Clear out directory\n file_list = os.listdir(self.output)\n\n for afile in file_list:\n if not afile.endswith('.gitignore'):\n path = os.path.join(self.output, afile)\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.remove(path)\n os.chdir(original_dir)",
"def clean_build(context):\n context.run(\"rm -fr build/\")\n context.run(\"rm -fr dist/\")\n context.run(\"rm -fr .eggs/\")\n context.run(\"find . -name '*.egg-info' -exec rm -fr {} +\")\n context.run(\"find . -name '*.egg' -exec rm -f {} +\")",
"def clean(self) -> None:\n # remove all *.py and *.pyi files in the folder\n for wc in [\"*.py\", \"*.pyi\", \"modules.json\"]:\n for f in (self.package_path).rglob(wc):\n f.unlink()",
"def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)",
"def clean(allimages, alldirs):\n\n for img in allimages:\n # Delete HTML files\n htmlfn = join(opts.root, img._dir._path, img._pagefn)\n if exists(htmlfn):\n if opts.verbose:\n print \"Deleting\", htmlfn\n try:\n os.unlink(htmlfn)\n except:\n print >> sys.stderr, \"Error: deleting\", htmlfn\n\n # Delete thumbnails\n if img._thumbfn:\n thumbfn = join(opts.root, img._thumbfn)\n if exists(thumbfn):\n if opts.verbose:\n print \"Deleting\", thumbfn\n try:\n os.unlink(thumbfn)\n img._thumbfn = None\n except:\n print >> sys.stderr, \"Error: deleting\", thumbfn\n\n for d in alldirs:\n files = dircache.listdir(join(opts.root, d._path))\n\n # Delete HTML files in directories\n for f in files:\n fn = join(opts.root, d._path, f)\n if f in [ dirindex_fn, allindex_fn, allcidx_fn,\n sortindex_fn, css_fn ] or \\\n f.startswith('trackindex-'):\n if opts.verbose:\n print \"Deleting\", fn\n try:\n os.unlink(fn)\n pass\n except:\n print >> sys.stderr, \"Error: deleting\", fn\n\n if f == index_fn and islink(fn):\n os.unlink(fn)",
"def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)",
"def clean():\n for root, dirs, files in os.walk('.'):\n for item in dirs:\n if (item[0]!='.'):\n try:\n os.remove(os.path.join(item,'.DS_Store'))\n except:\n pass",
"def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)",
"def clean(session):\n clean_dirs = (\n get_path(\".cache\"),\n get_path(\".coverage\"),\n get_path(\".pytest_cache\"),\n get_path(\"__pycache__\"),\n get_path(\"build\"),\n get_path(\"dist\"),\n get_path(\"docs\", \"__pycache__\"),\n get_path(\"docs\", \"build\"),\n get_path(\"scripts\", \"macos\", \"__pycache__\"),\n get_path(\"src\", \"python\", \"bezier.egg-info\"),\n get_path(\"src\", \"python\", \"bezier\", \"__pycache__\"),\n get_path(\"tests\", \"__pycache__\"),\n get_path(\"tests\", \"functional\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"hazmat\", \"__pycache__\"),\n get_path(\"wheelhouse\"),\n )\n clean_globs = (\n get_path(\".coverage\"),\n get_path(\"*.mod\"),\n get_path(\"*.pyc\"),\n get_path(\"docs\", \"abi\", \"example\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyc\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyd\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.so\"),\n get_path(\"src\", \"fortran\", \"*.o\"),\n get_path(\"tests\", \"*.pyc\"),\n get_path(\"tests\", \"functional\", \"*.pyc\"),\n get_path(\"tests\", \"unit\", \"*.pyc\"),\n )\n for dir_path in clean_dirs:\n session.run(shutil.rmtree, dir_path, ignore_errors=True)\n for glob_path in clean_globs:\n for filename in glob.glob(glob_path):\n session.run(os.remove, filename)",
"def clean():\n sudo(\"rm -rf %(admin_webroot)s\" % env)",
"def clean(ctx):\n ctx.run('rm -rf {dir}'.format(dir=ctx.build_dir.debian_dir))\n ctx.run('rm -rf {dir}'.format(dir=ctx.build_dir.dist_dir))\n ctx.run('rm -rf {dir}'.format(dir=DEBIAN_DPKG_DIR))"
] | [
"0.7720015",
"0.6934762",
"0.68778396",
"0.6763646",
"0.66906065",
"0.6680769",
"0.668029",
"0.66643465",
"0.66492623",
"0.6645479",
"0.6635241",
"0.6626942",
"0.6605654",
"0.65854466",
"0.654416",
"0.6536765",
"0.6526797",
"0.64898103",
"0.64494216",
"0.6443703",
"0.6431596",
"0.64281493",
"0.6421824",
"0.6420025",
"0.64162767",
"0.6400494",
"0.6388606",
"0.6372155",
"0.6369315",
"0.63596666"
] | 0.72082686 | 1 |
Purge thumbnail dir of irrelevant thumbnails | def purge_thumbnails(args, thumbdir, posts, diary=False):
thumblist = list_of_thumbnails(posts, diary)
thumbs_to_remove = list()
for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):
if os.path.basename(fullname) not in thumblist:
thumbs_to_remove.append(fullname)
if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:
inpt = 'x'
while inpt not in 'yn':
inpt = input(f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? ').lower()
if inpt == 'n':
return
for name in thumbs_to_remove:
print('Removing thumbnail', name)
os.remove(name)
info_fullname = os.path.splitext(name)[0] + '.info'
if os.path.exists(info_fullname):
os.remove(info_fullname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_thumbnails(self):",
"def delete_thumbnail(self, thumbnail_name):",
"def tearDown(self):\n for fn in self.tempImages:\n os.remove(os.path.join(self.root, fn))\n os.rmdir(self.root)",
"def clear_tmp_folder(self):\r\n for file in os.listdir(self.temp_dir):\r\n if file.endswith('.png') or file.endswith('.jpg'):\r\n path = os.path.join(self.temp_dir, file)\r\n print ('Cleaned up {}'.format(path))\r\n os.remove(path)",
"def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()",
"def clear_thumbnail(self):\n from anima.ui import utils\n utils.clear_thumbnail(self.thumbnail_graphics_view)",
"def cleanup():\n cmd='docker rmi --force $(docker images -a -q)'\n bash_command(\"Deleting all images\", cmd)",
"def clearImageFolder():\n filelist = listImageFolder()\n for f in filelist:\n os.remove('{}/{}'.format(imageFolder, f))",
"def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()",
"def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))",
"def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)",
"def space_cleaning():\n for file in os.listdir(\".\"):\n if file.endswith(\".png\"):\n os.remove(file)",
"def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)",
"def clean_up_temp_dir():\n files = glob.glob(f'{CONFIG_DIR}/tmp/*')\n for f in files:\n try:\n os.remove(f)\n except Exception:\n pass",
"def clearAllPictures(self):\n shutil.rmtree(PNG_OUTPUT_PATH)\n os.makedirs(PNG_OUTPUT_PATH)",
"def cleanup():\n download_dir = settings.DOWNLOAD_BASE_DIR\n\n for base, dirs, files in os.walk(download_dir):\n for dir in dirs:\n shutil.rmtree(download_dir + dir)",
"def delete_file(self, instance, sender, **kwargs):\n super(AutoImageField, self).delete_file(instance, sender)\n if getattr(instance, self.attname):\n # Get full path and the base directory that contains the file\n file_name = getattr(instance,self.name).name\n basedir = os.path.dirname(file_name)\n \n # Look for thumbnails created from filters for the current filename\n # and delete the files\n mask = add_to_basename(file_name, '_*')\n [os.remove(os.path.join(basedir, f)) for f in glob.glob(mask)]",
"def auto_delete_image_and_thumbnail_on_delete(sender, instance, **kwargs):\n if instance.image:\n if os.path.isfile(instance.image.path):\n os.remove(instance.image.path)\n\n if instance.thumbnail:\n if os.path.isfile(instance.thumbnail.path):\n os.remove(instance.thumbnail.path)\n\n return False",
"def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)",
"def cleanup_old_backups(self):\n print(\"Cleaning Old Backups for media files\")\n\n file_list = utils.get_backup_file_list(\n self.get_databasename(),\n self.get_servername(),\n 'media.tar.gz',\n self.storage\n )\n\n for backup_date, filename in file_list[0:-dbbackup_settings.CLEANUP_KEEP_MEDIA]:\n if int(backup_date.strftime(\"%d\")) != 1:\n print(\" Deleting: %s\" % filename)\n self.storage.delete_file(filename)",
"def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()",
"def clean_temp_storage_dir(self, filenames):\n for fn in filenames:\n try:\n pathlib.Path(pathlib.PurePath(self.temp_storage_dir, fn)).unlink()\n except FileNotFoundError:\n pass",
"def tearDown(self):\n shutil.rmtree(self.test_pic_folder)",
"def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)",
"def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)",
"def remove_unactionable_images(data):\n os.makedirs(os.path.join(data, 'removed'), exist_ok=True)\n for product in os.listdir(data):\n if product.startswith('product') is False:\n continue\n path = os.path.join(data, product)\n if os.path.isdir(path) is False:\n continue\n if is_useful(path, 0.5) is False:\n print('\\tRemoving ' + path)\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, 'removed', product + '.tiff'))\n shutil.rmtree(path)\n else:\n shutil.copy(os.path.join(path, 'TCI.tiff'),\n os.path.join(data, product + '.tiff'))",
"def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)",
"def clean(allimages, alldirs):\n\n for img in allimages:\n # Delete HTML files\n htmlfn = join(opts.root, img._dir._path, img._pagefn)\n if exists(htmlfn):\n if opts.verbose:\n print \"Deleting\", htmlfn\n try:\n os.unlink(htmlfn)\n except:\n print >> sys.stderr, \"Error: deleting\", htmlfn\n\n # Delete thumbnails\n if img._thumbfn:\n thumbfn = join(opts.root, img._thumbfn)\n if exists(thumbfn):\n if opts.verbose:\n print \"Deleting\", thumbfn\n try:\n os.unlink(thumbfn)\n img._thumbfn = None\n except:\n print >> sys.stderr, \"Error: deleting\", thumbfn\n\n for d in alldirs:\n files = dircache.listdir(join(opts.root, d._path))\n\n # Delete HTML files in directories\n for f in files:\n fn = join(opts.root, d._path, f)\n if f in [ dirindex_fn, allindex_fn, allcidx_fn,\n sortindex_fn, css_fn ] or \\\n f.startswith('trackindex-'):\n if opts.verbose:\n print \"Deleting\", fn\n try:\n os.unlink(fn)\n pass\n except:\n print >> sys.stderr, \"Error: deleting\", fn\n\n if f == index_fn and islink(fn):\n os.unlink(fn)",
"def clean():\n folders = ['utils_dfn/temp', 'utils_dfn/img', 'utils_dfn/mask', 'utils_dfn/output']\n for folder in folders:\n for item in os.listdir(folder):\n item_path = os.path.join(folder, item)\n if os.path.isdir(item_path):\n shutil.rmtree(item_path)\n elif os.path.isfile(item_path):\n os.remove(item_path)",
"def clean(self):\n\n for metric in self.metricList:\n listf = glob.glob(\n '{}/*_{}_{}*'.format(self.outDir, metric.name, self.num))\n if len(listf) > 0:\n for val in listf:\n os.system('rm {}'.format(val))"
] | [
"0.7496226",
"0.70975465",
"0.6904142",
"0.6789052",
"0.65433615",
"0.64652795",
"0.64232606",
"0.6410066",
"0.635366",
"0.6340497",
"0.63255644",
"0.63222384",
"0.6308864",
"0.630171",
"0.6284883",
"0.6272671",
"0.6269268",
"0.62621397",
"0.62551665",
"0.6248571",
"0.6245241",
"0.6237175",
"0.6237087",
"0.6200791",
"0.61761653",
"0.6172708",
"0.6151164",
"0.6135021",
"0.61335015",
"0.6127233"
] | 0.7741738 | 0 |
Return the list of full paths for pictures and movies in source directory plus subdirectories containing media | def list_of_medias_ext(args, sourcedir):
result = list()
listdir = sorted_listdir(os.listdir(sourcedir))
if '.nomedia' not in listdir:
for basename in listdir:
fullname = os.path.join(sourcedir, basename)
if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):
result.append(fullname)
else:
if is_media_within_dates(fullname, args.dates):
result.append(fullname)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_of_files(sourcedir, recursive):\n result = list()\n if recursive is False:\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n result.append(os.path.join(sourcedir, basename))\n else:\n for root, dirs, files in os.walk(sourcedir):\n if '.nomedia' not in files:\n for basename in sorted_listdir(files):\n result.append(os.path.join(root, basename))\n return result",
"def __return_movie_file_list(self, movie_path):\n movie_dir = movie_path.rsplit(\"/\",1)[0]\n movie_file_list =[]\n movie_extentionds = self.__movie_file_extensions(self.__file_extentions)\n for x in os.listdir(movie_dir):\n if x.rsplit(\".\",1)[-1]in movie_extentionds:\n movie_file_list.append(movie_dir+\"/\"+x)\t\t\n\t#USUNAC URL Z NAPISY24\n return movie_file_list",
"def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list",
"def getMediaFiles(path):\n fileList = getMediaFileList(path)\n # dirList = getDirectoryList(path)\n\n # results = map(getMediaFiles, dirList)\n\n # for result in results:\n # fileList = fileList + result\n\n return fileList",
"def get_subdirs(src_dir):\n img_dirs = sorted(next(os.walk(src_dir))[1])\n subdirs = [src_dir + img_dir for img_dir in img_dirs]\n return subdirs",
"def find_photos(source_path, common_extensions=('JPG', 'CR2', 'ORF', 'ARW', 'TIFF', 'DNG'), ignore=[]):\n # combinedignored = re.compile('|'.join('(?:{0})'.format(x) for x in ignore))\n # use endswith , ignore must be a tuple then\n # if ignore and dirpath.endswith(ignore):\n # for duplication, at the end cll the same funciton\n\n source_files = list()\n\n for (dirpath, dirnames, filenames) in os.walk(source_path):\n for f in filenames:\n if f.upper().endswith(common_extensions):\n # source_files.append(os.path.join(dirpath, f))\n parent = os.path.basename(os.path.normpath(dirpath))\n source_files.append({'dir':dirpath,\n 'filename':f,\n 'parent_folder':parent})\n\n return source_files",
"def get_movies(path):\n movies_list = []\n \n \n for f in os.listdir(path):\n \n full_file_path = join(path,f)\n if isdir(full_file_path):\n \n movies_list.extend( get_movies(full_file_path) )\n \n elif isfile(full_file_path) and full_file_path[-3:] in util.extension:\n m = Movie(f, full_file_path)\n movies_list.append(m)\n \n return movies_list",
"def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths",
"def get_replay_source_helper_paths(self):\n\n if self.replay_source is None:\n return None\n\n paths = []\n classes = self.get_helpers_classes()\n\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n for hc in classes:\n current_paths = []\n for c in hc:\n path = base_path + str(c).zfill(2) + \".tfrecord\"\n current_paths.append(path)\n paths.append(current_paths)\n return paths",
"def get_image_path(raw_input_dir: str) -> list:\n result = []\n for root, dirs, files in os.walk(raw_input_dir):\n for file in files:\n result.append(os.path.join(root, file))\n return result",
"def voicesPathsCollector(source_dir, gender_ids):\r\n # The list containing the IDs of the speakers which are the folder names\r\n person_ids_list = gender_ids\r\n\r\n # An empty list which it will contains voices paths which are in the section folders\r\n voices_paths_list = []\r\n\r\n # Iterating over a person speaker ids\r\n for person_id_idx in range(len(person_ids_list)):\r\n # Speaker path containing the chapter's directories\r\n speaker_path = Path.cwd().joinpath(source_dir, person_ids_list[person_id_idx])\r\n # The list containing the IDs of the sections by the current speaker which are the folder names\r\n sections_ids_list = os.listdir(speaker_path)\r\n\r\n # Iterating over a sections ids by the current speaker\r\n for sec_id_idx in range(len(sections_ids_list)):\r\n # Section path containing files\r\n section_path = Path.cwd().joinpath(speaker_path,sections_ids_list[sec_id_idx])\r\n # Iterating over files which are existing in section's folders\r\n for file in os.listdir(section_path):\r\n # Choose files with .flac extension\r\n if file.endswith(\".flac\"):\r\n\r\n voice_path = Path.cwd().joinpath(section_path, file)\r\n voices_paths_list.append(voice_path)\r\n\r\n return voices_paths_list",
"def get_replay_source_no_helper_paths(self):\n paths = []\n classes = self.get_replay_classes_no_helper()\n base_path = self.base_folder_path + \"/data/replay_images/\" + self.replay_source + \"/\"\n\n \n for c in classes:\n full_path = base_path + str(c).zfill(2) + \".tfrecord\"\n paths.append(full_path)\n \n return paths",
"def readPlayerImageFiles(self):\n currentPath = os.path.dirname(os.path.abspath(__file__))\n listOfFileNames=[]\n for i in os.listdir(currentPath):\n if re.match(\"player\\_\\d+\",i): #i.endswith(\".gif\")\n listOfFileNames.append(currentPath+'/'+i)\n return listOfFileNames",
"def get_library_content(self):\n from glob import glob\n try:\n os.path.isdir(self.source)\n lst = glob(self.source + '/*')\n except TypeError:\n lst = self.source\n dircheck = True\n while dircheck is True:\n dircheck = False\n newlst = []\n for entry in lst:\n if os.path.isdir(entry):\n newlst.extend(glob(entry + '/*'))\n dircheck = True\n else:\n newlst.append(entry)\n lst = newlst\n return lst",
"def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels",
"def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]",
"def get_paths_list_from_folder(folder):\n names = os.listdir(folder)\n relative_paths = [os.path.join(folder, image_name) for image_name in names]\n return relative_paths",
"def create_directory_list(root_dir: str):\n if not os.path.exists(root_dir):\n raise FileNotFoundError(\"Directory {} does not exist\".format(root_dir))\n\n # List all directories associated to different videos.\n recording_path_list = [os.path.join(root_dir, f) for f in os.listdir(root_dir)]\n\n input_data_path = []\n for g in recording_path_list:\n # Append the different directories associated to different video frame intervals.\n input_data_path.extend([os.path.join(g, f) for f in os.listdir(g)])\n\n return input_data_path",
"def get_dir_files(self, recursive=False):\n logging.info('Enumerating files under the source path (recursive=%s) ...', recursive)\n files = {}\n if not recursive:\n files[self.path_source] = [\n f for f in os.listdir(self.path_source) if os.path.isfile(os.path.join(self.path_source, f))\n ]\n else:\n for current_dir, sub_dirs, dir_files in os.walk(self.path_source):\n files[os.path.join(self.path_source, current_dir)] = [f for f in dir_files]\n\n return files",
"def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files",
"def get_images_paths(path: str) -> List[str]:\n\n image_paths = []\n\n for folder in os.listdir(path):\n for file in os.listdir(os.path.join(f\"{path}/{folder}\", \"images\")):\n image_paths.append(f\"{path}/{folder}/images/{file}\")\n\n return image_paths",
"def get_dir_and_file_list(path):\r\n dList = os.listdir(path)\r\n dirList = []\r\n fileList = []\r\n\r\n for item in dList:\r\n \r\n if os.path.isdir(os.path.join(path, item)):\r\n dirList.append(item)\r\n elif os.path.isfile(os.path.join(path, item)):\r\n if any(image_type in item.lower() for image_type in image_types):\r\n preview = image_preview(os.path.join(path, item))\r\n fileList.append((item, preview))\r\n else:\r\n fileList.append((item, None))\r\n\r\n return dirList, fileList",
"def load_video_paths(dir):\n VIDEO_EXTENSIONS = ['.mov', '.MOV', '.mp4']\n video_paths = []\n\n # traverse directory to obtain only paths to videos\n for dir_name, _, paths in sorted(os.walk(os.path.expanduser(dir))):\n for path in paths:\n if any(path.endswith(extensions) for extensions in VIDEO_EXTENSIONS):\n video_paths.append(os.path.expanduser(dir_name + '/' + path))\n\n return video_paths",
"def get_datapaths(input_dir):\n image_paths = []\n assert os.path.isdir(input_dir), f\"{input_dir} is not existed\"\n\n for root, _, names in os.walk(input_dir):\n for name in names:\n path = os.path.join(root, name)\n image_paths.append(path)\n return image_paths",
"def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]",
"def listdir(self, name, source, test_data=()):\n assert isinstance(source, config_types.Path)\n self.m.path.assert_absolute(source)\n result = self._run(\n name, ['listdir', source],\n lambda: self.test_api.listdir(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(x) for x in result.stdout.splitlines()]\n result.presentation.logs['listdir'] = map(str, ret)\n return ret",
"def get_skins_and_extensions(base_dir):\n ext_paths = []\n for subdir in ['extensions', 'skins']:\n for name in os.listdir(os.path.join(base_dir, subdir)):\n if os.path.isdir(os.path.join(base_dir, subdir, name)):\n ext_paths.append(os.path.join(subdir, name))\n return ext_paths",
"def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]",
"def get_content_directories() -> List[str]:\n result:list[str] = []\n for current_path in os.listdir(\"content\"):\n if os.path.isdir(os.path.join(\"content\", current_path)):\n result.append(os.path.join(\"content\", current_path))\n return result",
"def get_filepaths(extract_dir):\n\n index = []\n labels = []\n _extract_dir = os.path.join(extract_dir, 'UCF-101')\n for folder in os.listdir(_extract_dir):\n labels.append(folder)\n folderpath = os.path.join(_extract_dir, folder)\n\n if not os.path.isdir(folderpath):\n continue\n\n for filename in os.listdir(folderpath):\n if 'avi' not in filename:\n continue\n\n if filename[0] == '.':\n continue\n\n filepath = os.path.join(folderpath, filename)\n\n if os.path.exists(filepath):\n index.append(filepath)\n else:\n print(filepath)\n return index, labels"
] | [
"0.6816371",
"0.6584394",
"0.65694135",
"0.6454687",
"0.64294946",
"0.63914907",
"0.63858587",
"0.6369066",
"0.6280144",
"0.62456554",
"0.6161519",
"0.6160253",
"0.61361057",
"0.61112374",
"0.60726917",
"0.60657465",
"0.6015738",
"0.60046023",
"0.59833866",
"0.5970308",
"0.5962931",
"0.5946842",
"0.5943449",
"0.5941118",
"0.59369385",
"0.5896389",
"0.58943665",
"0.58893466",
"0.5880047",
"0.5864546"
] | 0.6835324 | 0 |
Compose html with blogger image urls | def compose_blogger_html(args, title, posts, imgdata, online_videos):
for post in posts:
for media in post.medias:
if type(media) is PostImage:
if media.uri not in imgdata:
print('Image missing: ', media.uri)
else:
img_url, resized_url = imgdata[media.uri]
media.uri = img_url
media.resized_url = resized_url
elif type(media) is PostVideo:
if not online_videos:
print('Video missing: ', media.uri)
else:
media.iframe = online_videos[0]
del online_videos[0]
else:
assert False
return print_html(args, posts, title, '', target='blogger') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replfunc(self, match):\n url = match.group(1)\n imgformat = url.split('.')[-1]\n if url.startswith('http'):\n data = urlopen(url).read()\n elif url.startswith('data'):\n img = '<img src=\"' + url + '\" ' + match.group(2) + ' />'\n return img\n else:\n with open(url, 'rb') as f:\n data = f.read()\n\n self.log.info(\"embedding url: %s, format: %s\" % (url, imgformat))\n b64_data = base64.b64encode(data).decode(\"utf-8\")\n if imgformat == \"svg\":\n img = '<img src=\"data:image/svg+xml;base64,' + \\\n b64_data + '\" ' + match.group(2) + '/>'\n elif imgformat == \"pdf\":\n img = '<img src=\"data:application/pdf;base64,' + \\\n b64_data + '\" ' + match.group(2) + '/>'\n else:\n img = '<img src=\"data:image/' + imgformat + \\\n ';base64,' + b64_data + '\" ' + match.group(2) + ' />'\n return img",
"def image_preview(self):\r\n h = '<img src=\"%s\" alt=\"Campaign badge\"/>' % self.image.url\r\n return mark_safe(h)",
"def image(self, link, title, alt):\n if not link.startswith(('http://', 'https://')):\n source_dir = os.path.dirname(self.source_path)\n link = os.path.abspath(os.path.join(source_dir, link))\n return '<img src=\"%s\" title=\"%s\" alt=\"%s\" />' % (link, title, alt)",
"def create_HTML_a_img(link_url, image_url):\n img = '<img src=\"' + image_url + '\">'\n linked_image = create_HTML_a(link_url, img)\n return linked_image",
"def website_create_body(website_info):\r\n body = \"\"\r\n body += H2 + website_info.title + END_H2\r\n body += '\\n' + P + website_info.content + END_P\r\n for image in website_info.images:\r\n if isinstance(image, str):\r\n body += '<img src=\"' + image + CLASS_CENTER\r\n elif isinstance(image, Image):\r\n body += '<img src=\"' + image.name + '\" width=\"' + image.size + CLASS_CENTER\r\n else:\r\n pass\r\n return body",
"def url(self):\n\t\treturn self.base_url+\"{}/{}/{}.jpg\".format(self.template,self._escape(self.top_text),self._escape(self.bottom_text))+(\"?\"+\"&\".join([\"{}={}\".format(k,quote(self.kwargs[k])) for k in self.kwargs]) if self.kwargs else \"\")",
"def embed_images(self):\n for img in self.book.xpath(\"//img[ not(starts-with(@src, 'data:')) and @src!= '']\"):\n img_src = img.attrib[\"src\"]\n img_raw = self.get_remote_content(img_src)\n if img_raw != None:\n img_64 = base64.b64encode(img_raw)\n file_info = os.path.splitext(img_src)\n ext = file_info[1].replace(\".\", \"\")\n ext = re.sub(\"\\?.*$\", \"\" , ext)\n \n if ext == \"svg\":\n svg = html.fromstring(img_raw.decode(\"utf-8\"))\n img.clear()\n img.tag = \"svg\"\n img[:] = [svg]\n else:\n img.set(\"src\", \"data:image/{};base64,{}\".format(ext, img_64.decode(\"utf-8\")))",
"def prepare_for_blogger(args):\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n online_images, online_videos = online_images_url(args)\n\n if args.check_images and check_images(args, posts, online_images) is False:\n pass\n\n html = compose_blogger_html(args, title, posts, online_images, online_videos)\n\n if args.full is False:\n html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)\n html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)\n html = STYLE.replace('%%', '%') + html\n\n if args.dest:\n with open(args.dest, 'wt', encoding='utf-8') as f:\n f.write(html)\n else:\n clipboard.copy(html)",
"def create_html(pic_info,sum_pic,upload_path,yun_link=('1','2')):\n save_file=pic_info+'.txt'\n content=\"\"\"\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <img src=\"%s%s\" style=\"\" title=\"%s\"/>\n </p>\n <p>\n <span style=\"color: #FF0000; font-size: 24px;\">link: \n </span>\n <a href=\"%s\" target=\"_blank\" \n style=\"font-size: 24px; text-decoration: underline;\">\n <span style=\"font-size: 24px;\">%s\n </span>\n </a> \n <span style=\"font-size: 24px;\">\n <span style=\"color: #FF0000; font-size: 24px;\">code:\n </span>\n %s\n </span>\n </p>\\n\\n\\n\\n\\n\\n\\n\\n\\n\n \"\"\"%(upload_path,sum_pic[0],sum_pic[0],upload_path,sum_pic[1],sum_pic[1],\n upload_path,sum_pic[2],sum_pic[2],upload_path,sum_pic[3],sum_pic[3],\n yun_link[0],yun_link[0],yun_link[1])\n with open(save_file, 'w') as f:\n f.write(content)\n f.close()",
"def correct_img_links(body_main_content, schema_name, list_name_image):\n for name_image in list_name_image:\n body_main_content = body_main_content.replace(\n \"src=\\\"\" + name_image + \"\\\"\",\n \"src=\\\"{% static \\\"schema_viewer/oxygen/\" + schema_name + \"/\" + name_image + \"\\\" %}\\\"\"\n )\n return body_main_content",
"def banner_wrapper(banner_url):\n # so simple\n return '{url}<img src=\"{url}\" alt=\"{alt}\">'.format(\n url=banner_url,\n alt='Banner'\n )",
"def image(self, src, title, text):\n src = escape_link(src)\n text = escape(text, quote=True)\n if title:\n title = escape(title, quote=True)\n html = '<img src=\"%s\" alt=\"%s\" title=\"%s\"' % (src, text, title)\n else:\n html = '<img src=\"%s\" alt=\"%s\"' % (src, text)\n if self.options.get('use_xhtml'):\n return '%s />' % html\n return '%s>' % html",
"def image_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_resized_url, self.title)\r\n return mark_safe(h)",
"def getNewsIconURL(newsBrain):",
"def embed_images(self, html):\n if not self.SUPPORT_EMBED_IMAGES:\n raise RuntimeError('%r does not support embed_images' % type(self))\n\n return self.RE_IMG.sub(self._embed_image, html)",
"def get_image(result):\n article_id = result['id']\n id_ = article_id[14:]\n href = article_id[:14]\n\n #FIXME: not working\n image_url = \"http://www.jpress.nli.org.il/Olive/APA/NLI_heb/get/GetImage.ashx?kind=block&href=%s&id=%s&ext=.png\" %(href, id_)\n \n return image_url",
"def get_image_url():",
"def thumbnail_generator():\n website_url = json.loads(request.data.decode())['url']\n try:\n webpage, message = url_preview.send_request(website_url)\n if webpage is not None:\n #Construct the soup object\n soup_object = url_preview.get_soup_object(webpage)\n #Get the title of the artcile\n title = url_preview.get_title(soup_object)\n #Get the website of the article\n website_name = url_preview.get_url(soup_object).rsplit(\".\", 1)[0]\n if website_name is None:\n website_name = website_url.split(\"//\", 1)[1].split(\"/\", 1)[0].rsplit(\".\", 1)[0]\n\n #Get the description of the article\n description = url_preview.get_description(soup_object)\n\n #Get the published date and time of the article\n date_time = url_preview.get_date_time(website_url)\n\n #Get the link to the preview image\n image_url = url_preview.get_preview_image(soup_object)['content']\n\n #Get the link to the favicon\n favicon_url = url_preview. get_favicon(soup_object)\n\n return render_template(\n \"success.html\",\n urlx=website_url,\n title=title,\n site_name=website_name,\n description=description,\n date_time=date_time,\n preview_image=image_url,\n favicon=favicon_url\n )\n except Exception as exp:\n return render_template('error.html', msg=str(exp))",
"def get_images(self,soup,Images):\n \n img=soup.find_all('a',href=re.compile(\"/photo.php?fbid=\"))\n img1=soup.find_all('a',href=re.compile(\"/photo\"))\n m=' '\n if img !=[]:\n img_href='https://www.facebook.com'+img[0]['href']\n m+=img_href+'\\n'\n \n elif img1 !=[]:\n img_href='https://www.facebook.com'+img1[0]['href']\n m+=img_href+'\\n'\n \n else:\n img=soup.find_all('a',href=re.compile(\"pcb\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n' \n \n \n else:\n img=soup.find_all('a',href=re.compile(\"photos\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n'\n \n Images.append(m)\n \n return Images",
"def hook_image_tag(self, parser, space, name):\n link = name\n caption = name\n params = {}\n\n # Parse the inner syntax, e.g. [[Image:src|option=val|caption]]\n separator = name.find('|')\n items = []\n if separator != -1:\n items = link.split('|')\n link = items[0]\n # If the last item contains '=', it's not a caption\n if items[-1].find('=') == -1:\n caption = items[-1]\n items = items[1:-1]\n else:\n caption = link\n items = items[1:]\n\n # parse the relevant items\n params = self._buildImageParams(items)\n img_path = self._getImagePath(link)\n\n template = jingo.env.get_template('wikiparser/hook_image.html')\n r_kwargs = {'img_path': img_path, 'caption': caption, 'params': params}\n return template.render(**r_kwargs)",
"def format_post_background(post: dict) -> str:\n parsed = urllib.parse.urlparse(post['url'])\n logger.debug(f\"Sending post: {post['name']}\")\n if 'i.redd.it' in post['url']:\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <img class=\"\" style=\"width: 100%;\" src=\"{post['url']}\">\n </div>\n \"\"\"\n elif 'v.redd.it' in post['url']:\n if post['media'] is not None:\n return \"\"\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <video style=\"width: 100%;\" data-dashjs-player autoplay src=\"{post['media']['reddit_video']['dash_url']}\" controls></video>\n </div>\n \"\"\"\n else:\n logger.error(f\"Error no media for v.redd.it link: {post['url']}\")\n return \"\"\n elif 'imgur' in post['url'] and ('gif' in post['url'] or 'mp4' in post['url']):\n return \"\"\n imgur_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <video controls poster=\"//i.imgur.com/{imgur_id}.jpg\" preload=\"auto\" autoplay=\"autoplay\" muted=\"muted\" loop=\"loop\" webkit-playsinline=\"\" style=\"width: 100%; height: 100%;\">\n <source src=\"//i.imgur.com/{imgur_id}.mp4\" type=\"video/mp4\">\n </video>\n </div>\n \"\"\"\n elif 'imgur' in post['url']:\n imgur_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <img class=\"\" style=\"width: 100%;\" src=\"//i.imgur.com/{imgur_id}.jpg\">\n </div>\n \"\"\"\n elif 'redgif' in post['url']:\n return \"\"\n redgifs_id = parsed.path.split('.')[0].split('/')[-1]\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <div style='position:relative; padding-bottom:88.67%;'>\n <iframe src='https://redgifs.com/ifr/{redgifs_id}' frameborder='0' scrolling='no' width='100%' height='100%' style='position:absolute;top:0;left:0;' allowfullscreen></iframe>\n </div>\n </div>\n \"\"\"\n elif 'gfycat' in post['url']:\n return \"\"\n else:\n thumbnail = post.get('thumbnail', '/favicon.ico')\n thumbnail = thumbnail if thumbnail != '' else '/favicon.ico'\n return f\"\"\"\n <div id=\"{post['name']}\" class=\"col-sm-3 col-lg-3 col-xxl-3\">\n <a href=\"{post['url']}\"><img class=\"\" style=\"width: 100%;\" src=\"{thumbnail}\">\n </div>\n \"\"\"\n return \"\"",
"def img(self, obj):\n return mark_safe(\n f\"<img src='/{obj.image.url}' width='{obj.image.width}' height='{obj.image.height}' />\"\n )",
"def show_me():\n # Scumbag thumbnail code\n try:\n from PIL import Image\n except ImportError:\n pass\n else:\n filename = os.path.join(app.static_folder, 'img', 'badumtss.png')\n image = Image.open(filename)\n\n return render_template('show_me.html')",
"async def img(ctx, message):\n \"\"\":param: ctx\"\"\"\n \"\"\":param: message\"\"\"\n \"\"\"return image url\"\"\"\n link_list = []\n\n url = \"http://imgur.com/search?q=\" + message\n response = urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"lxml\")\n for a in soup.find_all('a', href=True):\n if((a['href'][0:9]) == \"/gallery/\"):\n link_list.append(\"https://imgur.com/\" + a['href'])\n if(len(link_list) >=1):\n random_num = random.randint(0, len(link_list) - 1)\n await bot.say(link_list[random_num])\n else:\n await bot.say(\"there is no contente for \"+message)",
"def get_content(self):\n\n self.content = self.book.get_template('cover')\n\n tree = parse_string(super(EpubCoverHtml, self).get_content())\n tree_root = tree.getroot()\n\n images = tree_root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']})\n\n images[0].set('src', self.image_name)\n images[0].set('alt', self.title)\n\n tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)\n\n return tree_str",
"def parse_template(data, template):\n img_html = \"\"\"<div class=\"thumb-wrap\"><div class=\"thumb-holder\"></div><a href=\"{{URL}}\" target=\"_top\"><div class=\"thumb-img\" style=\"background-image:url('{{IMG}}');\"></div></a></div>\"\"\"\n template = template.replace('{{URL}}', data['link'].replace('http:','https:'))\n template = template.replace('{{URLX}}', data['link'])\n template = template.replace('{{TITLE}}', data['title'])\n #template = template.replace('{{BLURB}}', data['summary'])\n img_html = img_html.replace('{{URL}}', data['link'].replace('http:','https:'))\n if hasattr(data, 'tags') and len(data['tags']) > 0:\n template = template.replace('{{SECTION}}', data['tags'][0]['term'])\n else:\n template = template.replace('<h2><a href=\"{{URL}}\" target=\"_top\">{{SECTION}}</a></h2>', '')\n if hasattr(data, 'media_content') and len(data['media_content']) > 0:\n template = template.replace('{{IMG}}', '%s?w=150' % data['media_content'][0]['url'].replace('http:','https:'))\n else:\n template = template.replace(img_html, '')\n\n return template",
"def index():\n\n return \"\"\"\n <div>\n <h1> Image Captioning REST API </h1>\n <h3> The following API end points are valid </h3>\n <ul>\n <h4> Inception V3 </h4>\n <li> <code>/inception/v3/ping </code> - <br/>\n <b> Description : </b> checks availability of the service. returns \"pong\" with status 200 when it is available\n </li>\n <li> <code>/inception/v3/caption/image</code> - <br/>\n <table>\n <tr><th align=\"left\"> Description </th><td> This is a service that can caption images</td></tr>\n <tr><th align=\"left\"> How to supply Image Content </th></tr>\n <tr><th align=\"left\"> With HTTP GET : </th> <td>\n Include a query parameter <code>url </code> which is an http url of JPEG image <br/>\n Example: <code> curl \"localhost:8764/inception/v3/caption/image?url=http://xyz.com/example.jpg\"</code>\n </td></tr>\n <tr><th align=\"left\"> With HTTP POST :</th><td>\n POST JPEG image content as binary data in request body. <br/>\n Example: <code> curl -X POST \"localhost:8764/inception/v3/caption/image\" --data-binary @example.jpg </code>\n </td></tr>\n </table>\n </li>\n <ul>\n </div>\n \"\"\"",
"def get_thumbnail_url():",
"def get_image_link():\n image_links = set()\n supplemented_keyword = urllib.parse.quote(\n supplemented_keywords[random.randint(0,\n len(supplemented_keywords) - 1)],\n safe='')\n main_keyword = urllib.parse.quote(\n main_keywords[random.randint(0,\n len(main_keywords) - 1)], safe='')\n\n # print('the theme of cats: ' + supplemented_keyword)\n\n search_query = (main_keyword + ' ' + supplemented_keyword).replace(\n ' ', '%20')\n url = 'https://www.google.com/search?q=' + \\\n search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n while 'https://' not in image_link or r'\\\\u' in image_link or '.jpg' not in image_link:\n image_link = list(image_links)[random.randint(0, len(image_links) - 1)]\n # print('link:' + image_link)\n\n return image_link",
"def prettyformat(self):\n \n import re\n\n html = self.get_htmlsrc()\n if type(html) == type([]):\n html = html[0]\n if type(html) != type(\"\"):\n try:\n html = str(html)\n except:\n html = html.__str__()\n \n tmp = BeautifulSoup(html)\n base = self.target_baseurl()\n# aitems = tmp.findAll(\"a\",href=re.compile(\"^\\/\"))\n aitems = tmp.findAll(\"a\",href=re.compile(\"^[^hH]\"))\n for i in aitems:\n u = i['href']\n if u[0] != '/':\n i['href'] = base + '/' + u\n else: \n i['href'] = base + u\n# imgitems = tmp.findAll(\"img\",src=re.compile(\"^\\/\"))\n imgitems = tmp.findAll(\"img\",src=re.compile(\"^[^hH]\"))\n for j in imgitems:\n v = j['src']\n if v[0] != '/':\n j['src'] = base + '/' + v\n else: \n j['src'] = base + v\n return tmp"
] | [
"0.65580297",
"0.6551254",
"0.65510756",
"0.6550227",
"0.6437125",
"0.6404325",
"0.6345454",
"0.63356936",
"0.6310244",
"0.6258961",
"0.6221798",
"0.6157185",
"0.6137797",
"0.60765594",
"0.606654",
"0.6046435",
"0.60122216",
"0.6008134",
"0.60027015",
"0.60017127",
"0.5978731",
"0.5950172",
"0.5888022",
"0.5865719",
"0.5858332",
"0.58505434",
"0.5821355",
"0.58195585",
"0.5818279",
"0.58034253"
] | 0.74483454 | 0 |
Export blogger html to clipboard. If full, export complete html, otherwise export html extract ready to paste into blogger edit mode. | def prepare_for_blogger(args):
title, posts = parse_markdown(os.path.join(args.root, 'index.md'))
online_images, online_videos = online_images_url(args)
if args.check_images and check_images(args, posts, online_images) is False:
pass
html = compose_blogger_html(args, title, posts, online_images, online_videos)
if args.full is False:
html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)
html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)
html = STYLE.replace('%%', '%') + html
if args.dest:
with open(args.dest, 'wt', encoding='utf-8') as f:
f.write(html)
else:
clipboard.copy(html) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_backup(filename, html):\n\n with open(filename, 'wb') as f:\n f.write(html)",
"def write_to_paste_buffer(txt):\n pyperclip.copy(txt)",
"def clipboard(self, text = None):\n if text == None:\n response = self._fetch_json('/api/clipboard')\n return response['content']\n else:\n postdata = codecs.encode(json.dumps({ 'content': text }), 'utf-8')\n self._urlopen('/api/clipboard', postdata).read()",
"def _get_paste_page_content(self, url):\n paste_raw_url = self._get_post_url(url)\n paste_content = self._make_request(paste_raw_url, to_json=False)\n return paste_content.text",
"def publish_html(self, readyhtml):\n with open(self.outfile,'w') as f_out:\n f_out.writelines(readyhtml)",
"def copy(to_end=False):\n # Find a way to generalize this for different systems\n if to_end:\n with open('/Users/john/Terminal Saved Output', 'r') as f:\n output = f.read().replace('bpython', 'Python')\n code = output.split('\\nPython')[-1]\n else:\n code = pyperclip.paste()\n pyperclip.copy(parse_code(code))\n return None",
"def save_clipboard(dist):\n tmpimg = ImageGrab.grabclipboard()\n if tmpimg:\n tmpimg.save(dist, 'PNG', compress_level=9)\n return dist\n return ''",
"def do_copy_button( self, event ):\n #rint( \" do_copy_button -- this is all \")\n data = self.msg_text.get( 1.0, Tk.END )\n pyperclip.copy( data )",
"def run_paste(self, expanded, unexpanded) :\n\t\tif expanded :\n\t\t\treturn self.errormessage(\"Doesn't need any argument\")\n\t\tif not self.HasPerms(self.__context, 'View management screens') :\n\t\t\treturn -1\n\t\tif not hasattr(self, '_clipboard') :\n\t\t\treturn self.errormessage(\"Clipboard is empty\")\n\t\ttry :\n\t\t\tself.__context.manage_pasteObjects(cb_copy_data = self._clipboard)\n\t\t\tself.htmlmessage(\"Clipboard's content pasted into %s\" % self.getcwd())\n\t\texcept CopyError :\n\t\t\treturn self.errormessage(\"Impossible to paste clipboard's content into %s\" % self.getcwd())",
"def scrape_paste(self,paste_id):\n parameter = {'i': paste_id}\n r = requests.get('https://scrape.pastebin.com/api_scrape_item.php',params=parameter)\n return r.text",
"def exportHtmlFile(self):\n\n fileName = QtGui.QFileDialog.getSaveFileName(None,\"Save html file\", os.getenv('HOME'))\n if fileName:\n fileName += \".html\"\n #print ((\"Exporting: to \" + fileName))\n filedata = \"<html>\\n<head>\\n<title>\" + self.settings['projectName'] + \"</title>\\n</head>\\n<body>\\n\"\n #filedata += str(self.htmlResults.encode('utf-8'))\n modData = \"\"\n for c in self.htmlResults:\n if ord(c) < 128:\n modData += c\n else:\n modData += \"&#\" + str(ord(c)) + \";\"\n filedata += modData\n filedata += \"</body>\\n</html>\"\n f = open(fileName, 'w')\n f.write(filedata)\n f.close()\n self.log += \"Search Results exported to \" + fileName + \"\\n\"\n QtGui.QMessageBox.information(None, \"Html file Export\", str(fileName) + \" exported\")",
"def wonder():\n copy()\n get_soup()\n get_text()\n change_write_text()\n Check_status_time_stamp()",
"def clip_copy(num):\n if g.browse_mode == \"ytpl\":\n\n p = g.ytpls[int(num) - 1]\n link = \"https://youtube.com/playlist?list=%s\" % p['link']\n\n elif g.browse_mode == \"normal\":\n item = (g.model.songs[int(num) - 1])\n link = \"https://youtube.com/watch?v=%s\" % item.ytid\n\n else:\n g.message = \"clipboard copy not valid in this mode\"\n g.content = generate_songlist_display()\n return\n\n if has_pyperclip:\n\n try:\n pyperclip.copy(link)\n g.message = c.y + link + c.w + \" copied\"\n g.content = generate_songlist_display()\n\n except Exception as e:\n xprint(link)\n xprint(\"Error - couldn't copy to clipboard.\")\n xprint(e.__doc__)\n xprint(\"\")\n input(\"Press Enter to continue.\")\n g.content = generate_songlist_display()\n\n else:\n g.message = \"pyperclip module must be installed for clipboard support\\n\"\n g.message += \"see https://pypi.python.org/pypi/pyperclip/\"\n g.content = generate_songlist_display()",
"def clip(save, edit, browser, overwrite):\n html = clipboard.get_clipboard_html()\n if html is None:\n click.echo('No html in the clipboard')\n return\n\n if save is None:\n content = html2md.html_to_markdown(html).strip()\n click.echo(content)\n return\n\n if not save.endswith('.md'):\n click.echo('Note must have extension \".md\"')\n return\n\n note = Note(save)\n if os.path.exists(note.path.abs) and not overwrite:\n click.echo('Note already exists at \"{}\" (specify `--overwrite` to overwrite)'.format(note.path.abs))\n return\n\n html = parsers.rewrite_external_images(html, note)\n content = html2md.html_to_markdown(html).strip()\n note.write(content)\n\n if browser:\n click.launch('http://localhost:{0}/{1}'.format(conf.PORT, note.path.rel))\n\n if edit:\n click.edit(filename=note.path.abs)",
"def direct_save():\n c = ClipboardMemo()\n c.save()",
"def copy_to_clipboard(input):\n #\n # Define Tk Window and Prevent from Showing\n #\n root = tk.Tk()\n root.withdraw()\n #\n # Clear Clipboard and Append Text\n #\n root.clipboard_clear()\n root.clipboard_append(input)",
"def exportBookmarksHtml(self, filePath=''):\n if not filePath:\n filePath = self.getFileName(_('TreeLine - Export HTML Bookmarks'),\n 'html')\n if not filePath:\n return False\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n if ExportDialog.exportWhat == ExportDialog.entireTree:\n self.selectedNodes = [self.rootNode]\n addBranches = ExportDialog.exportWhat != ExportDialog.selectNode\n title = _bookmarkTitle\n if len(self.selectedNodes) == 1 and addBranches:\n title = self.selectedNodes[0].title()\n self.selectedNodes = self.selectedNodes[0].childList\n lines = ['<!DOCTYPE NETSCAPE-Bookmark-file-1>',\n '<meta http-equiv=\"Content-Type\" content=\"text/html; '\n 'charset=utf-8\">', '<title>{0}</title>'.format(title),\n '<h1>{0}</h1>'.format(title)]\n for node in self.selectedNodes:\n lines.extend(node.exportHtmlBookmarks(addBranches))\n with open(filePath, 'w', encoding='utf-8') as f:\n f.writelines([(line + '\\n') for line in lines])\n return True",
"def copyToClipboard(copy_str):\n\tcopier = Tk()\n\t# keep the window from showing\n\tcopier.withdraw()\n\tcopier.clipboard_clear()\n\t# text saved to clipboard\n\tcopier.clipboard_append(copy_str)\n\tcopier.destroy()",
"def from_clipboard(self):\n for url in QApplication.clipboard().mimeData().urls():\n src = url.path()\n dst = os.path.join(self.current_location(), os.path.basename(src))\n try:\n if os.path.islink(src) or os.path.isfile(src):\n copyfile(src, dst, overwrite=False)\n elif os.path.isdir(src):\n copytree(src, dst, overwrite=False)\n except:\n QMessageBox.critical(self, 'Error copying file/dir', traceback.format_exc())",
"def _on_articles_copy_link(self, evt=None):\n \n # get selected articles\n articles = self._articles_view.GetSelectedArticles()\n if not articles:\n return\n \n # format links\n text = \"\"\n for article in articles:\n if article.doi:\n text += \"http://dx.doi.org/%s\\n\" % article.doi\n \n # make text object for data\n obj = wx.TextDataObject()\n obj.SetText(text.strip())\n \n # paste to clipboard\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(obj)\n wx.TheClipboard.Close()",
"async def paste(text: str) -> str:\n\n async with aiohttp.ClientSession() as aioclient:\n post = await aioclient.post(\"https://hastebin.com/documents\", data=text)\n if post.status == 200:\n response = await post.text()\n return f\"https://hastebin.com/{response[8:-2]}\"\n\n # Fallback bin\n post = await aioclient.post(\"https://bin.drlazor.be\", data={\"val\": text})\n if post.status == 200:\n return post.url",
"def convert_html():\n return",
"def exportHtmlSingle(self, filePath=''):\n if not filePath:\n filePath = self.getFileName(_('TreeLine - Export HTML'), 'html')\n if not filePath:\n return False\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n if ExportDialog.exportWhat == ExportDialog.entireTree:\n self.selectedNodes = [self.rootNode]\n outputGroup = treeoutput.OutputGroup(self.selectedNodes,\n ExportDialog.includeRoot,\n ExportDialog.exportWhat !=\n ExportDialog.selectNode,\n ExportDialog.openOnly, True)\n outputGroup.addBlanksBetween()\n outputGroup.addIndents()\n outputGroup.addSiblingPrefixes()\n outGroups = outputGroup.splitColumns(ExportDialog.numColumns)\n htmlTitle = os.path.splitext(os.path.basename(filePath))[0]\n indent = globalref.genOptions.getValue('IndentOffset')\n lines = ['<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 '\n 'Transitional//EN\">', '<html>', '<head>',\n '<meta http-equiv=\"Content-Type\" content=\"text/html; '\n 'charset=utf-8\">', '<title>{0}</title>'.format(htmlTitle),\n '<style type=\"text/css\"><!--',\n 'div {{margin-left: {0}em}}'.format(indent),\n 'td {padding: 10px}', 'tr {vertical-align: top}',\n '--></style>', '</head>', '<body>']\n if ExportDialog.addHeader:\n headerText = (globalref.mainControl.activeControl.printData.\n formatHeaderFooter(True))\n if headerText:\n lines.append(headerText)\n lines.extend(['<table>', '<tr><td>'])\n lines.extend(outGroups[0].getLines())\n for group in outGroups[1:]:\n lines.append('</td><td>')\n lines.extend(group.getLines())\n lines.extend(['</td></tr>', '</table>'])\n if ExportDialog.addHeader:\n footerText = (globalref.mainControl.activeControl.printData.\n formatHeaderFooter(False))\n if footerText:\n lines.append(footerText)\n lines.extend(['</body>', '</html>'])\n with open(filePath, 'w', encoding='utf-8') as f:\n f.writelines([(line + '\\n') for line in lines])\n return True",
"def write_html(self, filename):\n # todo: allow writing in split mode\n html = self.to_html()\n open(filename, 'wt').write(html)\n print('Exported app to %r' % filename)",
"def print_contents(browser, dest='~/.browser.html'):\n import os\n open(os.path.expanduser(dest), 'w').write(browser.contents)",
"def _get_pastes_page_content(self):\n url = self._get_post_list_url()\n content = self._make_request(url, to_json=False)\n return content.text",
"def save(self, filename):\n outfile = open(filename, \"w\")\n outfile.write(self.html.encode('utf8'))\n outfile.close()",
"def paste(self, text):\n if self.file is None:\n return self.paste_to_stdout(text)\n return self.paste_to_file(text)",
"def __editPaste(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").paste()\n else:\n self.activeWindow().paste()",
"def write(self,out):\n with open( out, \"wb\") as fi:\n fi.write(html.tostring(self.book))"
] | [
"0.5765858",
"0.56113607",
"0.55505186",
"0.55185163",
"0.54795206",
"0.5450024",
"0.5427715",
"0.54080874",
"0.5321129",
"0.5313652",
"0.52682567",
"0.52502394",
"0.52400166",
"0.5201354",
"0.5193638",
"0.51862955",
"0.51706195",
"0.51384276",
"0.5110709",
"0.51039314",
"0.50777346",
"0.50516135",
"0.5049289",
"0.5045748",
"0.50399256",
"0.5038537",
"0.50320333",
"0.50315464",
"0.50192195",
"0.5017713"
] | 0.6119939 | 0 |
Run after reading the config file. Check for ffmpeg in path. Create .thumbnails dir if necessary and create .nomedia in it. Copy photobox files to destination dir. Handle priority between command line and config file. | def setup_part2(args):
if args.update:
args.sourcedir = args.source.sourcedir
args.bydir = args.source.bydir
args.bydate = args.source.bydate
args.diary = args.source.diary
args.recursive = args.source.recursive
args.dates = args.source.dates
args.github_pages = args.source.github_pages
elif args.gallery:
args.source.sourcedir = args.sourcedir
args.source.bydir = args.bydir
args.source.bydate = args.bydate
args.source.diary = args.diary
args.source.recursive = args.recursive
args.source.dates = args.dates
args.source.github_pages = args.github_pages
update_config(args)
if args.github_pages:
args.html_suffix = '.html'
else:
args.html_suffix = '.htm'
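    # derive the name of the root html page from the --root argument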
rootext = os.path.splitext(args.rootarg)[1]
if rootext:
args.rootname = os.path.basename(args.rootarg)
else:
args.rootname = 'index' + args.html_suffix
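    # normalize the source directory to an absolute path and check that it exists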
if args.sourcedir:
args.sourcedir = os.path.abspath(args.sourcedir)
if os.path.splitdrive(args.sourcedir)[0]:
drive, rest = os.path.splitdrive(args.sourcedir)
args.sourcedir = drive.upper() + rest
if not os.path.isdir(args.sourcedir):
error('Directory not found', args.sourcedir)
else:
if args.gallery and args.diary is False and args.update is None:
error('Directory not found', 'Use --sourcedir')
if args.dest:
args.dest = os.path.abspath(args.dest)
if args.dest is None:
args.dest = args.root
if args.blogger and args.urlblogger is None:
error('No blogger url (--url)')
if args.gallery or args.update:
# check for ffmpeg and ffprobe in path
for exe in ('ffmpeg', 'ffprobe'):
try:
check_output([exe, '-version'])
except FileNotFoundError:
error('File not found', exe)
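        # create the thumbnail directory ('thumbnails' for GitHub Pages, hidden '.thumbnails' otherwise) with an empty .nomedia marker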
if args.github_pages:
args.thumbrep = 'thumbnails'
else:
args.thumbrep = '.thumbnails'
args.thumbdir = os.path.join(args.dest, args.thumbrep)
if not os.path.exists(args.thumbdir):
os.mkdir(args.thumbdir)
open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()
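        # copy favicon.ico and the photobox viewer files to the destination if not already present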
favicondst = os.path.join(args.dest, 'favicon.ico')
if not os.path.isfile(favicondst):
faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')
shutil.copyfile(faviconsrc, favicondst)
photoboxdir = os.path.join(args.dest, 'photobox')
if not os.path.exists(photoboxdir):
photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')
shutil.copytree(photoboxsrc, photoboxdir)
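    # validate the --dates option: 'source', 'diary', or an explicit 'date1-date2' range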
if args.dates:
if not(args.gallery or args.create):
            # silently ignored for the moment, otherwise all other commands would
            # raise a warning or an error on the default --dates value
pass
if args.dates == 'source':
pass
elif args.dates == 'diary':
if args.create:
error('Incorrect date format', args.dates)
elif re.match(r'\d+-\d+', args.dates):
date1, date2 = args.dates.split('-')
if validate_date(date1) and validate_date(date2):
args.dates = date1, date2
else:
error('Incorrect date format', args.dates)
else:
error('Incorrect date format', args.dates) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def take_one_shot(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))",
"def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")",
"def thumbnail(self, fnameIn, fnameOut):\n cmd = \"convert -define jpeg:size=500x150 \"\n cmd += '\"%s\" ' % os.path.join(self.downloadFolder, fnameIn)\n cmd += \"-auto-orient -thumbnail 250x150 \"\n cmd += '\"%s\" ' % os.path.join(self.thumbnailFolder, fnameOut)\n self.log(\"creating thumbnail ...\")\n self.log(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()",
"def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))",
"def generate_thumbnail():\n import tempfile\n import glob\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n\n if not v:\n return\n\n # do not generate a thumbnail from a Repr\n if '@' in v.take_name:\n return\n\n task = v.task\n project = task.project\n # repo = project.repository\n imf = project.image_format\n width = int(imf.width * 0.5)\n height = int(imf.height * 0.5)\n\n temp_output = tempfile.mktemp()\n\n current_frame = pm.currentTime(q=1)\n output_file = pm.playblast(\n fmt='image',\n startTime=current_frame,\n endTime=current_frame,\n sequenceTime=1,\n forceOverwrite=1,\n filename=temp_output,\n clearCache=1,\n showOrnaments=1,\n percent=100,\n wh=(width, height),\n offScreen=1,\n viewer=0,\n compression='PNG',\n quality=70,\n framePadding=0\n )\n pm.currentTime(current_frame)\n\n output_file = output_file.replace('####', '*')\n found_output_file = glob.glob(output_file)\n if found_output_file:\n output_file = found_output_file[0]\n\n from anima.ui import utils\n utils.upload_thumbnail(task, output_file)\n\n return found_output_file",
"def main(base_dir: str, output_dir: str) -> None:\n base_path = pathlib.Path(base_dir)\n output_path = pathlib.Path(output_dir).expanduser()\n\n stage_copy_images(base_path, output_path)\n stage_extract_videos(base_path, output_path)",
"def copy_file_to_server():\r\n utils.system_output('mv /home/chronos/user/Downloads/* /usr/local/autotest/results/default/',ignore_status=True)\r\n logging.info(\"Video Copied to Log location\")",
"def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == \"win32\":\n print(\"Running on windows... on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()",
"def copyMedia(source, target):\n if not os.path.exists(target):\n print(\"copying source,target:\", source, target)\n shutil.copy2(source, target)",
"def prep(path,date,image):\n \n # run bash code with 'Popen'\n P = Popen('cp '+path+date+'/final/'+image+' ./', shell=True)\n P.wait()\n P = Popen('mv '+image+' '+image+'.fz', shell=True)\n P.wait()\n P = Popen('funpack *.fz', shell=True)\n P.wait()\n P = Popen('rm -rf *.fz', shell=True)\n P.wait()",
"def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()",
"def prepare_video(path_to_video: str, number_of_images=87) -> None:\n\n temp_video = path.join(path_to_video, 'temp_outpy.mp4')\n video = path.join(path_to_video, 'outpy.h264')\n\n # create mp4 video for metadata and compute video duration\n subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", temp_video],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n video_duration = float(result.stdout)\n\n # create images folder\n path_to_images = path.join(path_to_video, 'images')\n if path.exists(path_to_images) and path.isdir(path_to_images):\n shutil.rmtree(path_to_images)\n makedirs(path_to_images)\n\n # split the given video into images\n subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',\n path.join(path_to_images, 'image%d.jpg')])\n\n # remove extra files\n remove_extra_images(path_to_images, number_of_images)\n remove(temp_video)",
"def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width ({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'",
"def createThumbnail(self, useCursorPosition=False, dbPath = None, versionInt = None):\n\n return \"\"\n # logger.debug(\"Func: createThumbnail\")\n # projectPath = self.projectDir\n # if useCursorPosition:\n # versionInt = self.currentVersionIndex\n # dbPath = self.currentDatabasePath\n # else:\n # if not dbPath or not versionInt:\n # msg = \"Both dbPath and version must be defined if useCursorPosition=False\"\n # raise Exception ([360, msg])\n #\n # versionStr = \"v%s\" % (str(versionInt).zfill(3))\n # dbDir, shotNameWithExt = os.path.split(dbPath)\n # shotName = os.path.splitext(shotNameWithExt)[0]\n #\n # thumbPath = \"{0}_{1}_thumb.jpg\".format(os.path.join(dbDir, shotName), versionStr)\n # relThumbPath = os.path.relpath(thumbPath, projectPath)\n #\n # # create a thumbnail using playblast\n # thumbDir = os.path.split(thumbPath)[0]\n # if os.path.exists(thumbDir):\n # # frame = pm.currentTime(query=True)\n # frame = cmds.currentTime(query=True)\n # # store = pm.getAttr(\"defaultRenderGlobals.imageFormat\")\n # store = cmds.getAttr(\"defaultRenderGlobals.imageFormat\")\n # # pm.setAttr(\"defaultRenderGlobals.imageFormat\", 8) # This is the value for jpeg\n # cmds.setAttr(\"defaultRenderGlobals.imageFormat\", 8) # This is the value for jpeg\n # # pm.playblast(completeFilename=thumbPath, forceOverwrite=True, format='image', width=221, height=124, showOrnaments=False, frame=[frame], viewer=False, percent=100)\n # cmds.playblast(completeFilename=thumbPath, forceOverwrite=True, format='image', width=221, height=124, showOrnaments=False, frame=[frame], viewer=False, percent=100)\n # # pm.setAttr(\"defaultRenderGlobals.imageFormat\", store) #take it back\n # cmds.setAttr(\"defaultRenderGlobals.imageFormat\", store) #take it back\n # else:\n # # pm.warning(\"something went wrong with thumbnail. Skipping thumbnail\")\n # cmds.warning(\"something went wrong with thumbnail. Skipping thumbnail\")\n # return \"\"\n # # return thumbPath\n # return relThumbPath",
"def write_thumbnails(self, appstruct):\n slugser = slugify(appstruct[\"serial\"])\n pdf_filename = \"thumbnails/%s/uploaded.pdf\" % slugser\n top_file = \"thumbnails/%s/top.png\" % slugser\n mos_file = \"thumbnails/%s/mosaic.png\" % slugser\n \n thumg = ThumbnailGenerator(pdf_filename)\n self.save_blob(thumg.top_thumbnail(), top_file)\n self.save_blob(thumg.mosaic_thumbnail(), mos_file)",
"def main():\n # load parameters\n params = load_params()\n # check that both profiles exist (we assume that this means the directories\n # are found on disk)\n if not os.path.exists(params['profile1']):\n emsg = 'profile1={0} does not exist'\n eargs = [params['profile1']]\n raise AperoCopyError(emsg.format(*eargs))\n if not os.path.exists(params['profile2']):\n emsg = 'profile2={0} does not exist'\n eargs = [params['profile2']]\n raise AperoCopyError(emsg.format(*eargs))\n # get a list of files from profile 1 for each block kinds\n files1, paths1 = get_files_profile1(params)\n # get the output files for profile 2 for each block kind\n files2, files3, paths2, paths3 = get_files_profile2(params, files1, paths1)\n # copy files from profile 1 to profile 2 for each block kind\n # must copy files to a temporary path first (copying can be slow)\n copy_files(params, files1, files2, files3)\n # may need to update profile 2 (via git) to match profile 1\n update_git_profile2(params)\n # remove all old files from profile 2 blocks\n success = reset_profile2(params, paths3)\n # rename the directories in profile 2 (this is quicker than copying)\n if success:\n success = rename_directories(params, paths2, paths3)\n # update databases for profile 2\n if success:\n update_databases_profile2(params)\n # return to __main__\n return",
"def copy_support_files() -> None:\n # root folder files\n filelist = {\"favicon128.png\",\n \"favicon96.png\",\n \"favicon72.png\",\n \"favicon48.png\",\n \"favicon32.png\",\n \"favicon24.png\",\n \"favicon16.png\",\n \"favicon.ico\",\n \"apple-touch-icon.png\",\n \"apple-touch-icon-precomposed.png\",\n \"apple-touch-icon-72x72.png\",\n \"apple-touch-icon-72x72-precomposed.png\",\n \"apple-touch-icon-114x114.png\",\n \"apple-touch-icon-114x114-precomposed.png\",\n \"apple-touch-icon-144x144.png\",\n \"apple-touch-icon-144x144-precomposed.png\",\n \"uca_style.css\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/\" + filename, WEBOUT_PATH)\n except FileNotFoundError:\n report_error(\"Missing file: resources/\" + filename)\n # image folder files\n filelist = {\"film.png\",\n \"stylifera75.png\",\n \"DOI_logo.svg\",\n \"size_hist.png\",\n \"size_ind.png\",\n \"size_mean.png\",\n \"size_range.png\",\n \"size_summary.png\",\n \"double_clawed.jpg\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/images/\" + filename, WEBOUT_PATH + \"images/\")\n except FileNotFoundError:\n report_error(\"Missing file: resources/images/\" + filename)\n filelist = {\"specific_word_cloud.png\",\n \"binomial_word_cloud.png\"}\n for filename in filelist:\n try:\n shutil.copy2(TMP_PATH + filename, WEBOUT_PATH + \"images/\")\n except FileNotFoundError:\n report_error(\"Missing file: \" + TMP_PATH + filename)\n # font-awesome files\n filelist = {\"fontawesome.min.js\",\n \"brands.min.js\",\n \"regular.min.js\",\n \"solid.min.js\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/font-awesome/js/\" + filename, WEBOUT_PATH + \"js/\")\n except FileNotFoundError:\n report_error(\"Missing file: resources/font-awesome/js/\" + TMP_PATH + filename)\n # flag-icon files\n filelist = {\"flag-icons.min.css\"}\n for filename in filelist:\n try:\n shutil.copy2(\"resources/flag-icon-css/css/\" + filename, WEBOUT_PATH + \"images/flag-icon-css/css/\")\n except FileNotFoundError:\n report_error(\"Missing file: images/flag-icon-css/css/\" + TMP_PATH + filename)\n filelist = {\"de.svg\", # Germany\n \"es.svg\", # Spain\n \"ru.svg\", # Russia\n \"fr.svg\", # France\n \"pt.svg\", # Portugal\n \"dk.svg\", # Denmark\n \"nl.svg\", # Netherlands\n \"jp.svg\", # Japan\n \"cn.svg\", # China\n \"us.svg\", # USA\n \"th.svg\", # Thailand\n \"va.svg\", # Vatican\n \"it.svg\", # Italy\n \"kr.svg\", # South Korea\n \"pl.svg\", # Poland\n \"mm.svg\", # Myanamar (Burma)\n \"sa.svg\", # Saudi Arabia (best option for Arabic of those available)\n \"id.svg\", # Indonesia\n \"za.svg\", # South Africa (best option for Afrikaans)\n \"my.svg\", # Malaysia (for Malay)\n \"mg.svg\", # Madagascar (for Malagasy)\n \"ir.svg\", # Iran (for Persian)\n \"vn.svg\"} # Vietnam\n for filename in filelist:\n try:\n shutil.copy2(\"resources/flag-icon-css/flags/4x3/\" + filename, WEBOUT_PATH +\n \"images/flag-icon-css/flags/4x3/\")\n except FileNotFoundError:\n report_error(\"Missing file: images/flag-icon-css/flags/4x3/\" + TMP_PATH + filename)",
"def post_process_hls(d):\r\n\r\n log('post_process_hls()> start processing', d.name)\r\n\r\n local_video_m3u8_file = os.path.join(d.temp_folder, 'local_video.m3u8')\r\n local_audio_m3u8_file = os.path.join(d.temp_folder, 'local_audio.m3u8')\r\n\r\n cmd = f'\"{config.ffmpeg_actual_path}\" -loglevel error -stats -y -protocol_whitelist \"file,http,https,tcp,tls,crypto\" ' \\\r\n f'-allowed_extensions ALL -i \"{local_video_m3u8_file}\" -c copy \"file:{d.temp_file}\"'\r\n error, output = run_command(cmd, d=d)\r\n\r\n if error:\r\n # retry without \"-c copy\" parameter, takes longer time\r\n cmd = f'\"{config.ffmpeg_actual_path}\" -loglevel error -stats -y -protocol_whitelist \"file,http,https,tcp,tls,crypto\" ' \\\r\n f'-allowed_extensions ALL -i \"{local_video_m3u8_file}\" \"file:{d.temp_file}\"'\r\n error, output = run_command(cmd, d=d)\r\n\r\n if error:\r\n log('post_process_hls()> ffmpeg failed:', output)\r\n return False\r\n\r\n if 'dash' in d.subtype_list:\r\n cmd = f'\"{config.ffmpeg_actual_path}\" -loglevel error -stats -y -protocol_whitelist \"file,http,https,tcp,tls,crypto\" ' \\\r\n f'-allowed_extensions ALL -i \"{local_audio_m3u8_file}\" -c copy \"file:{d.audio_file}\"'\r\n error, output = run_command(cmd, d=d)\r\n\r\n if error:\r\n # retry without \"-c copy\" parameter, takes longer time\r\n cmd = f'\"{config.ffmpeg_actual_path}\" -loglevel error -stats -y -protocol_whitelist \"file,http,https,tcp,tls,crypto\" ' \\\r\n f'-allowed_extensions ALL -i \"{local_audio_m3u8_file}\" \"file:{d.audio_file}\"'\r\n error, output = run_command(cmd, d=d)\r\n\r\n if error:\r\n log('post_process_hls()> ffmpeg failed:', output)\r\n return False\r\n\r\n log('post_process_hls()> done processing', d.name)\r\n\r\n return True",
"def save_video(foldername, songname, songlen, num_steps, output):\n num_steps_by_len = num_steps / songlen\n p = subprocess.Popen(['ffmpeg', '-f', 'image2', '-r', str(num_steps_by_len), '-i', '%d.png', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-vf', 'pad=ceil(iw/2)*2:ceil(ih/2)*2', 'movie.mp4'], cwd=foldername)\n p.wait()\n\n p = subprocess.Popen(['ffmpeg', '-i', 'movie.mp4', '-i', '../audio_files/' + songname + '.mp3', '-map', '0:v', '-map', '1:a', '-c', 'copy', output], cwd=foldername)\n p.wait()",
"def create_movie(name, folder):\n cmd = [\"ffmpeg\", \"-framerate\", \"1\", \"-i\", folder + \"/pic%04d.png\", \"-c:v\",\n \"libx264\", \"-r\", \"30\", \"-pix_fmt\", \"yuv420p\", name]\n return subprocess.call(cmd)",
"def _copy_snpeff_config(self):\n\n CONFIG = sequana_data(\"snpEff.config\", \"snpeff\")\n os.makedirs(self.snpeff_datadir, exist_ok=True)\n shutil.copyfile(CONFIG, self.configfile)",
"def let_camera_update_parameters(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -ss 00:00:02 -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))",
"def _init(args, workflows_dir, config_path):\n for file in [\"samples.tsv\", \"config.yaml\"]:\n src = os.path.join(workflows_dir, args.workflow.replace(\"-\", \"_\"), file)\n dest = os.path.join(os.path.dirname(config_path), file)\n\n copy_file = True\n if os.path.exists(dest) and args.force is False:\n choices = {\"yes\": True, \"y\": True, \"no\": False, \"n\": False}\n\n sys.stdout.write(f\"File: {dest} already exists. Do you want to overwrite it? (yes/no) \")\n while True:\n choice = input().lower()\n if choice in choices:\n copy_file = choices[choice]\n break\n else:\n print(\"Please respond with yes (y) or no (n).\")\n\n if copy_file:\n shutil.copyfile(src, dest)",
"def get_video_as_images():\n experiments = ['me1.mp4']\n try:\n if (os.path.isdir(\"dump\")):\n shutil.rmtree('dump')\n except OSError:\n print (\"Deletion of the directory failed\")\n exit()\n os.system('mkdir dump')\n for experiment in experiments:\n exp_no_ext = experiment.split('.')[0]\n subdir_cmd = \"dump/{0}\".format(exp_no_ext)\n os.mkdir(subdir_cmd)\n os.system('ffmpeg -i videos/%s dump/%s/%s%%03d.jpg' % (experiment, exp_no_ext, exp_no_ext))\n run_all(exp_no_ext)",
"def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' % self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)",
"def main():\n parser = CustomArgumentParser()\n parser.add_argument(\"-s\", \"--simon-sez\",\n help=\"Really, Simon sez copy the data!\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--src-directory\",\n help=\"Copy metadata from files in this directory.\")\n parser.add_argument(\"-d\", \"--dst-directory\",\n help=\"Copy metadata to matching files in this directory.\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Log level to DEBUG.\",\n action=\"store_true\")\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n error = False\n\n # Require these two arguments.\n for arg in [args.src_directory, args.dst_directory]:\n if not arg:\n logger.error(\n \"Required src or dst directory parameter missing.\")\n error = True\n # XXX: Duplicates exit below. Can't check directory if null.\n logger.error(\"Exiting due to errors.\")\n parser.usage_message()\n sys.exit(1)\n\n if (os.path.exists(args.src_directory) and\n os.path.isdir(args.src_directory)):\n src_directory = args.src_directory\n else:\n logger.error(\n \"--src-directory={} does not exist or is not a directory.\".format(\n args.dst_directory))\n error = True\n\n if (os.path.exists(args.dst_directory) and\n os.path.isdir(args.dst_directory)):\n dst_directory = args.dst_directory\n else:\n logger.error(\n \"--dst-directory={} does not exist or is not a directory.\".format(\n args.dst_directory))\n error = True\n\n if error:\n logger.error(\"Exiting due to errors.\")\n parser.usage_message()\n sys.exit(1)\n else:\n process_all_files(src_directory, dst_directory, simon_sez=args.simon_sez)",
"def make_image_dir(to_path, filenames):\n image_dir = os.path.join(to_path, \"image_2\")\n os.makedirs(image_dir)\n for f in filenames:\n image_file = os.path.join(image_dir, f + \".png\")\n os.system(\"cp sample.png {}\".format(image_file))",
"def write_upload_files(self, appstruct):\n \n # Create the directory if it does not exist\n final_dir = \"thumbnails/%s\" % slugify(appstruct[\"serial\"])\n if not os.path.exists(final_dir):\n log.info(\"Make directory: %s\", final_dir)\n os.makedirs(final_dir)\n\n final_file = \"%s/uploaded.pdf\" % final_dir\n file_pointer = appstruct[\"pdf_upload\"][\"fp\"]\n self.single_file_write(file_pointer, final_file)",
"def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)",
"def handle_image(name):\n from_path = args.from_dir + name\n to_path = args.to_dir + name\n\n if width != args.width:\n subprocess.call('jpegtran -rotate 90 -grayscale ' + from_path + ' > ' \\\n + to_path, shell=True)\n else:\n subprocess.call('jpegtran -grayscale ' + from_path + ' > ' + to_path,\\\n shell=True)"
] | [
"0.5893241",
"0.57616824",
"0.5529838",
"0.55258685",
"0.55187756",
"0.55067265",
"0.5499056",
"0.54705536",
"0.5458571",
"0.5449385",
"0.54415053",
"0.53093326",
"0.5306078",
"0.53042513",
"0.5287746",
"0.5274733",
"0.5265946",
"0.5246467",
"0.52405924",
"0.5228054",
"0.5227246",
"0.5223553",
"0.5209518",
"0.519676",
"0.51964355",
"0.5181166",
"0.51608443",
"0.51529205",
"0.5132727",
"0.5124742"
] | 0.57895535 | 1 |
Returns the q'th percentile of the distribution given in the argument 'data'. Uses the 'precision' parameter to control the noise level. | def Quantile(data, q, precision=1.0):
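    # Build a histogram whose bin count scales with sqrt(len(data)) (tuned by
    # 'precision'), then return the first bin edge whose normalized cumulative
    # count exceeds q.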
    N, bins = np.histogram(data, bins=int(precision * np.sqrt(len(data))))
norm_cumul = 1.0*N.cumsum() / len(data)
for i in range(0, len(norm_cumul)):
if norm_cumul[i] > q:
return bins[i] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Quartiles(data):\n q = np.percentile(data, [25, 50, 75])\n\n return q[0], q[1], q[2]",
"def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)",
"def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):\r\n ps = [p_vals] if np.isscalar(p_vals) else p_vals\r\n\r\n if not sorted_:\r\n data = sorted(data)\r\n n = len(data)\r\n d = []\r\n for p in ps:\r\n fi = p * n / 100 - 0.5\r\n if fi <= 0: # maybe extrapolate?\r\n d.append(data[0])\r\n elif fi >= n - 1:\r\n d.append(data[-1])\r\n else:\r\n i = int(fi)\r\n d.append((i+1 - fi) * data[i] + (fi - i) * data[i+1])\r\n return d[0] if np.isscalar(p_vals) else d",
"def get_percentile(self, q):\n return None",
"def get_percentile(self, q):\n return None",
"def quantile(x, p):\n sorted_x = sorted(x)\n # round p_index to base int\n p_index = int(p * len(x))\n return sorted_x[p_index]",
"def calcrange_quartile(data, log=False):\n if not isinstance(data, numpy.ndarray):\n data = numpy.array(data)\n if log:\n data = data[data > 0.]\n\n if len(data) == 0:\n if log: return 0.1, 1.\n else: return 0., 1.\n\n data = numpy.sort(data)\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n if log:\n return q1 / (q3 - q1), q3 * (q3 - q1)\n else:\n return q1 - (q3 - q1), q3 + (q3 - q1)",
"def quantile(x,p):\n p_index = int(p*len(x))\n return sorted(x)[p_index]",
"def quantile(x, p):\n p_index = int(p * len(x))\n return sorted(x)[p_index]",
"def quantile(x, p):\n p_index = int(p * len(x))\n return sorted(x)[p_index]",
"def quantile(x, p):\n p_index = int(p * len(x))\n return sorted(x)[p_index]",
"def IQR(data):\n return percentile(data, 75) - percentile(data, 25)",
"def quantile(xs: List[float], p: float) -> float:\n p_index = int(p * len(xs))\n return sorted(xs)[p_index]",
"def default_quantile():\n return np.logspace(-5, 0, 100)",
"def get_percentile(self, q):\n return self.sum_stat_sample_ratio.get_percentile(q)",
"def percentile(t: torch.tensor, q: float):\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result",
"def get_percentile(self, q):\n return self.sum_stat_sample_delta.get_percentile(q)",
"def percentile(field, q):\n # https://gist.github.com/spezold/42a451682422beb42bc43ad0c0967a30\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (field.shape[1] - 1))\n result = field.kthvalue(k, dim=1).values\n return result",
"def _quantile(data, quantile):\r\n index = quantile * (len(data) - 1)\r\n bottom_index = int(floor(index))\r\n top_index = int(ceil(index))\r\n\r\n difference = index - bottom_index\r\n output = (1 - difference) * \\\r\n data[bottom_index] + difference * data[top_index]\r\n\r\n return output",
"def get_percentile(self, q):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)",
"def get_quartile_data(cls, data: tuple or list) -> tuple:\n cls._data_validation(data)\n # Sort the data\n sorted_data = sorted(list(data))\n # Get q2, which is the median\n q2 = cls.get_median(data)\n first_half_data = list()\n second_half_data = list()\n # add to first half until median, then add to second half\n for i in range(len(sorted_data)):\n # if less than q2, first half\n if sorted_data[i] < q2:\n first_half_data.append(sorted_data[i])\n # if greather than q2, second half, skips q2\n elif sorted_data[i] > q2:\n second_half_data.append(sorted_data[i])\n # use median method on halves to get quartiles\n q1 = cls.get_median(first_half_data)\n q3 = cls.get_median(second_half_data)\n iqr = q3-q1\n return q1, q2, q3, iqr",
"def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)",
"def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])",
"def _percentile(self, data, percent):\n if not data:\n return None\n k = (len(data) - 1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return data[int(k)]\n d0 = data[int(f)] * (c - k)\n d1 = data[int(c)] * (k - f)\n return d0 + d1",
"def quantile(a, prob):\n a=numpy.asanyarray(a)\n a=a[numpy.logical_not(numpy.isnan(a))].ravel()\n n=a.size\n\n if prob>=1-.5/n:\n return a.max()\n elif prob<=.5/n:\n return a.min()\n\n # find the two bounds we're interpreting between:\n # that is, find i such that (i+.5) / n <= prob <= (i+1.5)/n\n t=n*prob-.5\n i=int(t)\n\n # partial sort so that the ith element is at position i, with bigger ones\n # to the right and smaller to the left\n a.sort()\n\n if i==t: # did we luck out and get an integer index?\n return a[i]\n else:\n # we'll linearly interpolate between this and the next index\n smaller=a[i]\n larger=a[i+1:].min()\n if numpy.isinf(smaller):\n return smaller # avoid inf - inf\n return smaller+(larger-smaller)*(t-i)",
"def get_quartile_data(cls, data: tuple or list) -> tuple:\n cls._data_validation(data)\n from math import floor\n # Sort the data\n n = cls.get_n(data)\n if n == 0:\n # Empty dataset, returns zeroes\n return 0, 0, 0, 0\n sorted_data = sorted(list(data))\n n_is_odd = True if n % 2 == 1 else False\n\n # Get middle index\n odd_middle_index = floor(n / 2)\n even_upper_index = floor(n / 2)\n even_lower_index = floor(n / 2) - 1\n\n # Get q2, which is the median\n q2 = cls.get_median(data)\n first_half_data = list()\n second_half_data = list()\n\n # add to first half until median, then add to second half\n if n_is_odd:\n for i in range(n):\n if i < odd_middle_index:\n first_half_data.append(sorted_data[i])\n # note how if index = middle_index, skips\n elif i > odd_middle_index:\n second_half_data.append(sorted_data[i])\n else:\n for i in range(n):\n if i <= even_lower_index:\n first_half_data.append(sorted_data[i])\n # note how if index = middle_index, skips\n else:\n second_half_data.append(sorted_data[i])\n # use median method on halves to get quartiles\n q1 = cls.get_median(first_half_data)\n q3 = cls.get_median(second_half_data)\n iqr = q3 - q1\n return q1, q2, q3, iqr",
"def quartiles(x, percentile):\n length = len(x)\n\n if percentile == 25:\n center = length // 4\n elif percentile == 75:\n center = length // 2 + length // 4\n\n x.sort()\n\n if length % 2 == 0:\n return (x[center - 1] + x[center]) / 2\n else:\n return x[center]",
"def get_percentile(before, level):\n snr = np.array(before.getColumnByName('snr')[:])\n return np.percentile(snr, level)",
"def quantile_func(q):\n def f(x):\n return np.quantile(x, q)\n\n return f"
] | [
"0.72270185",
"0.7012418",
"0.6803648",
"0.67029893",
"0.67029893",
"0.669614",
"0.6685391",
"0.65556973",
"0.65500534",
"0.65500534",
"0.65500534",
"0.64803755",
"0.64564687",
"0.64286137",
"0.64046985",
"0.6381233",
"0.6363074",
"0.636056",
"0.6354891",
"0.63055617",
"0.6262271",
"0.62104005",
"0.61781657",
"0.61349875",
"0.61099523",
"0.60976803",
"0.6073696",
"0.6039872",
"0.5991325",
"0.598613"
] | 0.7864236 | 0 |
Downloads a YouTube video by its unique id. | def youtube_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False):
raw_video_info = get_content('http://www.youtube.com/get_video_info?video_id=%s' % id)
video_info = parse.parse_qs(raw_video_info)
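    # use the get_video_info data directly unless the video requires a ciphered signature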
if video_info['status'] == ['ok'] and ('use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']):
title = parse.unquote_plus(video_info['title'][0])
stream_list = parse.parse_qs(raw_video_info)['url_encoded_fmt_stream_map'][0].split(',')
else:
# Parse video page when video_info is not usable.
video_page = get_content('http://www.youtube.com/watch?v=%s' % id)
ytplayer_config = json.loads(match1(video_page, r'ytplayer.config\s*=\s*([^\n]+);'))
title = ytplayer_config['args']['title']
stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
streams = {
parse.parse_qs(stream)['itag'][0] : parse.parse_qs(stream)
for stream in stream_list
}
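    # pick the first available stream following the codec preference order in yt_codecs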
for codec in yt_codecs:
itag = str(codec['itag'])
if itag in streams:
download_stream = streams[itag]
break
url = download_stream['url'][0]
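    # append the signature to the url, decrypting it first when only a ciphered 's' is provided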
if 'sig' in download_stream:
sig = download_stream['sig'][0]
else:
sig = decrypt_signature(download_stream['s'][0])
url = '%s&signature=%s' % (url, sig)
type, ext, size = url_info(url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge = merge) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download(idd, path):\n print(f'[{script}]: Downloading YT video \"{idd}\"...') if verbosity >= 1 else None\n\n try:\n yt = pytube.YouTube(\"https://www.youtube.com/watch?v=\" + idd)\n stream = yt.streams.filter(progressive=True).first()\n stream.download(path, filename=idd)\n except Exception:\n print(f'[{script}]: Failed download of YT video \"{idd}\".')\n return None\n\n data = {\n \"idd\": idd,\n \"abr\": stream.abr,\n \"acodec\": stream.audio_codec,\n \"bitrate\": stream.bitrate,\n \"codecs\": stream.codecs,\n \"fps\": stream.fps,\n \"mime\": stream.mime_type,\n \"res\": stream.resolution,\n \"vcodec\": stream.video_codec,\n \"size\": stream._filesize,\n \"frames\": stream.fps * yt.length,\n }\n\n file_path = path + \"/\" + data[\"idd\"] + \".mp4\"\n print(\n f'[{script}]: Download successful. Saved to \"{file_path}\".'\n ) if verbosity >= 2 else None\n return data",
"def get_video(self, video_id):\n uri = 'videos/' + video_id\n return self.make_request(uri)",
"def download_video(self, url):\n yt = YouTube(url)\n yt_filtered = yt.streams.filter(progressive=True, file_extension=\"mp4\")\n yt_resolutions = yt_filtered.order_by(\"resolution\")\n\n # Downloads the first video that fits the description\n video = yt_resolutions.desc().first()\n video.download()\n\n # Returns the filename\n return video.default_filename",
"def get_youtube_video_url(video_id):\n url = \"https://www.youtube.com/watch?v=\" + video_id\n return url",
"def download_from_youtube():\n linkinput = input(\"Enter the url you want to download: \")\n youtube_object = Youtube(linkinput)\n youtube_object.youtube()",
"def get_video_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/video.webm')\n ec = {'ThreatGrid.Sample.Id': sample_id}\n demisto.results([\n {\n 'Type': entryTypes['note'],\n 'EntryContext': ec,\n 'HumanReadable': '### ThreatGrid Sample Run Video File -\\n'\n + 'Your sample run video file download request has been completed successfully for '\n + sample_id,\n 'Contents': ec,\n 'ContentsFormat': formats['json']\n },\n fileResult(sample_id + '.webm', r.content)\n ])",
"def get_yt_video(yt_url):\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': '%(id)s.%(ext)s'\n }\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result = ydl.extract_info(\n #'http://www.youtube.com/watch?v=BaW_jenozKc',\n yt_url,\n download=True # We just want to extract the info\n )\n\n if 'entries' in result:\n # Can be a playlist or a list of videos\n video = result['entries'][0]\n else:\n # Just a video\n video = result\n\n return video",
"def play_youtube(self, media_id):\n pass",
"def movieid_first_video_url(self, movie_id):\n YOUTUBE_URL = \"https://www.youtube.com/watch?v=\"\n VIDEOS_URL = \"https://api.themoviedb.org/3/movie/%s/videos\"\n url_with_movieid = VIDEOS_URL % (movie_id)\n parm_dict = {\"api_key\": self.api_key, \"language\": self.language}\n url = url_with_movieid + \"?\" + urlencode(parm_dict, doseq=True)\n # print url\n\n response = requests.get(url)\n json_dict = json.loads(response.text)\n response.close()\n\n youtube_video_key = json_dict['results'][0]['key']\n return YOUTUBE_URL + youtube_video_key",
"async def get_video(videoid):\n\theaders = {\n\t\t'Client-ID': config['twitch_clientid'],\n\t\t'Authorization': f\"Bearer {get_token()}\",\n\t}\n\tdata = await common.http.request_coro(\"https://api.twitch.tv/helix/videos\", {\"id\": videoid.lstrip('v')}, headers=headers)\n\treturn json.loads(data)[\"data\"][0]",
"def url(yt_id: str) -> str:\n return \"https://www.youtube.com/watch?v={}\".format(yt_id)",
"def youtube_download(url, output_dir='.', merge=True, info_only=False):\n \n id = match1(url, r'youtu.be/([^/]+)') or parse_query_param(url, 'v')\n assert id\n \n youtube_download_by_id(id, title=None, output_dir=output_dir, merge=merge, info_only=info_only)",
"def get_video(lesson_id, video_id):\n url = '{0}?cat={1}&video={2}'.format(BASE_URL, lesson_id, video_id)\n page = requests.get(url, verify=False)\n soup = BeautifulSoup(page.content)\n return soup.find('iframe')['src'].split('/')[-1]",
"def play_youtube(self, media_id):\n raise NotImplementedError()",
"def fetch_youtube_url(search_term, dev_key=None):\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n if in_cache:\r\n return YOUTUBE_VIDEO_URL + video_id\r\n if not dev_key:\r\n YOUTUBE_SEARCH_BASE = \"https://www.youtube.com/results?search_query=\"\r\n try:\r\n response = requests.get(YOUTUBE_SEARCH_BASE + search_term).content\r\n html_response = html.fromstring(response)\r\n video = html_response.xpath(\"//a[contains(@class, 'yt-uix-tile-link')]/@href\")\r\n video_id = re.search(\"((\\?v=)[a-zA-Z0-9_-]{4,15})\", video[0]).group(0)[3:]\r\n log.debug(f\"Found video id {video_id} for search term {search_term}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id)\r\n return YOUTUBE_VIDEO_URL + video_id\r\n except AttributeError as e:\r\n log.warning(f\"Could not find scrape details for {search_term}\")\r\n capture_exception(e)\r\n return None\r\n except IndexError as e:\r\n log.warning(f\"Could not perform scrape search for {search_term}, got a different HTML\")\r\n capture_exception(e)\r\n return None\r\n else:\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n developerKey=dev_key,\r\n cache_discovery=False)\r\n try:\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n\r\n if not in_cache:\r\n search_response = youtube.search().list(q=search_term,\r\n part='id, snippet').execute()\r\n for v in search_response['items']:\r\n if v['id']['kind'] == VIDEO:\r\n video_id = v['id']['videoId']\r\n log.debug(f\"Adding Video id {video_id}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id) \r\n return YOUTUBE_VIDEO_URL + video_id\r\n except HttpError as err:\r\n err_details = loads(err.content.decode('utf-8')).get('error').get('errors')\r\n secho(\"Couldn't complete search due to following errors: \", fg='red')\r\n for e in err_details:\r\n error_reason = e.get('reason')\r\n error_domain = e.get('domain')\r\n error_message = e.get('message')\r\n\r\n if error_reason == 'quotaExceeded' or error_reason == 'dailyLimitExceeded':\r\n secho(f\"\\tYou're over daily allowed quota. Unfortunately, YouTube restricts API keys to a max of 10,000 requests per day which translates to a maximum of 100 searches.\", fg='red')\r\n secho(f\"\\tThe quota will be reset at midnight Pacific Time (PT).\" ,fg='red')\r\n secho(f\"\\tYou can request for Quota increase from https://console.developers.google.com/apis/api/youtube.googleapis.com/quotas.\", fg='red')\r\n else:\r\n secho(f\"\\t Search failed due to {error_domain}:{error_reason}, message: {error_message}\")\r\n return None",
"def test_task_video_download(url_to_video: str, empty_video_resource: VideoResource):\n download_video(url_to_video, empty_video_resource.id)\n empty_video_resource.refresh_from_db()\n video_instance = empty_video_resource.videos.filter(primary=True).first()\n\n assert empty_video_resource.videos.all()\n assert video_instance.extension == 'mp4'\n assert video_instance.primary\n for item in video_instance.video.open():\n assert item",
"async def youtube(self, ctx, *args):\n if not args:\n await ctx.send(\"usage: `>youtube [search string]`\")\n return\n search_string = \" \".join(args)\n search_string = urllib.parse.urlencode({'search_query': search_string})\n response = requests.get('http://www.youtube.com/results?' + search_string + \"&hl=en_US&app=desktop\")\n if response.status_code == 200:\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})', response.content.decode())\n try:\n first_result_url = 'http://www.youtube.com/watch?v=' + search_results[0]\n except IndexError:\n with open('downloads/yt_dump.txt', 'w') as f:\n f.write(response.content.decode())\n #print(response.is_redirect)\n return await ctx.send(\"Found nothing!\")\n await ctx.send(first_result_url)\n self.logger.info(misolog.format_log(ctx, f\"{first_result_url}\"))\n else:\n await ctx.send(\"Error: status code \" + str(response.status_code))\n self.logger.info(misolog.format_log(ctx, f\"error{response.status_code}\"))",
"async def get_youtube_video(self, ctx, *, query):\n\n if not query:\n return await ctx.send(\"Go on, search something.\")\n\n # Executor for sync function\n video_list = await self.bot.loop.run_in_executor(None, YouTube.sync_get_youtube_video, query)\n\n if not video_list:\n return await ctx.say(f\"Sorry, couldn't find anything for `{query}`\")\n\n # Return top hit\n await ctx.send(f'{video_list[0][\"video_url\"]}')",
"async def download_video(v_url):\n reply = await v_url.get_reply_message()\n if v_url.pattern_match.group(2) != \"\":\n url = v_url.pattern_match.group(2)\n elif reply is not None:\n url = reply.message\n url = re.findall(r\"\\bhttps?://.*\\.\\S+\", reply.message)[0]\n else:\n return\n type = (\n v_url.pattern_match.group(1).lower()\n if v_url.pattern_match.group(1) is not None\n else \"a\"\n )\n await v_url.edit(\"`Preparing to download...`\")\n out_folder = Config.TMP_DOWNLOAD_DIRECTORY + \"youtubedl/\"\n Config.TMP_DOWNLOAD_DIRECTORY + \"/thumb_image.jpg\"\n if not os.path.isdir(out_folder):\n os.makedirs(out_folder)\n if type == \"a\":\n opts = {\n \"format\": \"bestaudio\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"key\": \"FFmpegMetadata\",\n \"writethumbnail\": True,\n \"embedthumbnail\": True,\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"320\",\n }\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"quiet\": True,\n \"logtostderr\": False,\n }\n video = False\n song = True\n\n elif type == \"v\":\n opts = {\n \"format\": \"best\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"getthumbnail\": True,\n \"embedthumbnail\": True,\n \"xattrs\": True,\n \"writethumbnail\": True,\n \"key\": \"FFmpegMetadata\",\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\"key\": \"FFmpegVideoConvertor\", \"preferedformat\": \"mp4\"},\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"logtostderr\": False,\n \"quiet\": True,\n }\n song = False\n video = True\n\n try:\n await v_url.edit(\"`Fetching playlist data, please wait..`\")\n with YoutubeDL(opts) as ytdl:\n ytdl.extract_info(url)\n # print(ytdl_data['thumbnail'])\n filename = sorted(get_lst_of_files(out_folder, []))\n except DownloadError as DE:\n await v_url.edit(f\"`{str(DE)}`\")\n return\n except ContentTooShortError:\n await v_url.edit(\"`The download content was too short.`\")\n return\n except GeoRestrictedError:\n await v_url.edit(\n \"`Video is not available from your geographic location due to geographic restrictions imposed by a website.`\"\n )\n return\n except MaxDownloadsReached:\n await v_url.edit(\"`Max-downloads limit has been reached.`\")\n return\n except PostProcessingError:\n await v_url.edit(\"`There was an error during post processing.`\")\n return\n except UnavailableVideoError:\n await v_url.edit(\"`Media is not available in the requested format.`\")\n return\n except XAttrMetadataError as XAME:\n await v_url.edit(f\"`{XAME.code}: {XAME.msg}\\n{XAME.reason}`\")\n return\n except ExtractorError:\n await v_url.edit(\"`There was an error during info extraction.`\")\n return\n except Exception as e:\n await v_url.edit(f\"{str(type(e)): {str(e)}}\")\n return\n c_time = time.time()\n await v_url.edit(\"`YouTube Playlist Downloading Processing Now.\\nPlease Wait!`\")\n if song:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = True\n supports_streaming = False\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 180\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n 
duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n try:\n ytdl_data_name_audio = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_audio[: (len(ytdl_data_name_audio) - 4)]\n + \".jpg\"\n )\n print(ytdl_data_name_audio)\n file_path = single_file\n song_size = file_size(file_path)\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_audio}`\"\n + \"\\n\"\n + f\"Size👉 {song_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n allow_cache=False,\n thumb=thumb,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_audio}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)\n if video:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = False\n supports_streaming = True\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 0\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n # print(ytdl_data)\n # for file in os.listdir(\"./DOWNLOADS/youtubedl/\"):\n # if file.endswith(\".jpg\"):\n # thumb = \"./DOWNLOADS/youtubedl/\" + file\n # print(os.path.join(\"./DOWNLOADS/youtubedl/\", file))\n # image_link = ytdl_data['thumbnail']\n # downloaded_image = wget.download(image_link,out_folder)\n # thumb = ytdl_data_name_video + \".jpg\"\n file_path = single_file\n video_size = file_size(file_path)\n try:\n ytdl_data_name_video = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_video[: (len(ytdl_data_name_video) - 4)]\n + \".jpg\"\n )\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_video}`\"\n + \"\\n\"\n + f\"Size👉 {video_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n thumb=thumb,\n allow_cache=False,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_video}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)",
"def get_video(self, video_id):\n return self._videos.get(video_id, None)",
"async def retrieveJS(self, videoId=None):\n session = await self.getSession\n resp = await session.get(\"https://www.youtube.com/iframe_api\")\n # if resp.status == 200 and",
"def download_video(self, file_path, video_url, video_creation_time):\r\n logger.debug(\"Downloading video created at \" + _format_timestamp_iso(self.tz, video_creation_time) + \" from \"\r\n + video_url + \" to \" + file_path)\r\n failed = False\r\n try:\r\n self._download_with_api(file_path, video_url)\r\n except Exception as e:\r\n logger.debug(\"Video download failed using TikTokApi: \" + str(e))\r\n failed = True\r\n if not os.path.isfile(file_path):\r\n failed = True\r\n logger.debug(\"No file was created by TikTokApi at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n failed = True\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed TikTokApi download at \" + file_path)\r\n except Exception as ee:\r\n logger.error(\"Unable to delete malformed TikTokApi download at \" + str(ee))\r\n if failed:\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n try:\r\n logger.debug(\"Falling back to YouTube-dl\")\r\n self.fallback_counter += 1\r\n self._download_with_ytdl(file_path, video_url)\r\n if not os.path.isfile(file_path):\r\n raise AssertionError(\"No file was created by YouTube-dl at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed YouTube-dl download at \" + file_path)\r\n except Exception as ee:\r\n raise AssertionError(\"Malformed file was created at \" + file_path +\r\n \" and could not be removed: \" + str(ee))\r\n raise AssertionError(\"Malformed file was created at \" + file_path + \" and was removed\")\r\n failed = False\r\n except youtube_dl.utils.DownloadError as ee:\r\n logger.error(\"YouTube-dl DownloadError: \" + str(ee))\r\n self.ytdl_downloaderror_counter += 1\r\n failed = True\r\n except Exception as ee:\r\n logger.error(\"Video download failed with YouTube-dl: \" + str(ee))\r\n self.other_error_counter += 1\r\n failed = True\r\n if not failed:\r\n try:\r\n os.utime(file_path, (video_creation_time, video_creation_time))\r\n except Exception as e:\r\n logger.debug(\"Unable to set utime of \" + str(video_creation_time) + \" on file \" + file_path +\r\n \", Error: \" + str(e))\r\n return True\r\n return False",
"def do_downloads(filename1=\"og\", filename2=\"lyrical\", video_id=DEFALT_VIDEO_ID):\n original_video_url = youtube_id_to_url(video_id)\n download_from_url(original_video_url, filename1)\n lyrics_video_url = get_lyrics_url(original_video_url)\n download_from_url(lyrics_video_url, filename2)\n\n return filename1, filename2",
"def download(video_identifier,\n output_filename,\n num_attempts=5,\n url_base='https://www.youtube.com/watch?v='):\n # Defensive argument checking.\n assert isinstance(video_identifier, str), 'video_identifier must be string'\n assert isinstance(output_filename, str), 'output_filename must be string'\n assert len(video_identifier) == 11, 'video_identifier must have length 11'\n\n status = False\n\n if not os.path.exists(output_filename):\n command = [\n 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',\n '-f', 'mp4', '-o',\n '\"%s\"' % output_filename,\n '\"%s\"' % (url_base + video_identifier)\n ]\n command = ' '.join(command)\n print(command)\n attempts = 0\n while True:\n try:\n subprocess.check_output(\n command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n attempts += 1\n if attempts == num_attempts:\n return status, 'Fail'\n else:\n break\n # Check if the video was successfully saved.\n status = os.path.exists(output_filename)\n return status, 'Downloaded'",
"def download_wrapper(youtube_id, output_dir):\n # we do this to align with names in annotations\n output_filename = os.path.join(output_dir, youtube_id + '.mp4')\n if os.path.exists(output_filename):\n status = tuple([youtube_id, True, 'Exists'])\n return status\n\n downloaded, log = download(youtube_id, output_filename)\n status = tuple([youtube_id, downloaded, log])\n return status",
"def google_youtube_details(vidid):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. Reload after setting.\")\n\t# TODO: make module option for safesearch\n\td = {\"id\" : quote(vidid.encode(\"utf-8\")), \"part\" : \"contentDetails,id,snippet,statistics,status\", \"key\" : API_KEY}\n\t\n\tf = urlopen(YOUTUBE_INFO_URL % (urlencode(d)))\n\tytdata = load(f)\n\tif f.getcode() == 200:\n\t\tif \"items\" in ytdata:\n\t\t\tresults = ytdata[\"items\"]\n\t\t\tif len(results) == 0:\n\t\t\t\treturn None\n\t\t\treturn results[0]\n\telse:\n\t\traise RuntimeError(\"Error (%s): %s\" % (f.getcode(), ytdata.replace(\"\\n\", \" \")))",
"def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))",
"def download_vid(vid_link, quality_num=None):\r\n if quality_num is not None:\r\n # if quality_num provided\r\n try:\r\n os.system(\"youtube-dl -f \"+str(quality_num)+\" \\'\"+str(vid_link)+\"\\'\")\r\n except Exception:\r\n print(Exception)\r\n else:\r\n # by default the best quality is downloaded\r\n try:\r\n os.system(\"youtube-dl \"+str(vid_link))\r\n except Exception:\r\n print(Exception)",
"def youtube_id_to_url(yt_video_id):\n return 'https://www.youtube.com/watch?v=' + yt_video_id",
"def download(target_url):\n program_location = sys.executable\n program_name = \"youtube-dl.exe\"\n # Define arguments. see this url for help\n # https://github.com/rg3/youtube-dl\n ignore_errors = \"-i\"\n safe_filenames = \"--restrict-filenames\"\n output_arg = \"-o\"\n output_template = \"download\\%(uploader)s\\%(playlist)s\\%(title)s-%(id)s.%(ext)s\"\n command = [program_name, ignore_errors, safe_filenames, output_arg, output_template, target_url]\n result = subprocess.call(command)\n print \"Command result: \", result"
] | [
"0.7745084",
"0.7303579",
"0.7203833",
"0.71133363",
"0.70373094",
"0.7022766",
"0.69767386",
"0.6973098",
"0.69691336",
"0.69578743",
"0.69145817",
"0.6872059",
"0.68037355",
"0.6796082",
"0.6795651",
"0.67516667",
"0.6571322",
"0.6560047",
"0.65302217",
"0.65141547",
"0.6513327",
"0.6511696",
"0.6491822",
"0.64788663",
"0.6476651",
"0.6446605",
"0.6435045",
"0.64282376",
"0.64242804",
"0.6374236"
] | 0.7562303 | 1 |
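The long upload snippet that closes this row probes each media file with hachoir's `createParser`/`extractMetadata` before building video attributes. Below is a minimal, self-contained sketch of that probing step, assuming the `hachoir` package is installed; the `probe_video` name and the `example.mp4` path are placeholders, and falling back to zeros mirrors the defaults used in the snippet above.

```python
# Minimal sketch of the hachoir-based probing step, assuming the `hachoir` package
# is installed (hachoir.parser / hachoir.metadata). The file path below is a placeholder.
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser


def probe_video(path):
    """Return (duration_seconds, width, height), falling back to zeros when unknown."""
    parser = createParser(path)
    if parser is None:
        return 0, 0, 0
    metadata = extractMetadata(parser)
    if metadata is None:
        return 0, 0, 0
    duration = metadata.get("duration").seconds if metadata.has("duration") else 0
    width = metadata.get("width") if metadata.has("width") else 0
    height = metadata.get("height") if metadata.has("height") else 0
    return duration, width, height


if __name__ == "__main__":
    print(probe_video("example.mp4"))  # hypothetical local file
```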
Check that user_data is a dict and that key is in there | def has_user_data(self, key):
return isinstance(self._user_data, dict) and key in self._user_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_dict(check):",
"def is_valid(data):\n return isinstance(data, dict) \\\n and \"u_id\" in data \\\n and \"token\" in data \\\n and isinstance(data[\"u_id\"], int) \\\n and isinstance(data[\"token\"], str)",
"def can_insert(data):\n return isinstance(data, dict)",
"def _is_key_value(data):\n if data is None:\n return False\n return all(x in data for x in ['key', 'value'])",
"def isDict(data):\n\ttry:\n\t\tfrom types import DictType\n\t\tif type(data) == DictType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type({}):\n\t\t\treturn True\n\treturn False",
"def verify_rpc_value ( user_dict ):\n for key in user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )",
"def _is_dict(val):\n\n return isinstance(val, dict)",
"def sanity_check(cls, data): # no version with ID, since PUT (update) isn't allowed\n data = _dict_sanity_check(data,\n mandatory_keys = [\n (\"user_id\", User.exists),\n (\"customer_id\", Customer.exists)\n ],\n optional_keys = [])\n return data, None",
"def verifyData(self, expectedDict):\n pass",
"def _is_dict(item):\n return isinstance(item, dict)",
"def validate_user_request_dict(request_dict):\n if 'first_name' not in request_dict:\n return False\n if 'last_name' not in request_dict:\n return False\n if 'id' not in request_dict:\n return False\n if 'email' not in request_dict:\n return False\n return True",
"def check_user_data_in_response(response_data):\n assert response_data[\"id\"] > 0\n assert response_data[\"name\"] == pytest.test_user.name\n assert response_data[\"email\"] == pytest.test_user.email\n assert response_data[\"gender\"] == pytest.test_user.gender\n assert response_data[\"status\"] == pytest.test_user.status",
"def __is_valid_dict(self, GRFData):\n\n if type(GRFData) is not dict:\n raise ValueError(\"Expected GRFData to be of type '{}', but received type '{}'.\".format(type(dict), type(GRFData)))\n\n for component in self.comp_list:\n if component not in GRFData.keys():\n raise ValueError(\"Component '{}' not found in GRFData.\".format(component))",
"def is_data_true(data):\n\n if not data:\n return False\n\n if not isinstance(data, dict):\n if not util.get_value_from_health_internal_tuple(data):\n return False\n return True\n\n for _k in data:\n if is_data_true(data[_k]):\n return True\n\n return False",
"def dict_support_required(self):\n\t\treturn self.typemanager.has_dicts",
"def test_process_dict_true(self):\n\n self.assertIn('userA@domain', self.temp_set)",
"def _validate_input_dict(self, input):\n if isinstance(input, dict):\n required = {\"type\", \"value\"}\n not_found = required - set(input.keys())\n if not_found:\n raise SpecificationError(\n \"Required key(s) not found in input dictionary: {}\".format(\n \", \".join(not_found)\n )\n )\n else:\n raise Exception(\"input element has to be a dictionary\")",
"def valid_user_data(user_data):\n return 'account_ids' in user_data and 'monthly_expenses' in user_data",
"def is_dict(value):\n return isinstance(value, dict)",
"def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None",
"def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)",
"def dict_type(verifield, required):\n if verifield is None: return True\n if not isinstance(verifield, dict): return False\n all_of = [value or True for value in verifield.values() if isinstance(value, required) or value is None]\n return not verifield or (all(all_of or [False]) and len(all_of) == len(verifield))",
"def check_data_is_format(data):\n try:\n data_lst = data\n if not isinstance(data, list):\n data_lst = json.loads(data)\n\n for data in data_lst:\n if not isinstance(data, dict):\n raise ValueError(\"data contains not dict\")\n\n for key in data.keys():\n check_type(key)\n except ValueError as e:\n logging.error(\"data format check error %s\" % e)\n return False, None\n except Exception as e:\n logging.error(\"data format check unknown error %s\" % e)\n return False, None\n else:\n return True, data_lst",
"def is_dict(self) -> bool:\n return True",
"def _verfify_auth_and_profiles_data (self, data):\n if type(data.get('profiles')) == dict:\n if len(str(data.get('authURL', ''))) > 10 and len(str(data.get('authURL', ''))) < 50:\n return True\n return False",
"def is_typed_dict(self) -> bool:\n return True",
"def params_is_valid(data):\n if isinstance(data['title'], str) and isinstance(data['description'], str) and isinstance(data['params'], dict):\n return True\n else:\n return False",
"def update_response_json(user, data):\n if type(data) == dict and type(user) == str:\n if user in data.keys():\n data = data[user]\n response_json.response[user] = data\n return response_json.response\n return {'update': 'failed',\n 'reason': 'data provided incorrectly formatted. \\\n the data must be json formatted, and <user> must be of type string'}",
"def check_if_nested(data):\n if isinstance(data, dict):\n for k in data:\n if isinstance(data[k], (list, dict)):\n return True\n elif isinstance(data, list):\n for i in data:\n if isinstance(i, (list, dict)):\n return True\n return False",
"def verifyDictTypes( template, dictToCheck ):\n for key in dictToCheck:\n if not ( ( isinstance( dictToCheck[ key ], list ) and\n isinstance( template[ key ], list ) ) or\n ( isinstance( dictToCheck[ key ], dict ) and\n isinstance( template[ key ], dict ) ) or\n ( isinstance( dictToCheck[ key ], template[ key ] ) ) ):\n return False\n\n return True"
] | [
"0.719985",
"0.7077056",
"0.6867152",
"0.6618034",
"0.6470574",
"0.63971496",
"0.63777405",
"0.63742024",
"0.63429755",
"0.63425326",
"0.62384546",
"0.6238243",
"0.61970216",
"0.6187313",
"0.6175904",
"0.61512995",
"0.6139445",
"0.6129756",
"0.610137",
"0.6098412",
"0.6043313",
"0.60295683",
"0.6013052",
"0.597079",
"0.5963954",
"0.5947398",
"0.5927177",
"0.5924112",
"0.59203804",
"0.58964926"
] | 0.75419945 | 0 |
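The positive document for this row reduces the membership test to a single guarded expression: type-check first, then key lookup. A runnable sketch of the same pattern follows; the `UserDataHolder` class, its constructor, and the sample keys are illustrative additions, not part of the original code.

```python
# Self-contained sketch of the membership check; UserDataHolder and its attribute
# name are illustrative only.
class UserDataHolder:
    def __init__(self, user_data=None):
        self._user_data = user_data

    def has_user_data(self, key):
        # True only when the backing store is a dict and actually contains the key
        return isinstance(self._user_data, dict) and key in self._user_data


assert UserDataHolder({"theme": "dark"}).has_user_data("theme")
assert not UserDataHolder({"theme": "dark"}).has_user_data("lang")
assert not UserDataHolder(None).has_user_data("theme")       # not a dict at all
assert not UserDataHolder(["theme"]).has_user_data("theme")  # list, not a dict
```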
Return key from user_data if it's a dict | def get_user_data(self, key, default=None):
if not isinstance(self._user_data, dict):
return default
return self._user_data.get(key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getitem__(self, key):\n return self._user_data.get(key)",
"def _get_user_data(self):\n return {\"key\": self._key}",
"def get(self, key):\n return self._user_data.get(key)",
"def _get_key(self, object_type, user_key = None):\n\t\tif not user_key and not self.object_type_keys.has_key(object_type):\n\t\t\traise ParserError(\"Unknown key for object type: %s\\n\" % object_type)\n\n\t\t## Use a default key\n\t\tif not user_key:\n\t\t\tuser_key = self.object_type_keys[object_type]\n\n\t\treturn user_key",
"def get_key_from_data_dict(data: dict, key: str):\n retrieved_key = data.get(key, None)\n if not retrieved_key:\n LOG.info(\n f\"Could not get key {key} from request to the API. Data received: {data}\"\n )\n return retrieved_key",
"def key_data(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_data\")",
"def get_key(self, user):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n key = ApiKey.objects.get(user=user)\r\n except ApiKey.DoesNotExist:\r\n return False\r\n\r\n return key.key",
"def get_if_exist(self, data, key):\n if key in data:\n return data[key]\n return None",
"def __is_key_in_json(self, key=str, json_dict=json):\n if key in json_dict:\n # noinspection PyUnresolvedReferences\n return json_dict[key]\n else:\n return self.NO_KEY_VALUE_FOR_ENTRY",
"def data_key(self):\n raise NotImplementedError",
"def datakey(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"datakey\")",
"def _get_user_id(self, user: Optional[Dict[str, Any]]) -> Optional[str]:\n return user[\"id\"] if user and \"id\" in user else None",
"def has_user_data(self, key):\n return isinstance(self._user_data, dict) and key in self._user_data",
"def fetch(self, key: object, default=None):\n return self._user_data.get(key, default)",
"def get_key(dictionary: dict, *args) -> Union[str, bool, dict]:\n data = reduce(lambda c, k: c.get(k, {}), args, dictionary)\n if data == {}:\n return \"\"\n return data",
"def key(self):\n if \"key\" in self._prop_dict:\n return self._prop_dict[\"key\"]\n else:\n return None",
"def __getitem__(self, key):\n return self.data.get(key, '')",
"def getType(self):\n if self.use_dic:\n data = self.dic.keys()[0]\n act = self.dic[data].keys()[0]\n return self.dic[data][act].keys()[0]\n else:\n return None",
"def key_data(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_data\")",
"def key_data(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_data\")",
"def user_data(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_data\")",
"def user_data(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_data\")",
"def extract_backing_type(value: dict) -> str:\n return next(iter(value.keys()))",
"def get_key(self, item):\r\n return item[0]",
"def _get_user_id(user_data: dict):\n data = json.dumps(user_data).encode('utf-8')\n hashed_data = hashlib.sha256()\n hashed_data.update(data)\n return hashed_data.hexdigest()",
"def get_userdata(self, nick, datakey):\n nickkey = irc.strings.lower(nick)\n data = None\n if nickkey in self.users and datakey in self.users[nickkey]:\n data = self.users[nickkey][datakey]\n return data",
"def meta_value(request_object, dictkey):\n \n try:\n val = request_object.META[dictkey]\n except: # Exception as ex:\n val = ''\n return val",
"def getUserFromKey(key):\n\t\t#get(Key(key))\n\t\t#return None if no user found",
"def check_user_type(self): # FIXME Buggy as screen_name can be int\n try:\n int(self.user)\n return {'user_id': self.user}\n except ValueError:\n return {'screen_name': self.user}",
"def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")"
] | [
"0.6847286",
"0.65308297",
"0.6454429",
"0.63964266",
"0.6262093",
"0.618459",
"0.61062187",
"0.60399806",
"0.59043604",
"0.5890058",
"0.5878438",
"0.5858267",
"0.5846593",
"0.58453906",
"0.5799683",
"0.57730305",
"0.57642144",
"0.57528245",
"0.5742863",
"0.5742863",
"0.5728808",
"0.5728808",
"0.5722891",
"0.5696669",
"0.5694399",
"0.5691886",
"0.56825244",
"0.5680933",
"0.56729406",
"0.567122"
] | 0.6621438 | 1 |
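This row's positive document is the companion lookup to the membership check above: return the stored value only when the backing store really is a dict, otherwise fall back to the default. The sketch below is a small self-contained variation; the class name is hypothetical, and forwarding `default` into `dict.get` is an assumption rather than a copy of the original, which only returns the default when the store is not a dict.

```python
# Sketch of the guarded lookup; the class is hypothetical, and passing `default`
# through to dict.get is an assumption on top of the original snippet.
class UserDataHolder:
    def __init__(self, user_data=None):
        self._user_data = user_data

    def get_user_data(self, key, default=None):
        # Non-dict stores (None, lists, ...) always yield the default
        if not isinstance(self._user_data, dict):
            return default
        return self._user_data.get(key, default)


assert UserDataHolder({"lang": "en"}).get_user_data("lang") == "en"
assert UserDataHolder({"lang": "en"}).get_user_data("tz", "UTC") == "UTC"
assert UserDataHolder(None).get_user_data("lang", "fr") == "fr"
```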
Function to test add furniture functionality. | def test_add_furniture(self):
add_furniture('invoice.csv', 'Elisa Miles', 'LR04', 'Leather Sofa', 25)
add_furniture('invoice.csv', 'Edward Data', 'KT78', 'Kitchen Table', 10)
add_furniture('invoice.csv', 'Alex Gonzales', 'BR02', 'Queen Mattress', 17)
# Generate list of rentals
with open('invoice.csv', 'r') as csvfile:
rentals = []
for row in csvfile:
rentals.append(row)
print(rentals)
# Assert statements
self.assertEqual(rentals[0], ('Elisa Miles,LR04,Leather Sofa,25\n'))
self.assertEqual(rentals[1], ('Edward Data,KT78,Kitchen Table,10\n'))
self.assertEqual(rentals[2], ('Alex Gonzales,BR02,Queen Mattress,17\n')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", 25.00)\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", 10.00)\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", 17.00)",
"def setUp(self):\n self.item = Furniture('11', 'sofa', '4', '5', 'suede', 'xl')",
"def test_add_stock_item(self):\n pass",
"def test_add_new_furniture(self):\n input_vars = ['4', 'Rug', '1', 'y', 'Berber', 's']\n inventory = {}\n with patch('builtins.input', side_effect=input_vars):\n main.add_new_item(inventory)\n self.assertEqual(inventory['4'],\n {\n 'product_code': '4',\n 'description': 'Rug',\n 'market_price': 24,\n 'rental_price': '1',\n 'material': 'Berber',\n 'size': 's'\n })",
"def test_api_can_add_food_to_a_meal(self):\n response = self.client.post(f'/api/v1/meals/{self.breakfast.id}/foods/{self.oatmeal.id}')\n # import code; code.interact(local=dict(globals(), **locals()))\n\n self.assertEqual(response.data['message'], \"Successfully added oatmeal to breakfast\")",
"def add_furniture():\n print(\"Attempting to seed the furniture collection.....\")\n print()\n\n chair_path = Path(\"chair.png\")\n\n couch = FurnitureItem(\n \"Comfy couch\",\n \"Well loved, but still in pretty good condition\",\n 60.00,\n 40,\n \"[email protected]\",\n \"Couch\",\n \"beige\",\n [50, 20, 10],\n )\n couch.set_image_filepath(chair_path)\n Database.add_item(couch)\n print(\"couch has been successfully added\")\n\n table = FurnitureItem(\n \"Dining room table\",\n \"Wooden dining room table. Has a few scuffs, but not bad!\",\n 30.00,\n 15,\n \"[email protected]\",\n \"Table\",\n \"wood\",\n [40, 20, 40],\n )\n table.set_image_filepath(chair_path)\n Database.add_item(table)\n print(\"table has been successfully added\")\n\n bed = FurnitureItem(\n \"Bed Frame\",\n \"Just selling the bed frame, you'll have \\\n to get your own mattress\",\n 55.00,\n 50,\n \"[email protected]\",\n \"Bed\",\n \"white\",\n [10, 20, 10],\n )\n bed.set_image_filepath(chair_path)\n Database.add_item(bed)\n print(\"bed has been successfully added\")\n\n desk = FurnitureItem(\n \"Ikea desk, no longer need it\",\n \"In great condition, this is truly a steal\",\n 60.00,\n 35,\n \"[email protected]\",\n \"Ikea Desk\",\n \"navy\",\n [20, 20, 30],\n )\n desk.set_image_filepath(chair_path)\n Database.add_item(desk)\n print(\"desk has been successfully added\")\n\n shelf = FurnitureItem(\n \"Book shelf, never used\",\n \"Brand new\",\n 110.00,\n 25,\n \"[email protected]\",\n \"Book Shelf\",\n \"black\",\n [10, 20, 100],\n )\n shelf.set_image_filepath(chair_path)\n Database.add_item(shelf)\n print(\"shelf has been successfully added\")\n\n print()\n print(\"Done seeding the furniture collection!\")\n print(\"----------------------------------------------\")",
"def test_add(self):\n # Everything added will be deleted later in test_delete.\n first_name = 'Trevor'\n last_name = 'Harvey'\n entry_date = '04/19/2012'\n title = 'Test'\n minutes = 34\n notes = 'testing entries. and regex (555) 555-3425'\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)\n # second test add\n first_name = 'Nik'\n last_name = 'Silver'\n entry_date = '01/14/1827'\n title = '[email protected]'\n minutes = 34\n notes = 'This is an email test.'\n\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)",
"def test_add_new_in_stock(add):\n length = len(STOCK)\n # here first parameter is for quantity and second for price while flower name is initialised already\n add[1].add_new_in_stock(10, 4.5)\n assert len(STOCK) == length + 1\n assert STOCK[-1] == {'flower_name': \"Sunflower\", 'quantity': 10, \"price\": 4.5}\n STOCK.pop()",
"def add_furniture(itemcode, description, marketprice, rentalprice):\n\n material = input(\"Enter item material: \")\n size = input(\"Enter item size (S,M,L,XL): \")\n newitem = Furniture(itemcode, description,\n marketprice, rentalprice\n , material, size)\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")",
"def test_add_to_stock(add):\n assert STOCK[0]['quantity'] == 20\n add[0].add_to_stock(10)\n assert STOCK[0]['quantity'] == 30\n STOCK[0]['quantity'] = 20",
"def test_add_meal(self):\n with self.client:\n response = self.add_meal(\"pilawo\", 15000)\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertEqual(data.get('message'), \"Meal successfully created\")",
"def test_add_one_more_test(self):\n self.assertTrue(True)",
"def test_form_submition_and_product_creation(user_company, client, authenticated_user):\n add_product_url = reverse('add-product')\n response = client.post(add_product_url, {\n 'name': 'Test_product_name',\n 'serial_number': 'XZ001', \n 'manufacturer': 'Test company',\n 'price_net': 415.26,\n 'description': fake.paragraph(),\n 'stock': 16\n })\n assert response.status_code == 302\n product = Product.objects.get(name='Test_product_name')\n assert response.url == reverse('product-detail',kwargs={'pk': product.pk}) \n assert product.user == authenticated_user\n assert product in Product.objects.all()",
"def test_add_item_to_cart(client):\n raise NotImplemented('Acceptance test failed')",
"def test_add_new_in_stock_negative(add):\n # here first parameter is for quantity and second for price while flower name is initialised already\n\n for i in [(0, 1.1), (\"we\", \"EW\"), (0, 0)]:\n add[1].add_new_in_stock(10, 4.5), i\n assert not STOCK[-1] == {'flower_name': \"Sunflower\", 'quantity': 10, \"price\": 4.5}\n STOCK.pop()",
"def test_foodtrucks_create(self):\n\t\tprint 'API Test: create a new foodtruck'\n\t\turl = reverse('foodtruck_list')\n\t\tdata = {\"status\" : \"APPROVED\",\\\n\t\t \"expirationdate\" : \"2015-03-15T00:00:00\",\\\n\t\t \"permit\" : \"14MFF-0107\",\\\n\t\t \"block\" : \"3794\",\\\n\t\t \"received\" : \"Jun 24 2014 1:49PM\",\\\n\t\t \"facilitytype\" : \"Truck\",\\\n\t\t \"blocklot\" : \"3794002A\",\\\n\t\t \"locationdescription\" : \"02ND ST: TOWNSEND ST to KING ST (700 - 799)\",\\\n\t\t \"cnn\" : 148000,\\\n\t\t \"priorpermit\" : 1,\\\n\t\t \"approved\" : \"2014-06-24T13:55:30\",\\\n\t\t \"noisent\" : \"2013-07-25T00:00:00\",\\\n\t\t \"schedule\" : \"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule¶ms=permit=14MFF-0107&ExportPDF=1&Filename=14MFF-0107_schedule.pdf\",\\\n\t\t \"address\" : \"750 02ND ST\",\\\n\t\t \"applicant\" : \"Steve's Mobile Deli\",\\\n\t\t \"lot\" : \"002A\",\\\n\t\t \"fooditems\" : \"Cold Truck: Pre-packaged sandwiches: Burgers: Hot Dogs: Muffin Sandwiches: Enchiladas: Bagels: Burritos: Salads: Snacks: Beverages\",\\\n\t\t \"longitude\" : -122.402978526686,\\\n\t\t \"latitude\" : 37.7302216813049, \\\n\t\t \"y\" : 2093947.369,\\\n\t\t \"x\" : 6011371.493,\\\n\t\t \"objectid\" : 554527}\n\t\t\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\t\n\t\tquant = '1.000000'\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[k], v)\n\t\t\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[0][k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[0][k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[0][k], v)\n\t\tprint 'pass'",
"def setUp(self):\n self.client = APIClient()\n self.apple = Food.objects.create(name=\"apple\", calories=50)\n self.oatmeal = Food.objects.create(name=\"oatmeal\", calories=400)\n self.breakfast = Meal.objects.create(name=\"breakfast\")\n self.snack = Meal.objects.create(name=\"snack\")\n self.lunch = Meal.objects.create(name=\"lunch\")\n self.dinner = Meal.objects.create(name=\"dinner\")\n self.breakfast.foods.add(self.apple)",
"def get_furniture():",
"def test_add_item_adds_single_entry():\n sc.menu = sc.default_menu\n sc.current.add_item('Coffee', 1)\n assert sc.current.receipt == {'subtotal': 1.59, 'Coffee': 1}",
"def test_add_to_fav_(self):\n result = self.client.post(\"/add_to_fav\", data={\"yelp_biz_id\":\"JA_V9TqDCrkgknqrcUndIQ\", \n \"yelp_rest_name\":\"Siam\", \"yelp_rating\":\"4\", \n \"yelp_category\":\"Thai\", \"yelp_price\":\"$$\", \n \"yelp_image_url\":\"https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg\" })\n\n DB_result = Restaurant_details.query.filter_by(biz_id = \"JA_V9TqDCrkgknqrcUndIQ\").first()\n self.assertIsNotNone(DB_result) #testing that the returned result is not NONE\n self.assertEqual(DB_result.restaurant_name, 'Siam') #testing restaurant name is what it should be\n \n self.assertIn(b\"Your Favourite has been saved\", result.data)",
"def test_create_ingredient_successful(self):\n payload = {'name':'Cabbage'}\n self.client.post(INGREDIENTS_URL, payload)\n exists = Ingredient.objects.all().filter(user=self.user, name=payload['name']).exists\n self.assertTrue(exists)",
"def test_get_food(self):\n pass",
"def test_add_item_using_post(self):\n pass",
"def test_create_ingredient(self):\n\n ingredient_payload = {'name': 'Test Ingredient'}\n self.client.post(URL_INGREDIENTS, ingredient_payload)\n\n is_ingredient_created = Ingredient.objects.filter(\n user=self.user,\n name=ingredient_payload['name']\n ).exists()\n\n self.assertTrue(is_ingredient_created)",
"def test_create_shelf(self, *_):\n form = forms.ShelfForm()\n form.data[\"user\"] = self.local_user.id\n form.data[\"name\"] = \"new shelf name\"\n form.data[\"description\"] = \"desc\"\n form.data[\"privacy\"] = \"unlisted\"\n request = self.factory.post(\"\", form.data)\n request.user = self.local_user\n\n views.create_shelf(request)\n\n shelf = models.Shelf.objects.get(name=\"new shelf name\")\n self.assertEqual(shelf.privacy, \"unlisted\")\n self.assertEqual(shelf.description, \"desc\")\n self.assertEqual(shelf.user, self.local_user)",
"def test_add_book(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n \"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n assert first_book_list.add_book(first_book)\n assert first_book_list.find_book(\"First Man\")\n assert first_book_list.num_books() == 1",
"def test_add_furniture_write(_customers_to_add):\n\n test_invoice = \"../data/test-invoice.csv\"\n csv_contents = []\n\n if Path(test_invoice).exists():\n remove(test_invoice)\n\n for customer in _customers_to_add:\n l.add_furniture(\n test_invoice, customer[0], customer[1], customer[2], customer[3]\n )\n\n with open(test_invoice, \"r\") as csv_file:\n contents = reader(csv_file, delimiter=',')\n for line in contents:\n if line != []:\n csv_contents += [line]\n\n csv_contents += contents\n\n assert _customers_to_add == csv_contents",
"def test_add_to_cart(self):\n\n # test sale item that can be sold\n response = self.client.get(\n '/self.base_url/sales/3/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"These are the items on your Cart\")\n self.assertEqual(response.status_code,200)",
"def test_shoppingitems_creation(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread', '[email protected]')\n self.assertIsInstance(response, list)\n # check if item was successfully created\n self.assertIn(\"Bread\", str(res.data))",
"def test_add_item_adds_multiple_entries():\n sc.menu = sc.default_menu\n sc.current.add_item('Coffee', 2)\n sc.current.add_item('Coffee', 1)\n sc.current.add_item('Tea', 1)\n assert sc.current.receipt == {'subtotal': 6.36, 'Coffee': 3, 'Tea': 1}"
] | [
"0.80027395",
"0.70377046",
"0.6918816",
"0.68016565",
"0.6707079",
"0.66921055",
"0.66690326",
"0.6534658",
"0.6481919",
"0.6451947",
"0.64004886",
"0.63858724",
"0.63445914",
"0.6246608",
"0.6238948",
"0.62372917",
"0.6225444",
"0.62142056",
"0.6196843",
"0.61662406",
"0.61525416",
"0.61463815",
"0.61367375",
"0.6128628",
"0.6087897",
"0.6084826",
"0.60704744",
"0.60496575",
"0.60485226",
"0.60328287"
] | 0.7485811 | 1 |
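The test in this row's positive document expects an `add_furniture` helper that appends comma-separated rows to an invoice CSV. Below is a minimal sketch of a compatible helper plus a quick self-check; the signature is inferred from the test's calls, and the temporary invoice path is illustrative.

```python
# Sketch of a helper compatible with the test above; the signature is inferred from
# the test's calls and the temporary invoice path is illustrative.
import csv
import os
import tempfile


def add_furniture(invoice_file, customer_name, item_code, item_description, item_monthly_price):
    """Append one rental row to the invoice CSV, creating the file if it does not exist."""
    with open(invoice_file, "a", newline="") as csvfile:
        csv.writer(csvfile).writerow(
            [customer_name, item_code, item_description, item_monthly_price]
        )


invoice = os.path.join(tempfile.gettempdir(), "invoice_sketch.csv")
if os.path.exists(invoice):
    os.remove(invoice)  # start from a clean file so the check below is deterministic
add_furniture(invoice, "Elisa Miles", "LR04", "Leather Sofa", 25)
with open(invoice) as f:
    assert f.readline().strip() == "Elisa Miles,LR04,Leather Sofa,25"
os.remove(invoice)
```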
runs an automatic check to see if any transcriptions need to be started or are already finished and need to be re-uploaded\n\n    Needs dbConnection & an integer representing the max concurrent transcriptions that can run at a time\n\n    This is not a function you want to use for parsing and uploading files from the 'transcripts' folder, because you cannot reliably tell which of those files are still in progress; to be fixed later. | def runAutoCheck(dbConnection, maxConcurrent):
# checks if any shows are pending.
fileContent = DatabaseInteract.checkPre(dbConnection)
if(len(fileContent) > 0 and Tools.numRunningProcesses() < maxConcurrent):
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = TRUE WHERE id = '" + str(fileContent[1]) + "';")
dbConnection.commit()
cursor.close()
url = fileContent[0]
indexID = str(fileContent[1]) # get the ID instead of the filename
service = str(fileContent[3])
# podcastName = fileContent[2]
Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parseUpload(dbconnection, fileName):\n nhContent = ParseText.nohupTranscriptionContent(fileName)\n count = 0\n while count < len(nhContent[0]):\n try:\n rtf = nhContent[0][count]\n transcription = nhContent[1][count].replace(\"'\", \"''\").replace(\"_\", \"\")\n dbID = nhContent[2][count].replace(\".\", \"\")\n duration = nhContent[3][count]\n DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)\n count += 1\n except:\n print(\"couldnt upload one at index \" + str(count))\n count += 1",
"def check_transcripts(request):\r\n transcripts_presence = {\r\n 'html5_local': [],\r\n 'html5_equal': False,\r\n 'is_youtube_mode': False,\r\n 'youtube_local': False,\r\n 'youtube_server': False,\r\n 'youtube_diff': True,\r\n 'current_item_subs': None,\r\n 'status': 'Error',\r\n }\r\n try:\r\n __, videos, item = _validate_transcripts_data(request)\r\n except TranscriptsRequestValidationException as e:\r\n return error_response(transcripts_presence, e.message)\r\n\r\n transcripts_presence['status'] = 'Success'\r\n\r\n filename = 'subs_{0}.srt.sjson'.format(item.sub)\r\n content_location = StaticContent.compute_location(item.location.course_key, filename)\r\n try:\r\n local_transcripts = contentstore().find(content_location).data\r\n transcripts_presence['current_item_subs'] = item.sub\r\n except NotFoundError:\r\n pass\r\n\r\n # Check for youtube transcripts presence\r\n youtube_id = videos.get('youtube', None)\r\n if youtube_id:\r\n transcripts_presence['is_youtube_mode'] = True\r\n\r\n # youtube local\r\n filename = 'subs_{0}.srt.sjson'.format(youtube_id)\r\n content_location = StaticContent.compute_location(item.location.course_key, filename)\r\n try:\r\n local_transcripts = contentstore().find(content_location).data\r\n transcripts_presence['youtube_local'] = True\r\n except NotFoundError:\r\n log.debug(\"Can't find transcripts in storage for youtube id: %s\", youtube_id)\r\n\r\n # youtube server\r\n youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])\r\n youtube_text_api['params']['v'] = youtube_id\r\n youtube_response = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])\r\n\r\n if youtube_response.status_code == 200 and youtube_response.text:\r\n transcripts_presence['youtube_server'] = True\r\n #check youtube local and server transcripts for equality\r\n if transcripts_presence['youtube_server'] and transcripts_presence['youtube_local']:\r\n try:\r\n youtube_server_subs = get_transcripts_from_youtube(\r\n youtube_id,\r\n settings,\r\n item.runtime.service(item, \"i18n\")\r\n )\r\n if json.loads(local_transcripts) == youtube_server_subs: # check transcripts for equality\r\n transcripts_presence['youtube_diff'] = False\r\n except GetTranscriptsFromYouTubeException:\r\n pass\r\n\r\n # Check for html5 local transcripts presence\r\n html5_subs = []\r\n for html5_id in videos['html5']:\r\n filename = 'subs_{0}.srt.sjson'.format(html5_id)\r\n content_location = StaticContent.compute_location(item.location.course_key, filename)\r\n try:\r\n html5_subs.append(contentstore().find(content_location).data)\r\n transcripts_presence['html5_local'].append(html5_id)\r\n except NotFoundError:\r\n log.debug(\"Can't find transcripts in storage for non-youtube video_id: %s\", html5_id)\r\n if len(html5_subs) == 2: # check html5 transcripts for equality\r\n transcripts_presence['html5_equal'] = json.loads(html5_subs[0]) == json.loads(html5_subs[1])\r\n\r\n command, subs_to_use = _transcripts_logic(transcripts_presence, videos)\r\n transcripts_presence.update({\r\n 'command': command,\r\n 'subs': subs_to_use,\r\n })\r\n return JsonResponse(transcripts_presence)",
"def insertTranscription(dbConnection, realtimefactor, transcription, duration, dbID):\n try:\n cursor = dbConnection.cursor()\n cursor.execute(\"UPDATE transcriptions SET realtimefactor = '\" + realtimefactor + \"', transcription = '\" + transcription + \"', datetranscribed = now(), duration = '\" + duration + \"' WHERE id = '\" + str(dbID) + \"';\")\n dbConnection.commit()\n cursor.close()\n return True\n except Exception as e:\n Tools.writeException(\"uploadTranscriptionData\", e)\n return False",
"def transcript_sequence(species,aceVersion,log=0):\n \n os.chdir(os.environ['PYDATA']+'/%s/log'%species)\n logFile=open('%s_ace_transcripts.txt'%species,'w')\n t1=time.time()\n #create ace transcript_sequence\n path=os.environ['PYDATA']+\"/\"+species+\"/aceview/\"+species+\"_transcript_sequence.bkdb\"\n if os.path.exists(path):\n os.remove(path)\n transcriptDB=bsddb.btopen(path,'w')\n \n #test if mRNAs sequences are in one file or in several chromosome files\n try:\n sequenceFile = open('%s/%s_%s/AceView.ncbi_37.all_mrnas_dna.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower()),'r')\n chrFlag=0 \n except: \n chrFlag=1 \n \n if chrFlag: \n #open database for relation between chromosome and Ensembl region\n path=os.environ['PYDATA']+'/'+species+'/ensembl/'+species+'_region_by_chromosome.bkdb'\n chrDB=bsddb.btopen(path,'r')\n chromosomes=chrDB.keys()\n tscriptNb=0 \n for chromosome in chromosomes:\n print 'processing chromosome: '+chromosome\n try: \n sequenceFile = open('%s/%s_%s/x1.all_mrnas_fasta.%s.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower(),chromosome),'r') \n region=chrDB[chromosome] \n geneName='' \n transcriptName=''\n sequence='' \n for lines in sequenceFile:\n tscriptNb=tscriptNb+1 \n line = lines.split('\\n')[0]\n if not line:\n #save last transcript\n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,chromosome=chromosome,region=region,sequence=sequence),protocol=-1)\n break\n # get some informations \n if line[0]=='>': \n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,chromosome=chromosome,region=region,sequence=sequence),protocol=-1)\n transcriptName = line.split(':')[1] \n shortName=transcriptName.split(aceVersion)[0] \n transcriptLetter=shortName.split('.')[-1]\n geneName=shortName.split('.'+transcriptLetter)[0] \n sequence='' \n else:\n # Construct sequence\n sequence=sequence+line\n except:\n logFile.write('no AceView files %s/x1.all_mrnas_fasta.%s.fasta' %(os.environ['ACEDATA'],chromosome)) \n transcriptDB.close()\n chrDB.close()\n else: \n tscriptNb=0 \n sequenceFile = open('%s/%s_%s/AceView.ncbi_37.all_mrnas_dna.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower()),'r') \n geneName='' \n transcriptName=''\n sequence='' \n for lines in sequenceFile:\n tscriptNb=tscriptNb+1 \n line = lines.split('\\n')[0]\n if not line:\n #save last transcript\n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,sequence=sequence),protocol=-1)\n break\n # get some informations \n if line[0]=='>': \n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,sequence=sequence),protocol=-1)\n transcriptName = line.split(':')[1] \n shortName=transcriptName.split(aceVersion)[0] \n transcriptLetter=shortName.split('.')[-1]\n geneName=shortName.split('.'+transcriptLetter)[0] \n sequence='' \n else:\n # Construct sequence\n sequence=sequence+line \n transcriptDB.close()\n \n t2=time.time()\n if log!=0:\n log.write('\\t%u\\t%.2f\\n'%(tscriptNb,t2-t1))",
"async def test_get_transcripts_from_gene(test_db):\n resp = await test_db.get_transcripts_from_gene(\"BRAF\", 2145, 2145)\n assert len(resp) == 32\n\n resp = await test_db.get_transcripts_from_gene(\"BRAF\", 140453136,\n 140453136)\n assert len(resp) == 0",
"def test_process_file(self):\n # 1\n self.assertEqual(get_file_reply(files[0][0], files[0][1]), \"Inserted 4 Records\")\n results = self.database_connection.select('''SELECT COUNT(*) FROM ''' + table_name)[0][0]\n # 2\n self.assertEqual(results, 4)\n # csv, renewing connection\n self.database_connection.connect()\n # 3\n self.assertEqual(get_file_reply(files[1][0], files[1][1]), \"Inserted 4 Records\")\n results = self.database_connection.select('''SELECT COUNT(*) FROM ''' + table_name)[0][0]\n # 4\n self.assertEqual(results, 8)\n self.database_connection.connect()\n # 5\n self.assertFalse(get_file_reply(files[0][0], files[1][1]))",
"def run_tximport():\n eligible_experiments = (\n Experiment.objects.annotate(num_organisms=Count(\"organisms\"))\n .filter(num_organisms=1, technology=\"RNA-SEQ\", num_processed_samples=0)\n .prefetch_related(\"samples__results\")\n )\n\n paginator = Paginator(eligible_experiments, PAGE_SIZE)\n page = paginator.page()\n\n # Next is to figure out how many samples were processed for\n # each experiment. Should be able to reuse code from salmon\n # cause it does this stuff.\n tximport_pipeline = ProcessorPipeline.TXIMPORT\n\n while True:\n creation_count = 0\n\n for experiment in page.object_list:\n quant_results = get_quant_results_for_experiment(experiment)\n\n if should_run_tximport(experiment, quant_results, True):\n processor_job = ProcessorJob()\n processor_job.pipeline_applied = tximport_pipeline.value\n processor_job.ram_amount = 8192\n # This job doesn't need to run on a specific volume\n # but it uses the same Nomad job as Salmon jobs which\n # do require the volume index.\n processor_job.volume_index = random.choice(list(get_active_volumes()))\n processor_job.save()\n\n assoc = ProcessorJobOriginalFileAssociation()\n # Any original file linked to any sample of the\n # experiment will work. Tximport is somewhat special\n # in that it doesn't actuallhy use original files so\n # this is just used to point to the experiment.\n assoc.original_file = experiment.samples.all()[0].original_files.all()[0]\n assoc.processor_job = processor_job\n assoc.save()\n\n creation_count += 1\n\n try:\n send_job(tximport_pipeline, processor_job)\n except Exception:\n # If we cannot queue the job now the Foreman will do\n # it later.\n pass\n\n logger.info(\"Created %d tximport jobs for experiments past the thresholds.\", creation_count)\n\n if not page.has_next():\n break\n else:\n page = paginator.page(page.next_page_number())",
"def start_transcribing():\n transcribe.main()",
"def subprocess_transcribe_function( fname, voicenote_filename_regex ):\n if not hasattr( subprocess_transcribe_function, \"client\" ):\n # Init function failed.\n return None\n if subprocess_transcribe_function.verbose:\n # TODO: We should (probably?) queue these messages and print() on a single thread/process...but....\n print( \"Transcribing {}...\".format( fname ) )\n try:\n ret = ( recording_date_from_full_path( fname, voicenote_filename_regex ), fname, transcribe_wav( fname, client=subprocess_transcribe_function.client ) )\n except BaseException as e:\n # Do NOT kill the program. We'll leave the audio file in the unprocessed directory.\n print( \"ERROR:\" )\n print( e )\n ret = None\n return ret",
"def docxProcessing():\n DOCUMENT_ORIGIN_CODE = \"RADIOLOGIE_SOFTWARE\"\n global DATABASE\n conn = db.create_connection(DATABASE)\n pathFolder = \"fichiers source/\"\n extension = \".docx\"\n docxFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing docx\", end=\"\") \n for file in docxFileArrayPath:\n text = readFile.readDocxFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n db.insert_document(conn, query) \n print(\".\", end = '')\n #commit the changes to db\t\t\t\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def gbk_upload(self):\n t_count = 0\n os.chdir(self.path)\n print(os.getcwd())\n if os.path.isdir(self.path + '/Databases') is False:\n os.mkdir('Databases')\n for tier in os.listdir(os.getcwd()):\n if tier == 'Databases':\n continue\n db_name = str(tier) + '.db'\n if os.path.isfile(self.path + '/Databases/' + db_name) is False:\n print('Copying Template BioSQL Database... '\n 'This may take a few minutes...')\n shutil.copy2(where.Templates + '/Template_BioSQL_DB.db',\n self.path + '/Databases/%s' % db_name)\n else:\n os.remove(self.path + '/Databases/' + db_name)\n print('Copying Template BioSQL Database... '\n 'This may take a few minutes...')\n shutil.copy2(where.Templates + '/Template_BioSQL_DB.db',\n self.path + '/Databases/%s' % db_name)\n\n server = BioSeqDatabase.open_database(\n driver='sqlite3', db=(\n self.path + '/Databases/' + db_name))\n os.chdir(tier)\n for gene in os.listdir(os.getcwd()):\n os.chdir(gene)\n sub_db_name = gene\n for file in os.listdir(os.getcwd()):\n try:\n if sub_db_name not in server.keys():\n server.new_database(sub_db_name)\n db = server[sub_db_name]\n count = db.load(SeqIO.parse(file, 'genbank'))\n server.commit()\n print('Server Commited %s' % sub_db_name)\n print('%s database loaded with %s.' % (db.dbid, file))\n print(\n \"That file contains %s genbank records.\" %\n str(count))\n t_count = t_count + count\n print(\n 'The total number of files loaded so far is %i.' %\n t_count)\n except BaseException:\n server.rollback()\n try:\n del server[sub_db_name]\n server.commit()\n except BaseException:\n raise\n raise\n os.chdir('..')\n os.chdir('..')",
"def check_active_requests():\n\n active_requests = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='waiting'\")\n for request in active_requests:\n\n\t# Check requested status \n\tif DownloaderSPAN512.check_request_done(request):\n\t dlm_cout.outs(\"Restore (GUID: %s) has succeeded. Will create file entries.\\n\" % request['guid'])\n\t create_file_entries(request)\n\n\telse:\n#\t dlm_cout.outs(\"Request (GUID: %s) has failed.\\n\" \\\n#\t \"\\tDatabase failed to report the data as restored.\" % request['guid'])\n#\t jobtracker.query(\"UPDATE requests SET status='failed', \" \\\n# \"details='Request failed. Why ?', \" \\\n# \"updated_at='%s' \" \\\n# \"WHERE guid='%s'\" % (jobtracker.nowstr(), request['guid']))\n\n query = \"SELECT (TO_SECONDS('%s')-TO_SECONDS(created_at)) \" \\\n \"AS deltaT_seconds \" \\\n \"FROM requests \" \\\n \"WHERE guid='%s'\" % \\\n (jobtracker.nowstr(), request['guid'])\n row = jobtracker.query(query, fetchone=True)\n #if row['deltaT_seconds']/3600. > config.download.request_timeout:\n if row/3600. > config.download.request_timeout:\n dlm_cout.outs(\"Restore (GUID: %s) is over %d hr old \" \\\n \"and still not ready. Marking \" \\\n \"it as failed.\" % \\\n (request['guid'], config.download.request_timeout))\n jobtracker.query(\"UPDATE requests \" \\\n \"SET status='failed', \" \\\n \"details='Request took too long (> %d hr)', \" \\\n \"updated_at='%s' \" \\\n \"WHERE guid='%s'\" % \\\n (config.download.request_timeout, jobtracker.nowstr(), \\\n request['guid']))",
"def checkFiles(self): \r\n mdate_filenames_list = []\r\n mdate_filenames_tuple = {}\r\n last24 = []\r\n now = datetime.datetime.now() \r\n noise,ft = file_type.split('.')\r\n ## note can do an entry bg color stoplight thing >24 hrs = red, 12-24 hrs = yellow < 12 = green nice little if loop\r\n for f in filenames_list:\r\n if os.path.isfile(f):\r\n lastmod_date = datetime.datetime.fromtimestamp(os.path.getmtime(f))\r\n mdate_filenames_tuple = lastmod_date, f\r\n mdate_filenames_list.append(mdate_filenames_tuple)\r\n \r\n if now - lastmod_date < file_age:\r\n \r\n #print (\"{} was last modified on {:%a %b %d %Y, %H:%M:%S, %Z}. Moving to 'destinaiton' transfer folder.\".format(f, lastmod_date))\r\n last24.append(f)\r\n shutil.copy2(f, destination)\r\n xferTime=time.time()\r\n \r\n fa = str(file_age) \r\n with sqlite3.connect('fileTransfer.db') as connection:\r\n c = connection.cursor()\r\n c.execute(\"INSERT INTO tbl_lastRun(col_timestamp, col_source, col_destination, col_file_type, col_file_age) VALUES (?,?,?,?,?)\",(xferTime, source, destination, ft, hrs))\r\n connection.commit()\r\n connection.close \r\n\r\n clear(self)\r\n ask_quit(self)",
"def database_script_check(table, bs_id, attempt_num):\n con = lite.connect(DB_FILE, timeout = TIMEOUT)\n con.row_factory = lite.Row\n with con:\n cur = con.cursor()\n #get script data\n cur.execute(\"SELECT * FROM {0} WHERE AttemptNum=? AND BSID=?\".format(table),\n (attempt_num, bs_id))\n rows = cur.fetchall()\n\n error_data = []\n for row in rows:\n if row['Started'] == None or row['Ended'] == None or row['Exit'] != 0:\n error_data.append([row['Command'], row['Arguments'], row['ExpProc']])\n return error_data",
"def createStructuredTranscript_Non_Core_Doc():\n\n #create a temporary folder that will hold the data transformed from doc to docx\n os.system('mkdir ' + INPUT_FOLDER+'temp')\n\n core_doc_asset = []\n missing_count = 0\n missing_files=[]\n # get all the docx files that are part of the core asset\n for file in glob.glob(INPUT_FOLDER+\"*.doc\"):\n\n # RG numbers for the core asset\n if (\"RG-50.030\" not in file and\n \"RG-50.106\" not in file and\n \"RG-50.549\" not in file):\n \n\n \n # convert file to docx, storing it in an untracked folder called temp\n file_docx = file + 'x'\n command = 'textutil -convert docx ' + file + ' -output ' + INPUT_FOLDER+'temp/'+ file_docx.split('/')[-1]\n call(command, shell=True)\n\n # append to the array\n core_doc_asset.append(file_docx)\n \n\n \n\n # get the units for each file, store them and update tracker\n core_doc_asset=create_dictionary_of_file_list(core_doc_asset)\n \n not_processed=0\n processed_doc=0\n \n # get the units for each file, store them and update tracker \n for mongo_rg in core_doc_asset:\n # get text units for this entry\n processed=[]\n result=[]\n \n for file in core_doc_asset[mongo_rg]:\n \n \n \n units = getTextUnits(INPUT_FOLDER+'temp/'+file.split('/')[-1])\n \n if units:\n #replace white spaces\n for i,element in enumerate(units):\n units[i]['unit']=' '.join(element['unit'].split())\n result.extend(units)\n \n processed.append(True)\n else:\n #check if processed\n processed.append(False)\n\n #set the method used to transform the transcript\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"method\", \"transcribe_non_core_doc\")\n\n not_processed=not_processed+1\n\n if False in processed:\n\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Unprocessed\")\n not_processed=not_processed+1\n missing_files.append(' '.join(core_doc_asset[mongo_rg]))\n else:\n # insert units on the output collection\n h.update_field(DB, OUTPUT, \"shelfmark\", 'USHMM '+mongo_rg, \"structured_transcript\", result)\n\n \n # update status on the stracker\n \n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Processed\")\n processed_doc=processed_doc+1\n \n\n #delete the temporary folder\n os.system('rm -r ' + INPUT_FOLDER+'temp')\n\n \n #write the missing files to text file\n file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_doc_failed.txt','w')\n file.write('\\n'.join(missing_files))\n\n \n # success\n pprint.pprint(\"Non-core doc files were successfully processed, but there are \" + str(missing_count) + \" missing\")",
"def verify_count(upload_id, localstore, language):\n reader = csv.DictReader(open(localstore, 'r'))\n\n if len(reader.fieldnames) < 2:\n msg = _('There are missing columns in the uploaded Subject file')\n\n return {'task_id': None, 'success': False, 'messages': [msg]}\n\n upload = SubjectUpload.objects.get(id=upload_id)\n upload.subject_name = reader.fieldnames[1][0:50]\n upload.save()\n\n logger.debug('Created new SubjectUpload transaction record for \"%s\".',\n upload.subject_name)\n\n # do this in bulk!\n # insert upload_id, portable_id, number\n sql = 'INSERT INTO \"%s\" (\"%s\",\"%s\",\"%s\") VALUES (%%(upload_id)s, %%(geoid)s, %%(number)s)' % (\n SubjectStage._meta.db_table, SubjectStage._meta.fields[1].attname,\n SubjectStage._meta.fields[2].attname,\n SubjectStage._meta.fields[3].attname)\n args = []\n\n try:\n for row in reader:\n args.append({\n 'upload_id': upload.id,\n 'geoid': row[reader.fieldnames[0]].strip(),\n 'number': row[reader.fieldnames[1]].strip()\n })\n # django ORM takes about 320s for 280K geounits\n #SubjectStage(upload=upload, portable_id=row[reader.fieldnames[0]],number=row[reader.fieldnames[1]]).save()\n\n # direct access to db-api takes about 60s for 280K geounits\n cursor = connection.cursor()\n cursor.executemany(sql, tuple(args))\n\n logger.debug('Bulk loaded CSV records into the staging area.')\n except AttributeError:\n msg = _('There are an incorrect number of columns in the uploaded '\n 'Subject file')\n\n return {'task_id': None, 'success': False, 'messages': [msg]}\n except Exception:\n msg = _('Invalid data detected in the uploaded Subject file')\n\n return {'task_id': None, 'success': False, 'messages': [msg]}\n\n nlines = upload.subjectstage_set.all().count()\n geolevel, nunits = LegislativeLevel.get_basest_geolevel_and_count()\n\n prev_lang = None\n if not language is None:\n prev_lang = get_language()\n activate(language)\n\n # Validation #1: if the number of geounits in the uploaded file\n # don't match the geounits in the database, the content is not valid\n if nlines != nunits:\n # The number of geounits in the uploaded file do not match the base geolevel geounits\n msg = _(\n 'There are an incorrect number of geounits in the uploaded Subject file. '\n )\n if nlines < nunits:\n missing = nunits - nlines\n msg += _n('There is %(count)d geounit missing.',\n 'There are %(count)d geounits missing.', missing) % {\n 'count': missing\n }\n else:\n extra = nlines - nunits\n msg += _n('There is %(count)d extra geounit.',\n 'There are %(count)d extra geounits.', extra) % {\n 'count': extra\n }\n\n # since the transaction was never committed after all the inserts, this nullifies\n # all the insert statements, so there should be no quarantine to clean up\n\n logger.debug(msg)\n\n upload.status = 'ER'\n upload.save()\n\n status = {'task_id': None, 'success': False, 'messages': [msg]}\n\n else:\n # The next task will preload the units into the quarintine table\n task = verify_preload.delay(upload_id, language=language).task_id\n\n status = {\n 'task_id': task,\n 'success': True,\n 'messages': [_('Verifying consistency of uploaded geounits ...')]\n }\n\n # reset language to default\n if not prev_lang is None:\n activate(prev_lang)\n\n return status",
"def run():\n check_active_requests()\n start_downloads()\n check_download_attempts()\n numsuccess = verify_files()\n recover_failed_downloads()\n check_downloading_requests()\n acknowledge_downloaded_files()\n if can_request_more():\n make_request()\n return numsuccess",
"def verify_files():\n toverify = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='unverified'\")\n\n numverified = 0\n for file in toverify:\n\n actualsize = pipeline_utils.get_file_size(file['filename'])\n\n expectedsize = file['size']\n\n last_attempt_id = jobtracker.query(\"SELECT id \" \\\n \"FROM download_attempts \" \\\n \"WHERE file_id=%s \" \\\n \"ORDER BY id DESC \" % file['id'], \\\n fetchone=True)\n \n queries = []\n if actualsize == expectedsize:\n dlm_cout.outs(\"Download of %s is complete and verified.\" % \\\n os.path.split(file['filename'])[-1])\n # Everything checks out!\n queries.append(\"UPDATE files \" \\\n \"SET status='downloaded', \" \\\n \"details='Download is complete and verified', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='downloaded', \" \\\n \"details='Download is complete and verified', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), last_attempt_id))\n\n\t # Mark the beam as downloaded in the main database\n\t #mark_beam_downloaded(os.path.split(file['filename'])[-1]))\n\n numverified += 1\n else:\n dlm_cout.outs(\"Verification of %s failed. \\n\" \\\n \"\\tActual size (%d bytes) != Expected size (%d bytes)\" % \\\n (os.path.split(file['filename'])[-1], actualsize, expectedsize))\n \n # Boo... verification failed.\n queries.append(\"UPDATE files \" \\\n \"SET status='failed', \" \\\n \"details='Downloaded file failed verification', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='verification_failed', \" \\\n \"details='Downloaded file failed verification', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), last_attempt_id))\n jobtracker.query(queries)\n return numverified",
"def resetScript(dbConnection, maxConcurrent):\n while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins\n time.sleep(120)\n emptyPodcastFolder = Tools.cleanupFolder(\"podcasts\")\n DatabaseInteract.refreshDatabase(dbConnection)",
"def process_files(file_list, mdb_database, mdb_user, mdb_pwd, mdb_server, mdb_auth):\n uri = \"mongodb://{0}:{1}@{2}/{3}\".format(mdb_user, mdb_pwd, mdb_server, mdb_auth)\n db_name = '{0}'.format(mdb_database)\n client = pymongo.MongoClient(uri)\n db = client[db_name]\n raw_msg = db['raw_messages']\n total_processed = 0\n total_queries = 0\n total_error = 0\n unique_queries_to_add = []\n queries_hash = set()\n\n for file_name in file_list:\n print('--> processing file: {0}'.format(file_name))\n with open(file_name) as f:\n lines = f.readlines()\n total_queries += len(lines)\n for i, line in enumerate(lines):\n try:\n jsonn = json.loads(line)\n jsonn['insertTime'] = get_timestamp()\n doc_hash = calculate_hash(jsonn)\n if doc_hash not in queries_hash:\n unique_queries_to_add.append(jsonn)\n queries_hash.add(doc_hash)\n total_processed += 1\n except Exception as e:\n print('ERROR: file {0} line {1} --- {2}'.format(file_name, i, e))\n total_error += 1\n\n total_unique_queries = len(unique_queries_to_add)\n print('- Total processed: {0} from {1}'.format(total_processed, total_queries))\n print('- Total unique queries: {0} '.format(total_unique_queries))\n print('- Total errors: {0}'.format(total_error))\n print('- Adding queries to MongoDB')\n for jsonn in tqdm(unique_queries_to_add):\n raw_msg.insert_one(jsonn)",
"def ifAlreadyDone(self, cxRepo, schemaRepo, schema, tablename):\n logging.debug(f\"\"\"check if {schema}.{tablename} has been analyzed\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name from {schemaRepo}.tablediff where lower\n (table_name) = lower('{tablename}') and schema1 = '{schema}' and\n server1_status = 'ready' and server1_status = 'ready' and result in\n ('ready', 'init')\"\"\"\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n row = curs.fetchone()\n if row is None:\n return 1\n else:\n return 0",
"def should_start_analysis(self):\n return len(self.task_queue) >= self.bulk_size",
"def validate_and_submit(self, filename):\n\n matches = [p for p in self.process_list[self.name] if filename == p.source]\n if filename not in self.transfer_queue[self.name] and not matches:\n t = threading.Thread(target=self.is_stable, args=(filename,))\n t.setDaemon(True)\n t.start()",
"def processFile(fileName):\n\n cursor = db.cursor()\n cursor.execute(\"BEGIN\")\n institutionCounter = 0\n\n def submitInstitute(bankCode, bankName, bic):\n try:\n cursor.execute(\"INSERT INTO institutions (bankCode, bic, name) VALUES(?,?,?)\", (bankCode, bic, bankName))\n except sqlite3.Error as e:\n print(\"Sorry , Error: {0} while inserting {1} ({2})\".format(e.args[0], bankCode, bic))\n\n book = xlrd.open_workbook(fileName, 'r')\n sheet = book.sheet_by_index(0)\n\n for row_index in range(2, sheet.nrows):\n submitInstitute(sheet.cell(row_index,0).value, sheet.cell(row_index,2).value, sheet.cell(row_index,1).value)\n institutionCounter += 1\n\n return institutionCounter",
"def task_run_core():\n\n ## initialize parameters\n if task_get_option('format'):\n fmts = task_get_option('format')\n else:\n fmts = 'HB' # default value if no format option given\n for fmt in fmts.split(','):\n last_updated = fetch_last_updated(fmt)\n write_message(\"last stored run date is %s\" % last_updated)\n\n sql = {\n \"all\" : \"\"\"SELECT br.id FROM bibrec AS br, bibfmt AS bf\n WHERE bf.id_bibrec = br.id AND bf.format = '%s'\"\"\" % fmt,\n \"last\": \"\"\"SELECT br.id FROM bibrec AS br\n INNER JOIN bibfmt AS bf ON bf.id_bibrec = br.id\n WHERE br.modification_date >= '%(last_updated)s'\n AND bf.format='%(format)s'\n AND bf.last_updated < br.modification_date\"\"\" \\\n % {'format': fmt,\n 'last_updated': last_updated.strftime('%Y-%m-%d %H:%M:%S')},\n \"missing\" : \"\"\"SELECT br.id\n FROM bibrec as br\n LEFT JOIN bibfmt as bf\n ON bf.id_bibrec = br.id AND bf.format ='%s'\n WHERE bf.id_bibrec IS NULL\n AND br.id BETWEEN %%s AND %%s\n \"\"\" % fmt,\n }\n sql_queries = []\n cds_query = {}\n if task_has_option(\"all\"):\n sql_queries.append(sql['all'])\n if task_has_option(\"last\"):\n sql_queries.append(sql['last'])\n if task_has_option(\"collection\"):\n cds_query['collection'] = task_get_option('collection')\n else:\n cds_query['collection'] = \"\"\n\n if task_has_option(\"field\"):\n cds_query['field'] = task_get_option('field')\n else:\n cds_query['field'] = \"\"\n\n if task_has_option(\"pattern\"):\n cds_query['pattern'] = task_get_option('pattern')\n else:\n cds_query['pattern'] = \"\"\n\n if task_has_option(\"matching\"):\n cds_query['matching'] = task_get_option('matching')\n else:\n cds_query['matching'] = \"\"\n\n if task_has_option(\"recids\"):\n recids = split_cli_ids_arg(task_get_option('recids'))\n else:\n recids = []\n\n ### sql commands to be executed during the script run\n ###\n bibreformat_task(fmt, sql, sql_queries, cds_query, task_has_option('without'), not task_has_option('noprocess'), recids)\n return True",
"def is_ready_to_recognize():\n time.sleep(0.01)\n ready_to_recognize_file = open(\"varThread\\\\ready_recognize.txt\", \"r\")\n if int(ready_to_recognize_file.read(1)) == 1:\n ready_to_recognize_file.close()\n is_recognized_file = open('varThread\\\\recognized.check', 'r')\n is_recognized = is_recognized_file.read(1)\n if is_recognized == '0':\n is_recognized_file.close()\n return True \n else:\n is_recognized_file.close()\n return False \n else:\n ready_to_recognize_file.close()\n return False",
"def abortStageIn(dbh, lfns, DBReleaseIsAvailable):\n\n numberOfFiles = len(lfns)\n numberOfDBReleaseFiles = 0\n\n if DBReleaseIsAvailable:\n for lfn in lfns:\n if isDBReleaseFile(dbh, lfn): # multi-trf jobs will have more than one DBRelease file\n numberOfDBReleaseFiles += 1\n\n if numberOfDBReleaseFiles < numberOfFiles:\n tolog(\"Number of locally available DBRelease files = %d (%d files in total), continue with stage-in\" % (numberOfDBReleaseFiles, numberOfFiles))\n status = False # do not abort stage-in\n else:\n tolog(\"Number of locally available DBRelease files = %d (%d files in total), abort stage-in\" % (numberOfDBReleaseFiles, numberOfFiles))\n status = True # abort stage-in\n\n return status",
"def insert_files(db: DB, files: list, user_id: int,) -> bool:\n db_updated = False\n for file in files:\n\n with open(file, newline='') as csv_file:\n rows = list(csv.reader(csv_file))\n\n row = 1 if \"Apple\" in file else 4\n\n for i in range(row, len(rows)):\n receipt = None\n\n if \"Apple\" in file:\n receipt = AppleReceipt(db, rows[i], user_id)\n elif \"ESL\" in file:\n receipt = ESLReceipt(db, rows[i], user_id)\n\n if receipt is not None and not receipt.exists_in_db():\n receipt.insert_to_db()\n db_updated = True\n\n if db_updated:\n log(\"The database has been updated.\", level=\"debug\")\n else:\n log(\"No updates made to the database.\", level=\"debug\")\n\n return db_updated",
"def transcribe_proc():\n while True:\n # Get result of transcription\n transcribe_result = transcriber.transcribe_stream(\n audio_stream(), sample_rate, sample_width, channels\n )\n\n _LOGGER.debug(\"Transcription result: %s\", transcribe_result)\n\n transcribe_result = transcribe_result or Transcription.empty()\n transcribe_dict = dataclasses.asdict(transcribe_result)\n transcribe_dict[\"timeout\"] = is_timeout\n\n print_json(transcribe_dict)\n transcription_printed.set()",
"def perform_process(transformer: transformer_class.Transformer, check_md: dict) -> dict:\n # Process each CSV file into BETYdb\n start_timestamp = datetime.datetime.now()\n files_count = 0\n files_csv = 0\n lines_read = 0\n error_count = 0\n files_loaded = []\n for one_file in check_md['list_files']():\n files_count += 1\n if os.path.splitext(one_file)[1].lower() == '.csv':\n files_csv += 1\n\n # Make sure we can access the file\n if not os.path.exists(one_file):\n msg = \"Unable to access csv file '%s'\" % one_file\n logging.debug(msg)\n return {'code': -1000,\n 'error': msg}\n\n try:\n # Read in the lines from the file\n with open(one_file, 'r') as in_file:\n reader = csv.DictReader(in_file)\n files_loaded.append(one_file)\n for row in reader:\n centroid_lonlat = [row['lon'], row['lat']]\n time_fmt = row['dp_time']\n timestamp = row['timestamp']\n dp_metadata = {\n \"source\": row['source'],\n \"value\": row['value']\n }\n trait = row['trait']\n\n __internal__.create_datapoint_with_dependencies(transformer.args.clowder_url, transformer.args.clowder_key,\n trait, (centroid_lonlat[1], centroid_lonlat[0]), time_fmt,\n time_fmt, dp_metadata, timestamp)\n lines_read += 1\n\n except Exception:\n logging.exception(\"Error reading CSV file '%s'. Continuing processing\", os.path.basename(one_file))\n error_count += 1\n\n if files_csv <= 0:\n logging.info(\"No CSV files were found in the list of files to process\")\n if error_count > 0:\n logging.error(\"Errors were found during processing\")\n return {'code': -1001, 'error': \"Too many errors occurred during processing. Please correct and try again\"}\n\n return {\n 'code': 0,\n configuration.TRANSFORMER_NAME: {\n 'version': configuration.TRANSFORMER_VERSION,\n 'utc_timestamp': datetime.datetime.utcnow().isoformat(),\n 'processing_time': str(datetime.datetime.now() - start_timestamp),\n 'num_files_received': str(files_count),\n 'num_csv_files': str(files_csv),\n 'lines_loaded': str(lines_read),\n 'files_processed': str(files_loaded)\n }\n }"
] | [
"0.5762406",
"0.56196094",
"0.5543294",
"0.54251677",
"0.54135394",
"0.53959346",
"0.5356444",
"0.5315741",
"0.5314573",
"0.5207271",
"0.5122057",
"0.510715",
"0.50821364",
"0.50786346",
"0.5072173",
"0.50637865",
"0.50570357",
"0.505323",
"0.50277853",
"0.5014339",
"0.4977006",
"0.49643832",
"0.49452335",
"0.49304596",
"0.4917264",
"0.4916763",
"0.4914915",
"0.49081287",
"0.4903229",
"0.49031702"
] | 0.7170837 | 0 |
Waits for the running transcription processes to end (2 min intervals). \n Then deletes everything in the 'podcasts' folder, parses all transcripts, and updates the databases | def resetScript(dbConnection, maxConcurrent):
while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins
time.sleep(120)
emptyPodcastFolder = Tools.cleanupFolder("podcasts")
DatabaseInteract.refreshDatabase(dbConnection) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def task_queue_podcasts():\n data = get_task_arguments()\n user_uid = data[\"user_uid\"]\n\n client = google.cloud.storage.Client()\n bucket = client.get_bucket(settings.PODCAST_STORAGE_BUCKET)\n podcasts = Podcast.get_user_podcasts(user_uid)\n for podcast in podcasts:\n old_entries = [entry for entry in podcast.feed.entries\n if entry.published + datetime.timedelta(settings.EPISODE_EXPIRATION_DAYS) <\n datetime.datetime.utcnow()]\n\n for old_entry in old_entries:\n link = old_entry.link\n path = urllib.parse.urlparse(link).path\n bucket_relative_path = os.path.sep.join(path.split(os.path.sep)[2:])\n blob = bucket.blob(bucket_relative_path)\n blob.delete()\n podcast.feed.remove(old_entry)\n podcast.save()\n\n # determine if the podcast has been used in recent enough time\n if podcast.last_accessed + datetime.timedelta(settings.PODCAST_EXPIRATION_DAYS) < \\\n datetime.datetime.utcnow():\n podcast.delete()\n else:\n add_task(url_for(\"task_recursive_download_podcast\"),\n {\"user_uid\": user_uid, \"podcast_id\": podcast.id})\n return OK_RESPONSE",
"def transcribe_proc():\n while True:\n # Get result of transcription\n transcribe_result = transcriber.transcribe_stream(\n audio_stream(), sample_rate, sample_width, channels\n )\n\n _LOGGER.debug(\"Transcription result: %s\", transcribe_result)\n\n transcribe_result = transcribe_result or Transcription.empty()\n transcribe_dict = dataclasses.asdict(transcribe_result)\n transcribe_dict[\"timeout\"] = is_timeout\n\n print_json(transcribe_dict)\n transcription_printed.set()",
"def cleanup(self):\n self.all_wav_to_mp3()\n self.past_songs_db.close()\n self.move_tracks_to_music_folder( )\n self.delete_leftovers()\n print \"Cleanup finished\"",
"def parseUpload(dbconnection, fileName):\n nhContent = ParseText.nohupTranscriptionContent(fileName)\n count = 0\n while count < len(nhContent[0]):\n try:\n rtf = nhContent[0][count]\n transcription = nhContent[1][count].replace(\"'\", \"''\").replace(\"_\", \"\")\n dbID = nhContent[2][count].replace(\".\", \"\")\n duration = nhContent[3][count]\n DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)\n count += 1\n except:\n print(\"couldnt upload one at index \" + str(count))\n count += 1",
"def transcribeAll(service, url, fileName):\n if(service == \"omny.fm\"):\n url = url.replace(\".mp3\",\"\") + \".mp3\"\n subprocess.Popen(\"wget -c -O ./podcasts/\" + fileName + \".mp3 \" + url + \" && sleep 40 && ffmpeg -i ./podcasts/\"\n + fileName + \".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/\" + fileName + \".wav && sleep 10 && rm ./podcasts/\" \n + fileName + \".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false \"\n + \"--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 \"\n + \"--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id\" + fileName \n + \" utterance-id\" + fileName + \"|' 'scp:echo utterance-id\" + fileName + \" ./podcasts/\" + fileName + \".wav|' 'ark:/dev/null' &\", shell=True)",
"def wait(self):\n num_pings = 0\n # Some streams seem to start fine with up to 4 pings before beginning download?\n # More investigation is needed\n max_pings = 1 + self._pingouts\n # timeout after 1 minute\n timeout = datetime.datetime.now() + datetime.timedelta(minutes=1)\n try:\n for line in self._process.stderr:\n # TODO: add mpegts or other variants depending on the container settings? or no?\n # if \"Output #0, mp4\" in line:\n if \"Output #0\" in line:\n self._process.communicate()\n self.move_to_dest()\n self._pingouts = 0\n break\n elif \"HandleCtrl, Ping\" in line:\n num_pings += 1\n if num_pings > max_pings:\n # The main issue with this is that the slain processes will not have their files moved\n # But I think this is preferable to the other solutions I've come up with.\n # For future reference, those were:\n #\n # 1) Sending SIGINT then continuing to read stderr until it exited (sometimes it doesn't)\n # 2) Sending SIGINT, storing a reference to the process, then restarting the download.\n # This prevents the process from being garbage collected until the Watcher is\n # 3) Sending SIGINT, then storing info about src and dest paths for the stopped download.\n # If a reference to the process is NOT stored, there's no way to be sure it has finished writing\n # (if it's writing at all). The only way was to give them a grace period and then just start\n # moving, but this adds undesirable time to the cleanup phase, when we may want to restart\n # a falsely completed Watcher asap.\n # 4) Just moving the file straightaway. This is obviously bad since ffmpeg takes a few moments to\n # finish.\n # NOTE: only option #1 was actually tried, the others were partially written before being\n # abandoned as their problems became clear\n #\n # Two additional options exist (not mutually exclusive):\n # 1) Passing the dead processes off to a queue and having another thread clean up.\n # 2) Having regular maintenance sweep the active folder and move files it can be sure are done\n # to their proper folders.\n #\n # I *probably* need to use 1) eventually, especially once I figure out how to actually end\n # stuck processes without killing the parent. But it requires a lot more code.\n # Until then let's just see how this works.\n #\n # When that time does come, a Downloader copy constructor may be useful.\n download_logger.debug(\"Download pinged {} times: Stopping\".format(num_pings))\n self._pingouts += 1\n self.stop()\n\n # close stderr to force the loop to exit\n time.sleep(0.1)\n self._process.stderr.close()\n time.sleep(0.1)\n # process will be garbage collected when the next one is started, or the Watcher dies\n # self._process = None\n # This *should* work for newer builds of FFmpeg without librtmp.\n # Only question is whether 1 minute is too long (or too short).\n # UPDATE: Why doesn't this ever seem to work?\n # is it because FFmpeg freezes output and hangs now? so we're never getting another line to iterate over\n # elif datetime.datetime.now() > timeout:\n # download_logger.debug(\"Download of {} timed out\".format(self.outfile))\n # self.stop()\n # time.sleep(0.1)\n # self._process.stderr.close()\n # time.sleep(0.1)\n else:\n time.sleep(0.2)\n\n except ValueError:\n download_logger.debug('ffmpeg stderr closed unexpectedly')\n\n # Is it possible for the process to end prematurely?\n return self._process.returncode",
"def _cleanUp(self):\r\n limit = datetime.now() - timedelta(seconds=self._timeout)\r\n\r\n toClean = [msg for msg in self._incompleteMsgs if msg.older(limit)]\r\n\r\n if toClean:\r\n for msg in toClean:\r\n self._incompleteMsgs.remove(msg)\r\n\r\n log.msg('{0} incomplete messages have been dropped '\r\n 'from assembler.'.format(len(toClean)))\r\n\r\n toClean = [uri for uri, (_, timestamp) in self._binaries.iteritems()\r\n if timestamp < limit]\r\n\r\n if toClean:\r\n for uri in toClean:\r\n del self._binaries[uri]\r\n\r\n log.msg('{0} unused binaries have been dropped '\r\n 'from assembler.'.format(len(toClean)))",
"def Main(self):\n while not self.IsStopping():\n if not self.args.truncate_interval:\n # Truncating is disabled. But we should keep the main thread running,\n # or else PluginSandbox will assume the plugin has crashed, and will\n # take the plugin down.\n # TODO(kitching): Consider altering PluginSandbox to allow Main to\n # return some particular value which signifies \"I am\n # exiting of my own free will and I should be allowed to\n # continue running normally.\"\n self.Sleep(100)\n continue\n\n self.info('Truncating database...')\n self.buffer_file.Truncate()\n self.info('Truncating complete. Sleeping %d secs...',\n self.args.truncate_interval)\n self.Sleep(self.args.truncate_interval)",
"def finish(self):\n old_message = None\n cooldown = 5\n while not self.queue_manager.check_finished():\n status = self.get_upload_status(0)\n datagen_workers = f\"{status.sets_being_generated} data generators, \"\n msg = f\"Waiting for {datagen_workers}{status.sets_being_loaded} uploads to finish\"\n if old_message != msg or cooldown < 1:\n old_message = msg\n self.logger.info(msg)\n self.update_running_totals()\n self.print_running_totals()\n cooldown = 5\n else:\n cooldown -= 1\n time.sleep(WAIT_TIME)\n\n self.log_failures()\n\n self.logger.info(\"\")\n self.logger.info(\" == Results == \")\n self.update_running_totals()\n self.print_running_totals()\n elapsed = format_duration(timedelta(seconds=time.time() - self.start_time))\n\n if self.run_until.sobject_name:\n result_msg = f\"{self.sobject_counts[self.run_until.sobject_name].successes} {self.run_until.sobject_name} records and associated records\"\n else:\n result_msg = f\"{self.run_until.target:,} iterations\"\n\n self.logger.info(f\"☃ Snowfakery created {result_msg} in {elapsed}.\")",
"def finish_stager_tasks(self):\n\n update_files = {}\n messages = []\n while not self.finished_queue.empty():\n file = self.finished_queue.get()\n update_files[file['content_id']] = {'status': ContentStatus.AVAILABLE,\n 'pfn_size': file['pfn_size'],\n 'pfn': file['pfn']}\n msg = {'event_type': 'FILE_AVAILABLE',\n 'payload': {'scope': file['scope'],\n 'name': file['name'],\n 'startEvent': file['min_id'],\n 'lastEvent': file['max_id'],\n 'pfn': file['pfn']},\n 'created_at': date_to_str(datetime.datetime.utcnow())}\n messages.append(msg)\n\n self.logger.info('Got %s staged outputs' % len(update_files))\n update_contents_by_id(update_files)\n\n if self.send_messaging:\n for msg in messages:\n self.messaging_queue.put(msg)",
"def cleaner():\n session = Session()\n while True:\n _database_operations.purge_old_jobs(session)\n time.sleep(30)",
"def main(self):\n no_posts_found = 0\n while True:\n print(f\"...Searching for posts to cleanse..\")\n for post in self.reddit.subreddit(self.subreddit).stream.submissions(pause_after=1):\n if post is None:\n no_posts_found += 1\n print(f\".....Will run through {self.subreddit} one final time\")\n break\n else:\n if post.locked:\n post.mod.remove()\n else:\n post.mod.lock()\n post.mod.remove()\n print(f\"Post removed: {post.id}\")\n if no_posts_found == 2:\n print(f\"{self.subreddit} has been successfully cleansed.\")\n break\n print(f\"...Taking a small break! Be back in {self.delay} seconds\")\n time.sleep(self.delay)",
"def process_transcript(transcript_label):\n transcript_key = f\"{transcript_label}.json\"\n\n # Load Transcribe output from S3.\n raw_transcript = get_transcribe_output(transcript_key)\n\n # Parse to assign speaker parts.\n speaker_parts = assign_speakers(raw_transcript)\n\n # Identify Karen and Georgia.\n assigned = karen_or_georgia(speaker_parts)\n\n # Update the full transcript.\n build_transcript(assigned)\n\n # Upload the latest transcript to S3.\n s3 = boto3.resource(\"s3\")\n s3.Bucket(os.getenv(\"S3_BUCKET\")).upload_file(\"main_transcript.txt\", \"main_transcript.txt\")",
"def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')",
"async def clean_up():\n # Load Settings\n settings = await fetch_settings()\n\n try:\n if settings[\"previous version\"] == settings[\"version\"]:\n await upgrade()\n except KeyError:\n await upgrade()\n\n old_version = settings[\"previous version\"]\n new_version = settings[\"version\"]\n\n if float(new_version) <= 1.2:\n # Deleting repeats\n connection = await connect()\n repeats = await connection.fetch(f\"\"\"\n SELECT * FROM \"{settings[\"table\"]}\"\n WHERE \"UID\" IN (SELECT \"UID\" FROM \"{settings[\"table\"]}\" GROUP BY \"UID\" HAVING COUNT(*) > 1);\n \"\"\")\n\n uniques = {}\n removed = []\n\n for article in repeats:\n if article[\"UID\"] in uniques.keys():\n removed.append(uniques[article[\"UID\"]][\"ID\"])\n uniques[article[\"UID\"]] = article\n\n for article_id in removed:\n await connection.execute(f\"\"\"\n DELETE FROM \"{settings[\"table\"]}\"\n WHERE \"ID\" = {article_id};\n \"\"\")\n\n # Fixing IDs\n all_articles = await connection.fetch(f\"\"\"\n SELECT * FROM \"{settings[\"table\"]}\";\n \"\"\")\n\n transaction = connection.transaction()\n await transaction.start()\n\n try:\n # Empty Table\n await connection.execute(f\"\"\"\n DELETE FROM \"{settings[\"table\"]}\";\n \"\"\")\n\n # Reset ID Column\n await connection.execute(f\"\"\"\n ALTER SEQUENCE \"{settings[\"table\"]}_ID_seq\"\n RESTART WITH 1\n \"\"\")\n\n # Reinsert Articles\n for article in all_articles:\n text = unquote(article[\"Text\"].replace(\"'\", \"''\"))\n\n date_released = article[\"dateReleased\"]\n if date_released.year >= 3300:\n date_released = date_released.replace(year=(article[\"dateReleased\"].year - GAME_YEAR_OFFSET))\n\n title = article[\"Title\"].strip().replace(\"'\", \"''\")\n if title == \"\" or title is None:\n title = \"No Title Available\"\n\n await connection.execute(f\"\"\"\n INSERT INTO \"{settings[\"table\"]}\" (\"Title\", \"UID\", \"dateReleased\", \"dateAdded\", \"Text\")\n VALUES ($1, $2, $3, $4, $5);\n \"\"\", title, article[\"UID\"], date_released, article[\"dateAdded\"], text)\n except Exception as e:\n print(\"\\n\\nProcess failed due to exception. Reverting.\\n\\n\")\n await transaction.rollback()\n raise e\n\n else:\n await transaction.commit()\n\n await connection.close()\n\n settings = await fetch_settings()\n settings[\"previous version\"] = settings[\"version\"]\n\n with open(\"Settings.json\", \"w\") as file:\n json.dump(settings, file, indent=2)",
"def min_cleanup(self):\n self.past_songs_db.close()",
"def runAutoCheck(dbConnection, maxConcurrent):\n # checks if any shows are pending.\n fileContent = DatabaseInteract.checkPre(dbConnection)\n if(len(fileContent) > 0 and Tools.numRunningProcesses() < maxConcurrent):\n cursor = dbConnection.cursor()\n cursor.execute(\"UPDATE transcriptions SET pending = TRUE WHERE id = '\" + str(fileContent[1]) + \"';\")\n dbConnection.commit()\n cursor.close()\n url = fileContent[0]\n indexID = str(fileContent[1]) # get the ID instead of the filename\n service = str(fileContent[3])\n # podcastName = fileContent[2]\n Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done",
"async def process(self, timeout=60):\n\n previous_date = self.previous_date()\n new_date = previous_date\n last_sent_message_date = previous_date\n now = pendulum.now('UTC')\n\n self.log.info(\"Begining processing feed %s, previous date %s\",\n self.name, previous_date)\n\n for entry in await self.fetch_and_parse(timeout):\n\n pubdate = dateutil.parser.parse(entry.published, tzinfos=rssalertbot.BOGUS_TIMEZONES)\n entry.published = pendulum.from_timestamp(pubdate.timestamp())\n # also save a prettified string format\n entry.datestring = self.format_timestamp_local(entry.published)\n\n # skip anything that's stale\n if entry.published <= previous_date:\n continue\n\n event_id = md5((entry.title + entry.description).encode()).hexdigest()\n last_sent = self.storage.load_event(self.feed, event_id)\n re_alert = self.cfg.get('re_alert', rssalertbot.RE_ALERT_DEFAULT)\n should_delete_message = False\n\n if entry.published > now:\n if last_sent and now < last_sent.add(hours=re_alert):\n continue\n self.storage.save_event(self.feed, event_id, now)\n else:\n if entry.published > new_date:\n new_date = entry.published\n should_delete_message = last_sent\n\n self.log.debug(\"Found new entry %s\", entry.published)\n\n # alert on it\n await self.alert(entry)\n if new_date > last_sent_message_date:\n self.storage.save_date(self.feed, new_date)\n last_sent_message_date = new_date\n\n if should_delete_message:\n self.log.debug(f\"Deleting stored date for message {event_id}\")\n self.storage.delete_event(self.feed, event_id)\n\n self.log.info(\"End processing feed %s, previous date %s\", self.name, new_date)",
"def start_queue(self):\n working_list = self.generate_tweets_queue()\n tweet_list = working_list[\"tweets\"]\n padding_list = working_list[\"padding\"]\n\n for tweet in tweet_list:\n counter = PADDING_RATIO\n # main tweet\n post = self.tdata.post_update(tweet[1])\n if post:\n print \"\\\"\" + tweet[1] + \"\\\" tweet updated successfully.\"\n self.tdata.send_tweet(tweet[0], self.user_data[\"uid\"])\n else:\n print \"Failed to send... exiting.\"\n sys.exit(1)\n # padding updates\n while(counter > 0):\n sleep(BASE_DELAY)\n pad_tweet = padding_list.pop()\n post = self.tdata.post_update(pad_tweet[1])\n if post:\n print \"\\\"\" + pad_tweet[1] + \"\\\" padding tweet updated successfully.\"\n self.tdata.send_padding_tweet(pad_tweet[0], self.user_data[\"uid\"])\n counter -= 1\n else:\n print \"Failed to update padding tweet... exiting.\"\n sys.exit(1)",
"def __removing_loop(self) -> None:\r\n\r\n # repeat until stop flag is set\r\n while not self.__stopper.wait(self.CLEANUP_EXPIRED_INTERVAL):\r\n now = int(datetime.now(self.__tz).timestamp())\r\n log.debug('Removing...')\r\n\r\n # iterate through database and remove expired encounters\r\n for enc_id, despawn_time in self.__pokes_db.copy().items():\r\n if despawn_time - now < 5:\r\n del self.__pokes_db[enc_id]",
"def run(self):\n logic.remove_movie_medias()\n (quote, movie_name) = get_quote_and_movie_name()\n print('Chosen movie: {}'.format(movie_name))\n movie_poster_url = get_movie_poster_url(movie_name)\n self.sending_process(quote, movie_poster_url, movie_name)\n logic.remove_movie_medias()\n sys.exit()",
"def startScandir(self):\n while self.isAlive:\n files = self.getNewFiles(self.inbox)\n while len(files) > 0:\n for full_filename in files:\n try:\n self.workflow.processFile(full_filename, 'new')\n except:\n et, ev, tb = sys.exc_info()\n serviceconfig.logger.error('got exception during the processing of the new file \"%s\"\\n\"%s\"' % (full_filename, str(ev)))\n serviceconfig.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))\n serviceconfig.sendMail('ERROR', 'File Processing FAILURE: %s' % str(et), 'Exception generated during the processing of the new file \"%s\":\\n%s\\n%s' % (full_filename, str(ev), ''.join(traceback.format_exception(et, ev, tb))))\n self.reportAction(full_filename, 'failure', str(et))\n files = self.getNewFiles(self.inbox)\n if self.timeout > 0:\n count = (self.timeout*60) / 10\n i = 0\n try:\n while self.isAlive:\n time.sleep(10)\n i = i+1\n if i >= count:\n break\n except:\n et, ev, tb = sys.exc_info()\n serviceconfig.logger.error('got Sleep exception \"%s\"' % str(ev))\n serviceconfig.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))\n serviceconfig.sendMail('ERROR', 'Sleep Processing FAILURE: %s' % str(et), 'Exception generated during the sleep process:\\n%s\\n%s' % (str(ev), ''.join(traceback.format_exception(et, ev, tb))))\n else:\n self.isAlive = False\n serviceconfig.logger.info('No more files to process. Exiting...')",
"def cleanup(self):\n log = logging.getLogger('mailman.runner')\n # Send SIGTERMs to all the child processes and wait for them all to\n # exit.\n for pid in self._kids:\n try:\n os.kill(pid, signal.SIGTERM)\n except OSError as error:\n if error.errno == errno.ESRCH:\n # The child has already exited.\n log.info('ESRCH on pid: %d', pid)\n # Wait for all the children to go away.\n while self._kids:\n try:\n pid, status = os.wait()\n self._kids.drop(pid)\n except OSError as error:\n if error.errno == errno.ECHILD:\n break\n elif error.errno == errno.EINTR:\n continue\n raise",
"def clean_documents():\n start = datetime.now()\n for i, raw_filename in enumerate(os.listdir(RAW_DIR)):\n fullpath = os.path.join(RAW_DIR, raw_filename)\n if os.path.isfile(fullpath):\n print(\"Cleaning {0} {1}\".format(i, fullpath), file=stderr)\n try:\n with open(fullpath, \"r\") as f:\n text = f.read()\n text = clean(text)\n soup = BeautifulSoup(text, \"html.parser\")\n cleaned = visible_text(soup)\n score = germanwings_score(cleaned)\n if not score:\n print(\"not germanwings: {0}\".format(raw_filename))\n else:\n clean_filename = os.path.join(CLEAN_DIR, raw_filename)\n with open(clean_filename, \"w\") as f:\n f.write(cleaned.encode(\"ascii\", \"ignore\"))\n except Exception as exc:\n print(\"{0}: {1}\".format(fullpath, exc), file=stderr)\n end = datetime.now()\n print(\"Elapsed time to clean: {0}\".format(end - start), file=stderr)",
"def _parse_transcription_file(self, root: str, name: str) -> None:\n trans_path = os.path.join(root, name)\n with open(trans_path, \"r\", encoding=\"utf-8\") as trans:\n # Each line has the form \"ID THE TARGET TRANSCRIPTION\"\n for line in trans:\n id_, transcript = line.split(maxsplit=1)\n dropped = self._process_audio(root, id_)\n if not dropped:\n self._process_transcript(transcript)",
"def __cleanup(self, ttl_in_sec):\n ttl_in_ms = ttl_in_sec * 1000\n while True:\n logging.debug(\"cleanup action...\")\n current_ts = self.__current_timestamp_in_ms()\n self.lock.acquire()\n for key, value in self.orderedDict.items():\n if value[1] > current_ts - ttl_in_ms:\n break\n else:\n self.orderedDict.pop(key, None)\n self.lock.release()\n time.sleep(ttl_in_sec)",
"def main_loop(bot):\n # Start looping\n i = 0\n bot.tick()\n for comment in bot.r_all.stream.comments():\n # Check if comment is and iambic pentameter\n done = bot.process_comment(comment)\n # If enough commebts have been processed, kill the procgram\n if done:\n exit()\n # Increment counter\n i += 1\n # Report periodically\n if i >= bot.options.report_every:\n # Print infos\n percent_length_removed = (bot.n_length_removed) / bot.options.report_every * 100\n print('Analyzed %d comments, ' % i +\n '%.2f%% too short/long, ' % percent_length_removed +\n 'found %d iambic pentameters ' % bot.n_pentameters_epoch +\n '(total: %d), ' % bot.n_pentameters +\n '%.1f comments/s' % (i / bot.tick()))\n sys.stdout.flush()\n # Sleep a bit\n time.sleep(bot.options.sleep_for) # Reset periodic counters\n # Reset periodic counters\n bot.n_length_removed = 0\n bot.n_pentameters_epoch = 0\n i = 0\n # Occasionally tweet a quatrain\n try:\n bot.tweet_quatrain()\n except Exception as e:\n print(\"Failed to tweet \" + str(e), file=sys.stderr)",
"def parse_transcript(self):\n\t\t\n\t\toutput_text = tempfile.NamedTemporaryFile(mode = 'r')\n\t\twith tempfile.NamedTemporaryFile(delete=False) as input_text:\n\t\t\tinput_text.write(self.transcript_string.encode('utf-8'))\n\t\t\t#to write to the file, convert to utf-8; to use for jinja, convert it back to unicode\n\n\t\tos.popen(\"python vocab_resources/splitta/sbd.py -m vocab_resources/splitta/model_nb -t \" + input_text.name +\" -o \" + output_text.name)\n\t\tos.remove(input_text.name)\n\n\t\twith open(output_text.name) as parsed_text:\n\t\t\tsentence_index = {}\n\t\t\tfor index, sentence in enumerate(parsed_text):\n\t\t\t\tsentence = sentence.rstrip()\n\t\t\t\tsentence_index[index] = sentence\n\n\t\tsentence_index[len(sentence_index)] = \"Unable_to_find_matching_sentence\" #avoid outliers\n\t\tself.sentence_index = sentence_index",
"def main():\n exit_if_already_started()\n while True:\n for timeframe in ['all', 'month', 'week']:\n subreddits = load_list('subs.txt')\n while subreddits:\n # Grab all images/comments from sub, remove from list\n parse_subreddit(subreddits.pop(0), timeframe)",
"def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None"
] | [
"0.5957652",
"0.5755388",
"0.54659915",
"0.5391661",
"0.5354948",
"0.5351097",
"0.5270798",
"0.5212245",
"0.5207515",
"0.5192871",
"0.5167002",
"0.5137688",
"0.5125688",
"0.51148933",
"0.5111066",
"0.5101877",
"0.5099718",
"0.509771",
"0.50841075",
"0.5051348",
"0.5043869",
"0.50335795",
"0.50252724",
"0.50173515",
"0.50056624",
"0.5000102",
"0.49870256",
"0.49866924",
"0.49820483",
"0.49778092"
] | 0.67138106 | 0 |
This parses the content of nohup. The size of nohup is basically unlimited but each line has to be under 300000 characters(?). This then returns the following...\n\n index 0 a list of all the occurrences of realTimeFactor\n index 1 a list of all the occurrences of transcriptions\n index 2 a list of all the occurrences of the transcription ID\n index 3 a list of all the occurrences of the total transcription time.\n\n \n\n \\Example usage\n parsedContent = nohupTranscriptionContent("ok.txt") | def nohupTranscriptionContent(filePath):
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fileTranscriptionContent(filePath):\n try:\n continu = True\n f = open(filePath, 'r')\n fileContent = \"\"\n while (continu):\n temp = f.readline(300000)\n if(len(temp) == 0):\n continu = False\n else:\n fileContent += temp\n results = []\n f.close()\n url = re.findall(r'URL:(.*?)\\n', fileContent)\n results.append(url)\n realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)\n results.append(realTimeFactor)\n transcription = re.findall(r'utterance-id1 (.*?)\\n', fileContent)\n for item in transcription:\n if(len(item) > 500):\n results.append(item.replace(\"'\", \"''\"))\n if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):\n return results\n else:\n Tools.writeException(\"fileTranscriptionContent\", \"ERROR attempted to parse \" + filePath + \" but got \" + str(results))\n return False\n except Exception as e:\n Tools.writeException(\"fileTranscriptionContent\", e)",
"def parseUpload(dbconnection, fileName):\n nhContent = ParseText.nohupTranscriptionContent(fileName)\n count = 0\n while count < len(nhContent[0]):\n try:\n rtf = nhContent[0][count]\n transcription = nhContent[1][count].replace(\"'\", \"''\").replace(\"_\", \"\")\n dbID = nhContent[2][count].replace(\".\", \"\")\n duration = nhContent[3][count]\n DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)\n count += 1\n except:\n print(\"couldnt upload one at index \" + str(count))\n count += 1",
"def parser_txt_file(self, content):\n ai_cpu_str = str(content.replace(b'\\n\\x00', b' ___ ').replace(b'\\x00', b' ___ '))[2:-1]\n ai_cpu_lines = ai_cpu_str.split(\" ___ \")\n result_list = list()\n ai_cpu_total_time_summary = 0\n # Node serial number.\n serial_number = 1\n for i in range(len(ai_cpu_lines) - 1):\n node_line = ai_cpu_lines[i]\n thread_line = ai_cpu_lines[i + 1]\n if \"Node\" in node_line and \"Thread\" in thread_line:\n # Get the node data from node_line\n result = self._get_kernel_result(\n serial_number,\n node_line.split(','),\n thread_line.split(',')\n )\n\n if result is None:\n continue\n\n result_list.append(result)\n # Calculate the total time.\n total_time = result[2]\n ai_cpu_total_time_summary += total_time\n # Increase node serial number.\n serial_number += 1\n elif \"Node\" in node_line and \"Thread\" not in thread_line:\n node_type_name = node_line.split(',')[0].split(':')[-1]\n logger.warning(\"The node type:%s cannot find thread data\", node_type_name)\n return ai_cpu_total_time_summary, result_list",
"def _parse(self, verbose=False):\n instructions = json.load(open(self.filename, 'rb'))\n self.bpm = instructions['header']['bpm']\n self.ticks_per_beat = instructions['header']['PPQ']\n self.song_length = instructions['duration']\n self.phraseLength = instructions['phraseLength']\n\n print ('Parsing file:', self.filename)\n print ('Title', instructions['header']['name']) \n print ('BPM', self.bpm) \n\n EIGHTH_NOTE_INTERVAL_S = 60 / (2*self.bpm)\n\n # Parse the messages into buckets for each half-beat. Put them in 32-beat chunks\n chunks = []\n current_chunk = []\n index = 0\n for time in np.arange(0, self.song_length, EIGHTH_NOTE_INTERVAL_S):\n for message in instructions['tracks'][1]['notes']:\n if (message['time'] >= time and message['time'] < time + EIGHTH_NOTE_INTERVAL_S):\n current_chunk.append(str(message['midi']))\n chunks.append(current_chunk)\n index += 1\n current_chunk = []\n\n # For each bucktet, create parsed messages\n phrases = []\n current_phrase = []\n current_phrase_parsed = []\n for phrase_index in range(self.phraseLength):\n current_phrase = chunks[phrase_index*self.phraseLength:(phrase_index+1)*self.phraseLength]\n index_word = 0\n for word in current_phrase:\n word_parsed = str(index_word) + ',' + ','.join(word)\n if index_word == 0:\n self.initial_notes.append(word_parsed)\n current_phrase_parsed.append(word_parsed)\n index_word += 1\n phrases.append(current_phrase_parsed)\n current_phrase_parsed = []\n current_phrase=[]\n\n # Put them in the markov-chain\n for phrase in phrases:\n self._sequence(phrase)\n \n # Print out the resulting chunks\n if verbose:\n print ('Initial notes', self.initial_notes)\n print ('Matrix')\n self.markov_chain.print_as_matrix(20)",
"def parse_headlines(self):\n headlines = re.findall(r\"^\\.\\.\\.(.*?)\\.\\.\\.[ ]?\\n\\n\", self.unixtext,\n re.M | re.S)\n headlines = [\" \".join(h.replace(\"...\",\n \", \").replace(\"\\n\", \" \").split())\n for h in headlines]\n return headlines",
"def readTotitle(fh):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)",
"def parse(lines: List[str]):\n\n len_lines = len(lines)\n i = 0\n\n node_list = []\n \n while i < len_lines:\n line = lines[i]\n l = line.strip()\n if len(l) == 0:\n i += 1\n continue\n ls = l.split(\"\\t\")\n nlines = int(ls[0])\n content_lines = lines[i: i + nlines + 1]\n node = _build_node(content_lines)\n node_list.append(node)\n\n i = i + nlines + 1\n \n return node_list",
"def read_subtitles(self):\n\n # Group 1: index, Group 2: Start Time, Group 3: End Time, Group 4: Text\n\n patterns = [\n r\"(\\d+)\\n(\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d) --> (\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d)\\n((?:.+\\n)*.+)\",\n r\"(\\d+)\\r\\n(\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d) --> (\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d)\\r\\n((?:.+\\r\\n)*.+)\",\n # Reports pattern\n r\"(\\d+)\\r(\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d) --> (\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d)\\n((?:.+\\r)*.+)\"\n ]\n\n for pattern in patterns:\n re_subs = re.findall(pattern, self.subtitles, re.M | re.I)\n if(len(re_subs) > 1):\n self.re_subs = re_subs\n return\n\n raise Exception(\n f're_subs length is {len(re_subs)}. Maybe the regex pattern is falty?')",
"def parse_story_file(content):\n content_raw = content.split(\"@highlight\")[0]\n content = \" \".join(filter(None, [x.strip() for x in content_raw.split(\"\\n\")]))\n return content",
"def pre_process(fname, num_ex, alt_speaker):\n conversation = []\n with PathManager.open(fname) as f:\n lines = f.readlines()\n random.shuffle(lines)\n lines = lines[:num_ex]\n for line in lines:\n data = json.loads(line)\n dialogue = data[\"dialog\"]\n for item in dialogue:\n speaker = item[0]['id']\n text = item[0]['text']\n conversation += [(speaker, text)]\n conversation += [(END_OF_CONVO, END_OF_CONVO)]\n\n return conversation",
"def parse_transcript(self):\n\t\t\n\t\toutput_text = tempfile.NamedTemporaryFile(mode = 'r')\n\t\twith tempfile.NamedTemporaryFile(delete=False) as input_text:\n\t\t\tinput_text.write(self.transcript_string.encode('utf-8'))\n\t\t\t#to write to the file, convert to utf-8; to use for jinja, convert it back to unicode\n\n\t\tos.popen(\"python vocab_resources/splitta/sbd.py -m vocab_resources/splitta/model_nb -t \" + input_text.name +\" -o \" + output_text.name)\n\t\tos.remove(input_text.name)\n\n\t\twith open(output_text.name) as parsed_text:\n\t\t\tsentence_index = {}\n\t\t\tfor index, sentence in enumerate(parsed_text):\n\t\t\t\tsentence = sentence.rstrip()\n\t\t\t\tsentence_index[index] = sentence\n\n\t\tsentence_index[len(sentence_index)] = \"Unable_to_find_matching_sentence\" #avoid outliers\n\t\tself.sentence_index = sentence_index",
"def haikus_for_document(filename):\n text = get_text(filename)\n haikus = []\n # SpaCy has a maximum text size of 1,000,000 characters.\n # Let's use one fewer to be on the safe side.\n for chunk in chunks(text,999_999): # this underscore syntax was introduced in Python 3.6\n doc = nlp(chunk)\n for sent in doc.sents:\n haiku = check_haiku(sent)\n if haiku:\n haikus.append(haiku)\n return haikus",
"def get926Monologue(filename):\n doc = Document(filename)\n monologue = \"\"\n o = re.compile('track [0-9][0-9]')\n\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n\n # timestamp e.g 15:01:27\n isHeader = o.match(paragraph.lower()) \n # ensure paragraph is not just empty line\n hasText = paragraph.lstrip()\n\n # ensure it is not an empty line\n if hasText and not isHeader:\n monologue += paragraph\n \n return [{'unit': monologue}]",
"def read_transcription_file(file_path, audio_file_path):\n with open(file_path) as in_file:\n last_timestamp = 0\n res = []\n transcription = \"\"\n for line in in_file:\n time_stamp_match = re.match(\"\\[([0-9\\]+\\.[0-9]+)\\]\", line)\n #if this regex matched then the line is a timestamp\n if time_stamp_match:\n timestamp = float(time_stamp_match.group(1))\n if transcription and transcription.strip() not in ['(())', \"<no-speech>\"]:\n single_instance = {\"start_time\": last_timestamp, \n \"end_time\": timestamp,\n \"transcription\": transcription,\n \"audio_file\" : audio_file_path}\n res.append(single_instance)\n last_timestamp = timestamp\n else:\n last_timestamp = timestamp # this handles silence at beginning\n else:\n transcription = line.strip()\n \n return res",
"def parse_sambamba_output(self):\r\n exons = []\r\n with open (self.file_path, \"r\") as sambamba_output:\r\n for line in sambamba_output:\r\n if line.startswith('#'):\r\n fields = line.strip().split()\r\n else:\r\n description = list(line.strip().split())\r\n i = 0\r\n exon_dict = {}\r\n while i<len(fields):\r\n exon_dict[fields[i]] = description[i]\r\n i += 1\r\n exons.append(exon_dict)\r\n return exons",
"def analyse(self):\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]",
"def create_hn_text(self):\n text_list = [f\"Top {STORIES_NUMBER} from HackerNews:\"]\n sorted_stories = self.get_top_stories()\n # Format slack text\n for story in sorted_stories:\n text_list.append(\n \"*<{}|{}>* - <{}|{}>\".format(\n \"{}/item?id={}\".format(HN_URL, story[\"id\"]),\n story[\"score\"],\n # Ask HN type posts do not have 'url' key, so using get to return None\n story.get('url'),\n story[\"title\"],\n )\n )\n self.logger.debug(text_list)\n return \"\\n>\".join(text_list)",
"def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. ---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)",
"def transcript_lines(transcript_text):\n lines = []\n for line in transcript_text.splitlines():\n if line.strip() and line.strip()[0] != '#':\n split = line.split(':')\n speaker = split[0][-1]\n utterance = ' '.join(split[1:]).strip()\n lines.append((speaker, utterance))\n return lines",
"def parse(text, showToc=True):\n p = Parser(show_toc=showToc)\n return p.parse(text)",
"def transcribe(self, fp):\n\n fp.seek(44, os.SEEK_SET)\n\n # FIXME: Can't use the Decoder.decode_raw() here, because\n # pocketsphinx segfaults with tempfile.SpooledTemporaryFile()\n data = fp.read()\n transcribed = []\n while True:\n try:\n self._decoder.start_utt()\n self._decoder.process_raw(data, False, True)\n self._decoder.end_utt()\n hyp = self._decoder.hyp()\n result = hyp.hypstr if hyp is not None else ''\n transcribed = [result] if result != '' else []\n self._logger.info('Transcribed: %r', transcribed)\n break\n except RuntimeError:\n self.reinit()\n\n if self._logfile is not None:\n with open(self._logfile, 'r+') as f:\n for line in f:\n self._logger.debug(line.strip())\n if self._logger.getEffectiveLevel() == logging.DEBUG:\n print(line.strip())\n f.truncate()\n\n return transcribed",
"def ParseSeqFile(FilePath):\n SeqFile = rSeqFile(FilePath)\n TidyFile = TidyLines(SeqFile)\n \n result = []\n\n for line in TidyFile:\n t = ( ProcessLine(line) )\n result.append(t)\n return(result)",
"def parse_fastqc_report(self, file_contents, s_name=None, root=None):\n \n section_headings = {\n 'sequence_quality': r'>>Per base sequence quality\\s+(pass|warn|fail)',\n 'per_seq_quality': r'>>Per sequence quality scores\\s+(pass|warn|fail)',\n 'sequence_content': r'>>Per base sequence content\\s+(pass|warn|fail)',\n 'gc_content': r'>>Per sequence GC content\\s+(pass|warn|fail)',\n 'n_content': r'>>Per base N content\\s+(pass|warn|fail)',\n 'seq_length_dist': r'>>Sequence Length Distribution\\s+(pass|warn|fail)',\n 'seq_dup_levels': r'>>Sequence Duplication Levels\\s+(pass|warn|fail)',\n 'adapter_content': r'>>Adapter Content\\s+(pass|warn|fail)',\n }\n stats_regexes = {\n 'total_sequences': r\"Total Sequences\\s+(\\d+)\",\n 'sequence_length': r\"Sequence length\\s+([\\d-]+)\",\n 'percent_gc': r\"%GC\\s+(\\d+)\",\n 'percent_dedup': r\"#Total Deduplicated Percentage\\s+([\\d\\.]+)\",\n 'percent_duplicates': r\"#Total Duplicate Percentage\\s+([\\d\\.]+)\", # old versions of FastQC\n }\n \n # Make the sample name from the input filename if we find it\n fn_search = re.search(r\"Filename\\s+(.+)\", file_contents)\n if fn_search:\n s_name = self.clean_s_name(fn_search.group(1) , root)\n \n # Throw a warning if we already have this sample and remove prev data\n # Unzipped reports means that this can be quite frequent\n # This gives a good idea of how horribly messy this module has become\n # TODO: Refactorrrr!\n if s_name in self.fastqc_stats:\n for k in self.fastqc_data:\n if k == 'sequence_quality':\n for j in self.fastqc_data[k]:\n self.fastqc_data[k][j].pop(s_name, None)\n elif k == 'adapter_content':\n aks = self.fastqc_data[k].keys()\n for s in aks:\n sn, _ = s.split(' - ')\n if sn == s_name:\n self.fastqc_data[k].pop(s, None)\n else :\n self.fastqc_data[k].pop(s_name, None)\n \n for k in self.fastqc_statuses:\n self.fastqc_statuses[k].pop(s_name, None)\n self.fastqc_stats.pop(s_name, None)\n log.debug(\"Duplicate sample name found! 
Overwriting: {}\".format(s_name))\n \n s = defaultdict(lambda: dict())\n s['seq_len_bp'] = 0\n s['seq_len_read_count'] = 0\n self.seq_lengths = set()\n adapter_types = []\n in_module = None\n for l in file_contents.splitlines():\n \n # Search for general stats\n for k, r in stats_regexes.items():\n r_search = re.search(r, l)\n if r_search:\n try:\n s[k] = float(r_search.group(1))\n except ValueError:\n s[k] = r_search.group(1) \n \n # Parse modules\n if in_module is not None:\n if l == \">>END_MODULE\":\n in_module = None\n else:\n \n if in_module == 'sequence_quality':\n quals = re.search(\"([\\d-]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\", l)\n if quals:\n bp = self.avg_bp_from_range(quals.group(1))\n groups = ['base', 'mean', 'median', 'lower_quart', 'upper_quart', '10_percentile', '90_percentile']\n for idx, g in enumerate(groups):\n try:\n self.fastqc_data['sequence_quality'][g][s_name][bp] = float(quals.group( idx + 1 ))\n except:\n self.fastqc_data['sequence_quality'][g][s_name][bp] = quals.group( idx + 1 )\n \n \n if in_module == 'per_seq_quality' or in_module == 'n_content':\n sections = l.split()\n try:\n self.fastqc_data[in_module][s_name][float(sections[0])] = float(sections[1])\n except ValueError:\n pass # First line - headers\n \n if in_module == 'sequence_content':\n l.replace('NaN','0')\n seq_matches = re.search(\"([\\d-]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\", l)\n if seq_matches:\n bp = self.avg_bp_from_range(seq_matches.group(1))\n groups = ['base', 'G', 'A', 'T', 'C']\n for idx, g in enumerate(groups):\n if idx == 0:\n self.fastqc_data['sequence_content'][s_name][bp][g] = seq_matches.group( idx + 1 )\n else:\n self.fastqc_data['sequence_content'][s_name][bp][g] = float(seq_matches.group( idx + 1 ))\n \n if in_module == 'gc_content':\n gc_matches = re.search(\"([\\d]+)\\s+([\\d\\.E]+)\", l)\n if gc_matches:\n self.fastqc_data['gc_content'][s_name][int(gc_matches.group(1))] = float(gc_matches.group(2))\n \n if in_module == 'seq_length_dist':\n len_matches = re.search(\"([\\d-]+)\\s+([\\d\\.E]+)\", l)\n if len_matches:\n bp = self.avg_bp_from_range(len_matches.group(1))\n self.fastqc_data['seq_length_dist'][s_name][bp] = float(len_matches.group(2))\n self.seq_lengths.add(bp)\n s['seq_len_bp'] += float(len_matches.group(2)) * bp\n s['seq_len_read_count'] += float(len_matches.group(2))\n \n if in_module == 'seq_dup_levels':\n if l[:1] == '#':\n # Start of module - replace default dict with an OrderedDict\n self.fastqc_data['seq_dup_levels'][s_name] = OrderedDict()\n continue # Skip header line\n sections = l.split()\n try:\n # Version 11 of FastQC\n # #Duplication Level\tPercentage of deduplicated\tPercentage of total\n self.fastqc_data['seq_dup_levels_dedup'][s_name][sections[0]] = float(sections[1])\n self.fastqc_data['seq_dup_levels'][s_name][sections[0]] = float(sections[2])\n except IndexError:\n # Version 10 of FastQC and below just gives percentage, no % of dedup\n # #Duplication Level\tRelative count\n self.fastqc_data['seq_dup_levels'][s_name][sections[0]] = float(sections[1])\n \n if in_module == 'adapter_content':\n if l[:1] == '#':\n adapter_types = l[1:].split(\"\\t\")[1:]\n else:\n cols = l.split(\"\\t\")\n pos = int(cols[0].split('-', 1)[0])\n for idx, val in enumerate(cols[1:]):\n a = adapter_types[idx]\n k = \"{} - {}\".format(s_name, a)\n self.fastqc_data['adapter_content'][k][pos] = float(val)\n \n else:\n # See if this is the start of a new section\n for k, r in 
section_headings.items():\n r_search = re.search(r, l)\n if r_search:\n in_module = k\n # Add to the global statuses dict\n self.fastqc_statuses[k][s_name] = r_search.group(1)\n \n # Work out the average sequence length\n if s['seq_len_read_count'] > 0:\n s['avg_sequence_length'] = s['seq_len_bp'] / s['seq_len_read_count']\n \n # Get percent duplicates (percent unique given)\n if 'percent_dedup' in s:\n s['percent_duplicates'] = 100 - s['percent_dedup']\n \n # Add parsed stats to dicts\n self.fastqc_stats[s_name] = s",
"def parse_text(self):\n text = self.get_data()\n line1 = text[0]\n index_list = [0]\n start_index = 3\n for i in range(1, len(text)):\n\n if line1.startswith('*'):\n index_list, start_index = self.star_parser(index_list, line1)\n elif line1.startswith('.'):\n start_index = self.dot_parser(start_index, line1, text, i)\n else:\n print \"\".rjust(start_index) + line1\n line1 = text[i]\n # Parse the last line\n if text[-1].startswith('*'):\n self.star_parser(index_list, text[-1])\n elif text[-1].startswith('.'):\n print '-'.rjust(start_index) + text[-1].lstrip('.')\n else:\n print \"\".rjust(start_index) + text[-1]",
"def entrez_fasta_parser(handleFasta):\n fullList = handleFasta.read().split(\"\\n\") \n resL = []\n seqFlag = False\n for fullLine in fullList:\n if fullLine == \"\":\n seqFlag = False\n continue\n elif fullLine[0] == \">\":\n resL.append(fullLine + \"\\n\")\n seqFlag = True\n elif seqFlag:\n resL[-1] += fullLine \n return resL",
"def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data",
"def hyou_reader():\n with open(HYOU_FILE_PATH, 'r') as voc_file:\n\n voc_list = []\n lesson_list = []\n\n voc_match = [\n re.compile(r\"^(\\S+)\\s*((\\S*))\\s*〔(\\S*)〕\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*((\\S*))\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*〔(\\S+)〕\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*(\\S+)\")\n ]\n\n voc_key = [\n {\"Voc\": 1, \"Ext\": 2, \"Type\": 3, \"Meaning\": 4},\n {\"Voc\": 1, \"Ext\": 2, \"Type\": 0, \"Meaning\": 3},\n {\"Voc\": 1, \"Ext\": 0, \"Type\": 2, \"Meaning\": 3},\n {\"Voc\": 1, \"Ext\": 0, \"Type\": 0, \"Meaning\": 2},\n ]\n\n match_count = len(voc_match)\n voc_count = 0\n lesson_count = 0\n\n for voc_line in voc_file:\n if voc_line.find(\"第\") != -1 and voc_line.find(\"课\") != -1:\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n voc_list = []\n voc_count = 0\n lesson_count = lesson_count + 1\n sound_list = sound_reader(lesson_count)\n elif not voc_line.find(\"----\") != -1 and voc_line != \"\\n\":\n voc_line.strip()\n\n voc_dict = {}\n for i in range(0, match_count):\n voc_group = voc_match[i].match(voc_line)\n if voc_group:\n for key, value in voc_key[i].items():\n if value != 0:\n voc_dict[key] = voc_group.group(value)\n else:\n voc_dict[key] = \"\"\n break\n\n if not voc_dict.has_key(\"Voc\"):\n print voc_line\n continue\n\n voc_dict[\"Time\"] = sound_list[voc_count]\n voc_count = voc_count + 1\n voc_list.append(voc_dict)\n\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n return lesson_list",
"def process_test(self, data):\n new_utts = []\n for l in data:\n tem = []\n for sent in l:\n tem.append([\"<s>\"] + sent + [\"</s>\"])\n new_utts.append(tem)\n return new_utts # 以输入的测试标题为topic,四句空诗",
"def parser_binary_file(self, content):\n result_list = list()\n ai_cpu_total_time_summary = 0\n # Node serial number.\n serial_number = 1\n\n i = 0\n ai_cpu_format = StructType.format(DataPreProcessParser.AI_CPU_STRUCT.values())\n ai_cpu_size = StructType.sizeof(DataPreProcessParser.AI_CPU_STRUCT.values())\n while i < len(content):\n ai_cpu_data = struct.unpack(ai_cpu_format, content[i:i + ai_cpu_size])\n ai_cpu = DataPreProcessParser.AiCpuStruct(*ai_cpu_data)\n if ai_cpu.task_id < self._task_id_threshold:\n node_type_name = f'{ai_cpu.stream_id}_{ai_cpu.task_id}'\n if self._op_task_dict and node_type_name in self._op_task_dict:\n node_type_name = self._op_task_dict[node_type_name].split('/')[-1]\n else:\n logger.warning(\"[profiler] the op name of %s cannot be found.\", node_type_name)\n exe_time = (float(ai_cpu.run_end) - float(ai_cpu.run_start)) / self._ms_unit\n total_time = ai_cpu.total_time / self._ms_unit\n result_list.append([serial_number, node_type_name, total_time, ai_cpu.dispatch_time / self._ms_unit,\n exe_time, ai_cpu.run_start_counter / self._us_unit,\n ai_cpu.run_end_counter / self._us_unit])\n\n ai_cpu_total_time_summary += total_time\n # Increase node serial number.\n serial_number += 1\n\n i = i + self._ai_cpu_len\n\n return ai_cpu_total_time_summary, result_list",
"def get_messages(message_count):\r\n\r\n file = open('messages.htm', encoding='UTF-8')\r\n\r\n html = file.read().split('</p>')\r\n file.close()\r\n\r\n TOTAL[0] = len(html) - 1\r\n\r\n # Gets rid of formatting at the beginning\r\n start = html[0].find('<div class=\"message\">')\r\n while not html[0][start].isnumeric():\r\n start += 1\r\n html[0] = html[0][start:]\r\n\r\n html.pop()\r\n\r\n threads = []\r\n\r\n que = Queue(maxsize=50)\r\n for line in html:\r\n try:\r\n clean_line = BeautifulSoup(line, 'lxml').getText()\r\n except Exception:\r\n print('Install lxml')\r\n #print(line)\r\n if len(clean_line) != 0:\r\n t = threading.Thread(target=add_option,\r\n args=(message_count, que, threads))\r\n que.put(clean_line)\r\n\r\n t.daemon = True\r\n t.start()\r\n threads.append(t)\r\n\r\n que.join()"
] | [
"0.5664039",
"0.5516081",
"0.520067",
"0.50546473",
"0.5002102",
"0.49736872",
"0.48318633",
"0.47706318",
"0.47663316",
"0.4757304",
"0.47494912",
"0.47263288",
"0.47130182",
"0.47129261",
"0.4693467",
"0.46848562",
"0.46799994",
"0.4672537",
"0.46613166",
"0.4635302",
"0.4634309",
"0.46326125",
"0.462585",
"0.46196043",
"0.46092904",
"0.45994005",
"0.45774668",
"0.45719868",
"0.45629025",
"0.4558935"
] | 0.754473 | 0 |
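A minimal, illustrative sketch of the section-scanning pattern used in the FastQC-style snippets above; the ">>Module name<tab>status" / ">>END_MODULE" layout comes from FastQC's fastqc_data.txt, while the function and variable names here are hypothetical:

from collections import defaultdict

def parse_fastqc_modules(text):
    # Split a FastQC-style report into named modules. Each module starts
    # with a ">>Module Name<tab>status" line and ends with ">>END_MODULE";
    # lines in between are kept verbatim for later per-module parsing.
    modules = defaultdict(list)
    current = None
    for line in text.splitlines():
        if line.startswith(">>END_MODULE"):
            current = None
        elif line.startswith(">>"):
            current = line[2:].split("\t")[0]
        elif current is not None:
            modules[current].append(line)
    return modules

Collecting raw lines per module first keeps the state machine tiny and lets each module be parsed with its own regexes afterwards, as the snippets above do.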
This parses the content of the transcription file. The file itself can be arbitrarily large, but each line has to be under 300000 characters. It returns a list with:\n\n index 0 url\n index 1 realTimeFactor\n index 2 transcription\n | def fileTranscriptionContent(filePath):
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nohupTranscriptionContent(filePath):\n try:\n continu = True\n fileContent = \"\"\n f = open(filePath, 'r')\n while (continu):\n temp = f.readline(900000)\n if(len(temp) == 0):\n continu = False\n else:\n fileContent += temp\n results = []\n realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)\n results.append(realTimeFactor)\n transcription = re.findall(r'utterance-id(.*?) (.*?)\\n', fileContent)\n transcriptionList = []\n transcriptionIDList = []\n for item in transcription:\n if(len(item[1]) > 1000):\n transcriptionIDList.append(item[0])\n transcriptionList.append(item[1])\n results.append(transcriptionList)\n results.append(transcriptionIDList)\n transcriptionTime = re.findall(r'seconds / (.*?) seconds\\.', fileContent)\n results.append(transcriptionTime)\n return results\n except Exception as e:\n Tools.writeException(\"nohupTranscriptionContent\", e)\n return False",
"def read_transcription_file(file_path, audio_file_path):\n with open(file_path) as in_file:\n last_timestamp = 0\n res = []\n transcription = \"\"\n for line in in_file:\n time_stamp_match = re.match(\"\\[([0-9\\]+\\.[0-9]+)\\]\", line)\n #if this regex matched then the line is a timestamp\n if time_stamp_match:\n timestamp = float(time_stamp_match.group(1))\n if transcription and transcription.strip() not in ['(())', \"<no-speech>\"]:\n single_instance = {\"start_time\": last_timestamp, \n \"end_time\": timestamp,\n \"transcription\": transcription,\n \"audio_file\" : audio_file_path}\n res.append(single_instance)\n last_timestamp = timestamp\n else:\n last_timestamp = timestamp # this handles silence at beginning\n else:\n transcription = line.strip()\n \n return res",
"def _parse_transcription_file(self, root: str, name: str) -> None:\n trans_path = os.path.join(root, name)\n with open(trans_path, \"r\", encoding=\"utf-8\") as trans:\n # Each line has the form \"ID THE TARGET TRANSCRIPTION\"\n for line in trans:\n id_, transcript = line.split(maxsplit=1)\n dropped = self._process_audio(root, id_)\n if not dropped:\n self._process_transcript(transcript)",
"def parse_file(self, file_lines):\n # separate the file into chunks of text\n chunks, chunk = [], []\n # check to see what format the corpus is in, we assume that the headers are the same for all\n # texts in the file... (maybe not safe?)\n if re.match('Primary publication:', file_lines[0]):\n header = re.compile('Primary publication:')\n else:\n header = re.compile(r'&?P\\d{6}')\n for line in file_lines:\n if header.match(line):\n if len(chunk) > 0: # pylint: disable=len-as-condition\n chunks.append(chunk)\n chunk = [line]\n else:\n if len(line) > 0: # pylint: disable=len-as-condition\n chunk.append(line)\n chunks.append(chunk)\n self.chunks = chunks\n # create a rich catalog from the chunks\n re_translit = re.compile(r'(\\d+\\'?\\.) ?(.*)')\n re_normaliz = re.compile(r'(#tr\\.ts:) ?(.*)')\n re_translat = re.compile(r'(#tr\\.en:) ?(.*)')\n for chunk in self.chunks:\n text = chunk\n if chunk[0].startswith('Primary publication:'):\n # we've got full metadata, add additional parsing later\n metadata = chunk[:25]\n text = chunk[26:]\n else: # no metadata\n metadata = []\n pnum = ''.join([c for c in text[0].split('=')[0] if c != '&']).rstrip()\n edition = text[0].split('=')[1].lstrip()\n text = text[3:]\n translit = []\n normaliz = []\n translat = []\n for line in text:\n if re.match(r'\\d+\\'?\\.', line):\n translit.append(re_translit.match(line).groups()[1])\n if line.startswith('#tr.ts:'):\n normaliz.append(re_normaliz.match(line).groups()[1])\n if line.startswith('#tr.en:'):\n translat.append(re_translat.match(line).groups()[1])\n self.catalog[pnum] = {'metadata': metadata,\n 'pnum': pnum,\n 'edition': edition,\n 'raw_text': text,\n 'transliteration': translit,\n 'normalization': normaliz,\n 'translation': translat}",
"def read_transcript_data(fn):\n\n def _read_lines(fn):\n # NC_000007.13\tRefSeq\tcDNA_match\t50344265\t50344518\t254\t+\t.\tID=aln58042;Target=NM_001220765.2 1 254 +;gap_count=0;identity=0.0691326;idty=1;num_ident=428;num_mismatch=0;pct_coverage=6.91326;pct_identity_gap=100;pct_identity_ungap=100;score=254\n # NC_000002.11 RefSeq cDNA_match 179671939 179672150 212 - . ID=ed951d46-194c-477a-a480-4bc64530c5ba;Target=NM_001267550.2 1 212 +;gap_count=0;identity=0.999991;idty=1;num_ident=109223;num_mismatch=1;pct_coverage=100;pct_identity_gap=99.9991;pct_identity_ungap=99.9991\n line_re = re.compile(\n \"(?P<ref_ac>\\S+)\\s+(?P<origin>\\S+)\\s+(?P<match_type>\\S+)\\s+\"\n \"(?P<g_start>\\d+)\\s+(?P<g_end>\\d+)\\s+(?P<score>\\S+)\\s+\"\n \"(?P<strand>[-+])\\s+\\.\\s+ID=(?P<aln>[^;]+);Target=(?P<tx_ac>\\S+)\"\n \"\\s+(?P<tx_start>\\d+)\\s+(?P<tx_end>\\d+).+?\"\n \"pct_coverage=(?P<pct_coverage>[^;]+);\"\n \"pct_identity_gap=(?P<pct_identity_gap>[^;]+);\"\n \"pct_identity_ungap=(?P<pct_identity_ungap>[^;]+)\"\n )\n fh = io.open(fn, \"rb\")\n while fh.peek(1)[0] == \"#\":\n fh.readline()\n while fh.peek(3)[0:3] != \"###\":\n line = fh.readline()\n try:\n yield line_re.match(line).groupdict()\n except AttributeError:\n raise Exception(\"Failed at\", line)\n raise StopIteration\n def _key(e):\n return (e[\"tx_ac\"], not e[\"ref_ac\"].startswith(\"NC_\"), e[\"ref_ac\"], e[\"aln\"])\n return itertools.groupby(sorted(_read_lines(fn), key=_key), key=_key)",
"def transcribe(self, fp):\n\n fp.seek(44, os.SEEK_SET)\n\n # FIXME: Can't use the Decoder.decode_raw() here, because\n # pocketsphinx segfaults with tempfile.SpooledTemporaryFile()\n data = fp.read()\n transcribed = []\n while True:\n try:\n self._decoder.start_utt()\n self._decoder.process_raw(data, False, True)\n self._decoder.end_utt()\n hyp = self._decoder.hyp()\n result = hyp.hypstr if hyp is not None else ''\n transcribed = [result] if result != '' else []\n self._logger.info('Transcribed: %r', transcribed)\n break\n except RuntimeError:\n self.reinit()\n\n if self._logfile is not None:\n with open(self._logfile, 'r+') as f:\n for line in f:\n self._logger.debug(line.strip())\n if self._logger.getEffectiveLevel() == logging.DEBUG:\n print(line.strip())\n f.truncate()\n\n return transcribed",
"def parse_transcript(self):\n\t\t\n\t\toutput_text = tempfile.NamedTemporaryFile(mode = 'r')\n\t\twith tempfile.NamedTemporaryFile(delete=False) as input_text:\n\t\t\tinput_text.write(self.transcript_string.encode('utf-8'))\n\t\t\t#to write to the file, convert to utf-8; to use for jinja, convert it back to unicode\n\n\t\tos.popen(\"python vocab_resources/splitta/sbd.py -m vocab_resources/splitta/model_nb -t \" + input_text.name +\" -o \" + output_text.name)\n\t\tos.remove(input_text.name)\n\n\t\twith open(output_text.name) as parsed_text:\n\t\t\tsentence_index = {}\n\t\t\tfor index, sentence in enumerate(parsed_text):\n\t\t\t\tsentence = sentence.rstrip()\n\t\t\t\tsentence_index[index] = sentence\n\n\t\tsentence_index[len(sentence_index)] = \"Unable_to_find_matching_sentence\" #avoid outliers\n\t\tself.sentence_index = sentence_index",
"def ParseSeqFile(FilePath):\n SeqFile = rSeqFile(FilePath)\n TidyFile = TidyLines(SeqFile)\n \n result = []\n\n for line in TidyFile:\n t = ( ProcessLine(line) )\n result.append(t)\n return(result)",
"def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n decoded_lines = decoded_text.split('\\n')\n\n # Remove titles of Wikipedia articles if desired\n if self.remove_headers:\n filtered_lines = []\n for line in decoded_lines:\n line_strip = line.strip()\n if len(line_strip) > 0:\n if line_strip[0] != '=' and line_strip[-1] != '=':\n filtered_lines.append(line)\n decoded_lines = filtered_lines\n\n eol = self.eol or ''\n if self.split_by_line:\n text = [(line.lstrip() + eol,) for line in decoded_lines]\n else:\n text = [(eol.join(decoded_lines),)]\n\n return text",
"def parse_text(self):\n self.text={}\n for i, lang in enumerate(LANGS):\n text=file(self.src).read()\n self.text[lang]=\"\"\n extracted, finish = \"\", 0\n start_string, stop_string = r\"<!--%s-->\" % lang, r\"<!--/%s-->\" % lang\n # Iterates to check multiple blocks of text within the file!\n # Pay attention to infinite loops!\n # AttributeError exception raised when no more blocks to extract exist\n while True:\n try:\n start=re.compile(start_string, re.IGNORECASE).search(text).span()[1]\n finish=re.compile(stop_string, re.IGNORECASE).search(text).span()[0]\n extracted+=text[start:finish]\n text=text[finish+1:]\n except AttributeError:\n break\n self.text[lang]+=extracted",
"def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n # Replace end of line tokens\n if self.eol is not None and not self.split_by_sentence:\n decoded_text = decoded_text.replace('\\n', self.eol)\n\n # Split by sentence or unroll\n if self.split_by_sentence:\n nltk.download('punkt', quiet=True)\n text = [(sent.strip(),) for sent in nltk.tokenize.sent_tokenize(decoded_text)]\n else:\n text = [(decoded_text,)]\n\n return text",
"def parse_plain_text_export(text_file):\n\n text_file.seek(0)\n for line in text_file.readlines():\n urls = re.findall(URL_REGEX, line) if line.strip() else ()\n for url in urls:\n yield {\n 'url': url,\n 'timestamp': str(datetime.now().timestamp()),\n 'title': None,\n 'tags': '',\n 'sources': [text_file.name],\n }",
"def process_raw_phrases(file_path):",
"def read_corpus(file_path, source):\n data = []\n for line in open(file_path):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>'] #TODO: Change\n data.append(sent)\n return data",
"def get_transcription(url):\n\n # Checks the format of the URL\n if \"https://www.youtube.com/watch?v=\" in url:\n input_url_id = url.replace(\"https://www.youtube.com/watch?v=\", \"\")\n elif \"https://youtu.be/\" in url:\n input_url_id = url.replace(\"https://youtu.be/\", \"\")\n\n # Creates a blank list to iterate over\n text_parts = []\n\n # Gets a list of all available transcripts\n try:\n\n list_of_transcripts = YouTubeTranscriptApi.list_transcripts(input_url_id)\n print(\"Checking for Transcriptions...\")\n\n # Checks to see if a manual transcript is created if not, checks to see if a generated one is created\n if 'en-US' in list_of_transcripts._manually_created_transcripts:\n print(\"Manual Transcription Found.\")\n transcript = list_of_transcripts.find_manually_created_transcript(['en-US'])\n elif 'en' in list_of_transcripts._manually_created_transcripts:\n print(\"Manual Transcription Found.\")\n transcript = list_of_transcripts.find_manually_created_transcript(['en'])\n elif 'en' in list_of_transcripts._generated_transcripts:\n print(\"Auto-Generated Transcription Found.\")\n transcript = list_of_transcripts.find_generated_transcript(['en'])\n\n # Saves the transcript into a variable to iterate over\n raw_transcription = transcript.fetch()\n\n # Indexing of raw transcripts\n iteration_of_raw = 0\n\n # Iterates over each dictionary and extracts 'text' key then appends the blank text_parts list\n for i in raw_transcription:\n indexed_dictionary = raw_transcription[iteration_of_raw]\n text_from_dictionary = indexed_dictionary['text']\n text_parts.append(text_from_dictionary)\n iteration_of_raw += 1\n # Defines how we want each text element to be separated with\n separator_for_each_text = \" \"\n\n # Joins the separator with the text_parts\n clean_transcription = separator_for_each_text.join(text_parts)\n\n # Returns the cleaned transcripts\n return clean_transcription\n\n except:\n print(\"No Transcriptions Found\")\n clean_transcription = \"No Transcriptions Found\"\n return clean_transcription",
"def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data",
"def read_corpus(file_path, source):\n data = []\n for line in open(file_path, encoding='utf-8'):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>']\n data.append(sent)\n\n return data",
"def parse(self):\n\n lines = self._get_file_lines( )\n\n message, translation = None, None\n comment, status, sources = None, None, None\n temp_msgid, temp_msgstr = None, None\n previous, current = None, None\n\n tstore = UT3Store( )\n\n for curl in lines:\n\n curl = curl.strip( )\n\n if len(curl) == 0:\n current = LINE_EMPTY\n elif curl[0] == '#':\n current = LINE_COMMENT\n status, comment, sources = \\\n _extract_comment_values(curl, status, comment, sources)\n else:\n keyword, message = _parse_line(curl)\n if keyword is not None:\n if keyword == \"msgid\":\n current = LINE_MSGID\n # and now initialise them for later use\n temp_msgid = message\n temp_msgstr = \"\"\n elif keyword == \"msgstr\":\n current = LINE_MSGSTR\n temp_msgstr = message\n else:\n current = LINE_UNKNOWN\n logging.error(\"unknown keyword: %s\" % (keyword))\n else:\n if message is not None:\n if current == LINE_MSGID:\n temp_msgid = temp_msgid + message\n elif current == LINE_MSGSTR:\n temp_msgstr = temp_msgstr + message\n else:\n logging.error(\"unknown mode\")\n\n if previous == LINE_MSGSTR and current != LINE_MSGSTR:\n # we're not in msgstr mode anymore --> save the current entry\n entry = _make_item(message, translation, \\\n sources, comment, status)\n if entry is not None:\n tstore.append(entry)\n\n # reset the item values\n message, translation = None, None\n comment, status, sources = None, None, None\n\n # save msgid and msgstr for storing them later\n message = temp_msgid\n translation = temp_msgstr\n # save line state\n previous = current\n\n # finally append the last pair\n if previous == LINE_MSGSTR:\n entry = _make_item(message, translation, sources, comment, status)\n if entry is not None:\n tstore.append(entry)\n\n return tstore",
"def parse_lines(filename):\n line_counter = 0\n with open(filename, 'r') as rf:\n for line_txt in rf:\n try:\n d = json.loads(line_txt)\n tup = (\n d['attributed_to'],\n int(d['date_time'][8:10]),\n d.get('used_first_time_today', False),\n d.get('first_utm_source', 'unknown') \n )\n except:\n print('Error parsing line_txt:', line_txt)\n line_counter += 1\n if line_counter % 10 ** 6 == 0:\n print('read %dM lines' % (line_counter // 10 ** 6))\n yield tup # yield: https://stackoverflow.com/a/231855",
"def _get_transcript_entries(transcript_directory):\n transcript_files = iglob_recursive(transcript_directory, '*.trans.txt')\n for transcript_file in transcript_files:\n with open(transcript_file, 'r') as f:\n for line in f:\n # Strip included new line symbol\n line = line.rstrip('\\n')\n\n # Each line is in the form\n # 00-000000-0000 WORD1 WORD2 ...\n splitted = line.split(' ', 1)\n yield splitted",
"def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs",
"def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. ---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)",
"def loadTIText(self, file):\n next = 1\n startAddr = 0\n segmentdata = []\n #Convert data for MSP430, TXT-File is parsed line by line\n while next >= 1:\n #Read one line\n l = file.readline()\n if not l: break #EOF\n l = l.strip()\n if l[0] == 'q': break\n elif l[0] == '@': #if @ => new address => send frame and set new addr.\n #create a new segment\n if segmentdata:\n self.segments.append( Segment(startAddr, ''.join(segmentdata)) )\n startAddr = int(l[1:],16)\n segmentdata = []\n else:\n for i in l.split():\n segmentdata.append(chr(int(i,16)))\n if segmentdata:\n self.segments.append( Segment(startAddr, ''.join(segmentdata)) )",
"def read_txt(filename):\n content = [] # list with word index : word count for each track\n string = '%'\n find = False \n words = [] \n track_id = [] # list with track ID's from the MSD\n mxm_tid = [] # track ID's from musiXmatch\n str_data = []\n\n read_file = open(filename, \"r\")\n \n for line in read_file:\n if find:\n line = line.strip() # converting line into list\n index1 = line.find(',') # finds index of 1st comma\n index2 = line.find(',', index1+1) # finds index of 2nd comma\n track_id.append(line[:index1]) # appends track id to list \n mxm_tid.append(line[:index2]) # appends track id to list \n res = '{' + line[index2+1:] + '}' # simulates dictionary with string\n d = eval(res) # converts string to actual dictionary \n content.append(d) # appends track data to content list\n else:\n # obtaining line with 5,000 words \n if line.startswith(string):\n line = line[1:] # getting rid of %\n words = [word.strip() for word in line.split(',')]\n find = True # already found list of words \n read_file.close() \n \n\n return (words, content, track_id, mxm_tid)",
"def parse_transcripts(transcript_lines):\n LOG.info(\"Parsing transcripts\")\n transcripts = parse_ensembl_transcripts(transcript_lines)\n\n # Since there can be multiple lines with information about the same transcript\n # we store transcript information in a dictionary for now\n parsed_transcripts = {}\n # Loop over the parsed transcripts\n for tx in transcripts:\n tx_id = tx[\"ensembl_transcript_id\"]\n ens_gene_id = tx[\"ensembl_gene_id\"]\n\n # Check if the transcript has been added\n # If not, create a new transcript\n if not tx_id in parsed_transcripts:\n tx_info = {\n \"chrom\": tx[\"chrom\"],\n \"transcript_start\": tx[\"transcript_start\"],\n \"transcript_end\": tx[\"transcript_end\"],\n \"mrna\": set(),\n \"mrna_predicted\": set(),\n \"nc_rna\": set(),\n \"ensembl_gene_id\": ens_gene_id,\n \"ensembl_transcript_id\": tx_id,\n }\n parsed_transcripts[tx_id] = tx_info\n\n tx_info = parsed_transcripts[tx_id]\n # Add the ref seq information\n if tx.get(\"refseq_mrna_predicted\"):\n tx_info[\"mrna_predicted\"].add(tx[\"refseq_mrna_predicted\"])\n if tx.get(\"refseq_mrna\"):\n tx_info[\"mrna\"].add(tx[\"refseq_mrna\"])\n if tx.get(\"refseq_ncrna\"):\n tx_info[\"nc_rna\"].add(tx[\"refseq_ncrna\"])\n\n return parsed_transcripts",
"def index_file(self, file_name):\n self.contents = []\n article_text = \"\"\n article_annots = [] # for annot-only index\n\n f = open(file_name, \"r\")\n for line in f:\n line = line.replace(\"#redirect\", \"\")\n # ------ Reaches the end tag for an article ---------\n if re.search(r'</doc>', line):\n # ignores null titles\n if wiki_uri is None:\n print \"\\tINFO: Null Wikipedia title!\"\n # ignores disambiguation pages\n elif (wiki_uri.endswith(\"(disambiguation)>\")) or \\\n ((len(article_text) < 200) and (\"may refer to:\" in article_text)):\n print \"\\tINFO: disambiguation page \" + wiki_uri + \" ignored!\"\n # ignores list pages\n elif (wiki_uri.startswith(\"<wikipedia:List_of\")) or (wiki_uri.startswith(\"<wikipedia:Table_of\")):\n print \"\\tINFO: List page \" + wiki_uri + \" ignored!\"\n # adds the document to the index\n else:\n self.__add_to_contents(Lucene.FIELDNAME_ID, wiki_uri, Lucene.FIELDTYPE_ID)\n if self.annot_only:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_annots, Lucene.FIELDTYPE_ID_TV)\n else:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_text, Lucene.FIELDTYPE_TEXT_TVP)\n self.lucene.add_document(self.contents)\n self.contents = []\n article_text = \"\"\n article_annots = []\n\n # ------ Process other lines of article ---------\n tag_iter = list(self.tagRE.finditer(line))\n # adds line to content if there is no annotation\n if len(tag_iter) == 0:\n article_text += line\n continue\n # A tag is detected in the line\n for t in tag_iter:\n tag = t.group(3)\n if tag == \"doc\":\n doc_title = self.titleRE.search(t.group(2))\n wiki_uri = WikipediaUtils.wiki_title_to_uri(doc_title.group(1)) if doc_title else None\n if tag == \"a\":\n article_text += t.group(1) + t.group(4) # resolves annotations and replace them with mention\n # extracts only annotations\n if self.annot_only:\n link_title = self.linkRE.search(t.group(2))\n link_uri = WikipediaUtils.wiki_title_to_uri(unquote(link_title.group(1))) if link_title else None\n if link_uri is not None:\n article_annots.append(link_uri)\n else:\n print \"\\nINFO: link to the annotation not found in \" + file_name\n last_span = tag_iter[-1].span()\n article_text += line[last_span[1]:]\n f.close()",
"def read_data(self, filePath):\n with open(filePath, 'r', encoding='iso-8859-1') as f:\n for sentence in f.readlines():\n sentence = sentence.replace('\\n', '')\\\n .replace('\"', '')\\\n .replace('\\'', '')\\\n .replace('.', '')\\\n .replace(',', '')\\\n .replace('[', '')\\\n .replace(']', '')\\\n .replace('(', '')\\\n .replace(')', '')\\\n .replace(':', '')\\\n .replace('--', '')\\\n .replace('-', '')\\\n .replace('\\\\', '')\\\n .replace('0', '')\\\n .replace('1', '')\\\n .replace('2', '')\\\n .replace('3', '')\\\n .replace('4', '')\\\n .replace('5', '')\\\n .replace('6', '')\\\n .replace('7', '')\\\n .replace('8', '')\\\n .replace('9', '')\\\n .replace('`', '')\\\n .replace('=', '')\\\n .replace('$', '')\\\n .replace('/', '')\\\n .replace('*', '')\\\n .replace(';', '')\\\n .replace('<b>', '')\\\n .replace('%', '')\n sentence = sentence.split(' ')\n sentence = list(filter(lambda x: x, sentence))\n if sentence:\n self.word_num += len(sentence)\n self.maxlen = self.maxlen if self.maxlen >= len(\n sentence) else len(sentence)\n self.minlen = self.minlen if self.minlen <= len(\n sentence) else len(sentence)\n if 'pos' in filePath:\n self.Pos.append([sentence, self.feelMap['pos']])\n else:\n self.Neg.append([sentence, self.feelMap['neg']])",
"def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences",
"def parseUpload(dbconnection, fileName):\n nhContent = ParseText.nohupTranscriptionContent(fileName)\n count = 0\n while count < len(nhContent[0]):\n try:\n rtf = nhContent[0][count]\n transcription = nhContent[1][count].replace(\"'\", \"''\").replace(\"_\", \"\")\n dbID = nhContent[2][count].replace(\".\", \"\")\n duration = nhContent[3][count]\n DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)\n count += 1\n except:\n print(\"couldnt upload one at index \" + str(count))\n count += 1",
"def tokenize(self, path):\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n lines_c = 0\n for line in f:\n words = ['<start>'] + line.split() + ['<eos>']\n len_ = len(words)\n tokens += len_\n if(self.max_sent_length <len_): self.max_sent_length = len_\n lines_c+=1\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n #print('Creating tensor of size: ', lines_c, self.max_sent_length)\n print('Reading files: ', path)\n ids = [] # torch.LongTensor(lines_c, self.max_sent_length)\n target_vecs = [] # torch.LongTensor(lines_c, self.max_sent_length)\n line_c = 0\n count =0\n for line in f:\n words = ['<start>'] + line.split() + ['<eos>']\n sentence_len = len(words)\n if(sentence_len>self.max_length): \n #print (\"sen len: \", sentence_len, ' exceed limit: ', self.max_length, ' skipped!!', count)\n count+=1\n continue\n ids.append([])\n target_vecs.append([])\n #if(self.max_sent_length<sentence_len): self.max_sent_length = sentence_len\n token = 0\n for word in words:\n if(token<sentence_len-1 ): ids[line_c].append( self.dictionary.word2idx[word])\n if(token>0): target_vecs[line_c].append( self.dictionary.word2idx[word] )\n token += 1\n \n line_c +=1\n\n return ids, target_vecs"
] | [
"0.67881304",
"0.626287",
"0.6229017",
"0.615032",
"0.61068505",
"0.61058336",
"0.60824823",
"0.6019963",
"0.59705794",
"0.5939803",
"0.5923286",
"0.5885868",
"0.5883228",
"0.583081",
"0.5810255",
"0.5807429",
"0.5763637",
"0.5748194",
"0.5746666",
"0.5737573",
"0.5734013",
"0.5723646",
"0.5666927",
"0.56596035",
"0.56577843",
"0.5635724",
"0.5623955",
"0.5588269",
"0.5586113",
"0.55837935"
] | 0.7538108 | 0 |
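A minimal sketch of the same regex-extraction idea as fileTranscriptionContent above, assuming the decode log has already been read into a string; only the three regexes are taken from the record, everything else (the function name, the dict keys) is illustrative:

import re

def extract_transcription_fields(log_text):
    # Pull the URL, real-time factor and transcription out of a decode log,
    # using the same patterns as the record above, but returning None for a
    # missing field instead of failing.
    url = re.search(r"URL:(.*?)\n", log_text)
    rtf = re.search(r"Timing stats: real-time factor for offline decoding was (.*?) = ", log_text)
    text = re.search(r"utterance-id1 (.*?)\n", log_text)
    return {
        "url": url.group(1) if url else None,
        "realTimeFactor": rtf.group(1) if rtf else None,
        "transcription": text.group(1) if text else None,
    }

Returning None for a missing field lets the caller decide how to handle an incomplete log rather than hitting an index error.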
deletes all contents of the specified folder (but not the folder itself).\n returns True if successful, False if an error was thrown or the number of running processes is not 0 | def cleanupFolder(folderName):
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_folder(path: str):\n try:\n if os.path.exists(path):\n shutil.rmtree(path)\n return True\n except:\n print(\"An error occured.\")",
"def _rm(folder):\n import os\n import shutil\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n return",
"def cleanup(folder):\n os.system('rm -rf %s/*' % folder)",
"def empty_trash():\n drive_service().files().emptyTrash().execute()\n\n return True",
"def rmdirtree(path):\r\n\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n try:\r\n for raiz, subcarpetas, ficheros in walk(path, topdown=False):\r\n samba.delete_files(ficheros, raiz)\r\n for s in subcarpetas:\r\n samba.delete_directory(s, raiz)\r\n except:\r\n pass\r\n else:\r\n import shutil\r\n shutil.rmtree(path, ignore_errors=True)\r\n\r\n if exists(path): # No se ha eliminado\r\n return False\r\n\r\n return True",
"def clear_cache(directory):\n if os.path.exists(directory):\n list_dirs = os.listdir(directory)\n try:\n for direct in list_dirs:\n shutil.rmtree(directory + direct)\n return True\n except:\n return False\n else:\n return False",
"def delete_directory_contents(conn_obj, path, device=\"dut\"):\n command = \"rm -rf {}/*\".format(path.rstrip(\"/\"))\n if device == \"dut\":\n st.config(conn_obj, command)\n else:\n conn_obj.execute_command(conn_obj, command)\n return True",
"def rmdir_empty(f):\n if not f.is_dir():\n return 0\n removable = True\n result = 0\n for i in f.iterdir():\n if i.is_dir():\n result += rmdir_empty(i)\n removable = removable and not i.exists()\n else:\n removable = removable and (i.name == '.DS_Store')\n if removable:\n items = list(f.iterdir())\n assert not items or items[0].name == '.DS_Store'\n print(f)\n shutil.rmtree(f)\n result += 1\n\n return result",
"def delete_all(self, prog:progress=None): \n\t\tself.__output_status('Delete all files')\n\t\tif (self.__check_terminated()):\n\t\t\treturn;\t\n\t\tdelete_dir(self.root)\n\t\ttime.sleep(0.3)",
"def delete_folder(path):\n command = ['rm', '-rf', TEST_DIR]\n file_operation(path, command)",
"def __rmtree(path):\n logger.info(\"rmtree: %s\" % path)\n try:\n shutil.rmtree(path)\n return True\n except Exception as e:\n logger.error(\"rmtree: %s failed! Error: %s\" % (path, e))\n return False",
"def _safe_clear_dirflow(path):\n print(\"Clearing {}...\".format(path))\n assert os.path.isdir(path), \"Didn't pass a folder to be cleaned\"\n list_dir = [f for f in os.listdir(path) if not f.startswith('.')]\n for folder in list_dir:\n cat_folder = os.path.join(path, folder)\n assert os.path.isdir(cat_folder), \\\n \"Dir contains Non-Folder File!\"\n cat_folder_item = [f for f in os.listdir(cat_folder)\n if not f.startswith('.')]\n for file in cat_folder_item:\n # For every file, confirm is PNG or error.\n # DONT DELETE YET, IN CASE OF ERRORS!\n assert \".png\" in file, \"Folder has Non PNG Contents!\"\n # If we got though that with no error, then now we can delete!\n # for folder in os.listdir(the_path):\n # cat_folder = os.path.join(the_path, folder)\n # for file in os.listdir(cat_folder):\n # os.remove(os.path.join(cat_folder, file))\n # os.rmdir(cat_folder)\n # os.rmdir(the_path)\n return True",
"def rmEmptyDir(path):\n try:\n os.rmdir(path)\n except OSError as exc:\n return False\n return True",
"def clear_directory(folder_path):\n for the_file in os.listdir(folder_path):\n try:\n file_path = os.path.join(folder_path, the_file)\n if os.path.isfile(file_path) \\\n and the_file != RefreshListScript \\\n and not the_file.endswith(('.txt', 'py')):\n os.unlink(file_path)\n except Exception as e:\n print(e)",
"def clear_directory(folder_name):\n for the_file in os.listdir(folder_name):\n file_path = os.path.join(folder_name, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)",
"def deleteImageFolder(pause=5):\n try:\n shutil.rmtree(imageFolder)\n except PermissionError:\n # Still busy creating the montage or something. Try once more\n time.sleep(pause)\n shutil.rmtree(imageFolder)\n except FileNotFoundError:\n # Folder already gone\n pass",
"def delete_with_retry(folder):\n\n for _i in range(0, 5):\n try:\n if os.path.exists(folder):\n shutil.rmtree(folder)\n\n return\n except:\n time.sleep(0.1)\n\n print(\"Could not delete directory after 5 attempts: %s\" % folder)\n sys.exit(1)",
"def clear_cache():\n if not(os.path.isdir(cache_dir)):\n return True\n try:\n for f in os.listdir(cache_dir):\n os.remove(cache_dir + f)\n return True\n except:\n return False",
"async def remove_data(folder: Path) -> None:\n try:\n proc = await asyncio.create_subprocess_exec(\n \"rm\", \"-rf\", str(folder), stdout=asyncio.subprocess.DEVNULL\n )\n\n _, error_msg = await proc.communicate()\n except OSError as err:\n error_msg = str(err)\n else:\n if proc.returncode == 0:\n return\n\n _LOGGER.error(\"Can't remove Add-on Data: %s\", error_msg)",
"def eraseDatas(folderToRemove='datas'):\n directoryToRemove = os.path.join(pathtofolder(), folderToRemove)\n for i in os.listdir(directoryToRemove):\n os.remove(os.path.join(directoryToRemove, i))\n os.rmdir(directoryToRemove) # Now the folder is empty of files\n pass",
"def cleanup(self):\n\t\tfor filename in self.cfg_files:\n\t\t\tif os.path.isfile(filename):\n\t\t\t\tsize = os.stat(filename)[6]\n\t\t\t\tif size == 0:\n\t\t\t\t\tos.remove(filename)\n\n\t\treturn True",
"def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/project-folders/%s\" % self.project_folder_id)",
"def flush_outputs():\n try:\n shutil.rmtree(ROOT_OUTPUT_DIR)\n print(\"Removed directory '{}'!\".format(ROOT_OUTPUT_DIR))\n return True\n except FileNotFoundError:\n print(\"Directory '{}' already removed!\".format(ROOT_OUTPUT_DIR))\n return False",
"def DeleteFolderContents(dir):\n create_dir(dir)\n shutil.rmtree(dir)\n create_dir(dir)",
"def DeleteCheckedFiles(self):\n nb = self._model.delete_checked_files()\n if nb > 0:\n self.__refresh()\n return True\n return False",
"def purge_workflow_file(path):\n logger = fsurfer.log.get_logger()\n if not os.path.exists(path):\n return True\n try:\n if os.path.isfile(path):\n os.unlink(path)\n elif os.path.isdir(path):\n os.rmdir(path)\n return True\n except OSError as e:\n logger.exception(\"Exception: {0}\".format(str(e)))\n return False",
"def removeDir(directory):\n if os.path.exists(directory):\n statusDeletion = shutil.rmtree(directory)\n else:\n statusDeletion = 2\n return statusDeletion",
"def delete_file_or_folder(file_id: str) -> bool:\n # TODO Create unit test for this delete_file_or_folder\n try:\n drive_service().files().delete(fileId=file_id).execute()\n file_deleted_status = True\n\n except errors.HttpError:\n file_deleted_status = False\n\n return file_deleted_status",
"def delete_temp_folder():\n\n tempFolder = os.path.join(os.getenv(\"APPDATA\"), \"GARI\\Temp\")\n\n if os.path.exists(tempFolder):\n for file in os.listdir(tempFolder):\n arcpy.Delete_management(os.path.join(tempFolder, file))",
"def clean_all_folder():\n LOGGER.warning('removal of old files has been temporarily disabled')\n # paths_to_clean = CFG.remove_files\n # if paths_to_clean: # pylint: disable=using-constant-test\n # for remove_config in paths_to_clean: # pylint: disable=not-an-iterable\n # name = tuple(remove_config.keys())[0]\n # LOGGER.info(f'processing: {name}')\n # remove_config = remove_config[name]\n # if 'folder' not in remove_config.keys():\n # LOGGER.error(f'missing \"folder\" in {name}')\n # return\n # if 'age' not in remove_config.keys():\n # LOGGER.error(f'missing \"age\" in {name}')\n # return\n # if not os.path.exists(remove_config['folder']):\n # LOGGER.error(f'path does not exist: {remove_config[\"folder\"]}')\n # return\n # _remove_old_files_from_folder(**remove_config)\n # else:\n # LOGGER.debug('no folder to clean')"
] | [
"0.65608245",
"0.6207403",
"0.60895413",
"0.6053084",
"0.60113263",
"0.5985281",
"0.5914889",
"0.58811086",
"0.58790207",
"0.5868364",
"0.5852713",
"0.5824594",
"0.5818425",
"0.58163047",
"0.5810179",
"0.57717085",
"0.5769049",
"0.5744798",
"0.5744438",
"0.5740823",
"0.5726497",
"0.5724047",
"0.5721212",
"0.5715005",
"0.56995",
"0.5688822",
"0.56862587",
"0.5661347",
"0.56559616",
"0.56542426"
] | 0.70612246 | 0 |
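An illustrative alternative to the shell-based cleanup in the record above, assuming a plain local directory; this is not the record's method, just a sketch of the same behaviour using pathlib and shutil so no subprocess is needed:

import shutil
from pathlib import Path

def clear_folder(folder_name):
    # Delete everything inside folder_name but keep the folder itself.
    # Returns True when every entry was removed, False otherwise.
    ok = True
    for entry in Path(folder_name).iterdir():
        try:
            if entry.is_dir() and not entry.is_symlink():
                shutil.rmtree(entry)
            else:
                entry.unlink()
        except OSError:
            ok = False
    return ok

Treating symlinked directories as plain entries (unlink, not rmtree) avoids following a link out of the folder being cleared.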
gets the number of running transcription processes | def numRunningProcesses():
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np == None):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_processes():\n return 1",
"def num_processes(self):\n return 1",
"def num_processes(self, new_value):",
"def n_worker(self):\n return self.redis.pubsub_numsub(MSG)[0][-1]",
"def GetNumberOfResultsProcessed(self) -> int:\n return self.i",
"def numRunning(self):\n #with self.__queueLock:\n # The size of the list does not change, only its contents, so I don't\n # think there should be any conflict if we are reading a variable from\n # one thread and updating it on the other thread.\n activeRuns = sum(run is not None for run in self.__running)\n\n return activeRuns",
"def number_of_jobs_in_queue():\n\n # Initialize #\n user_name = get_username()\n\n process = subprocess.check_output([\"squeue\", \"-u\", user_name])\n\n return len([line for line in process.split(\"\\n\") if user_name in line])",
"def _n_workers(self, processes: int = 2) -> int:\n if 2 <= processes <= cpu_count():\n n_workers = processes\n else:\n n_workers = cpu_count()\n return n_workers",
"def get_count(bam, max_workers):\n print (\"Count total number of paired reads in %s ...\"%bam)\n cmd = ['samtools','view','-c','-f', '3','-@',str(max_workers),bam]\n out, err = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout=subprocess.PIPE).communicate()\n return int(out.split()[0])",
"def processes(self):\n return self._getint('processes')",
"def numRunningTotal(self):\n activeRuns = sum(run is not None for run in self.__running + self.__clientRunning)\n return activeRuns",
"def numprocesses(self):\n info = self.info()\n return info['max_processes']",
"def count(self):\n return len(self._runs)",
"def get_num_sequence(self):\n return len(self.study_list)",
"def num_run_trajs(self, run_idx):\n return len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])",
"def get_num_processes(profileDict):\n assert isinstance(profileDict, dict)\n\n return profileDict[\"info\"][\"number_of_processes\"]",
"def total_nt(self) -> int:\n return self.sequence.length",
"def processor_count(self):\n return self._processor_count",
"def number_of_sequences(self):\n return self.sequence_last() + 1",
"def get_num_jobs(self):\n return str(self.num_jobs)",
"def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1",
"def count():",
"def get_number_executors(self):\n with self.__threads_lock:\n return self.__number_executors",
"def execution_count(self) -> int:\n return pulumi.get(self, \"execution_count\")",
"def number_of_workers():\n return (cpu_count() * 2) + 1",
"def get_procs_count(proc_name):\n procs = subprocess.check_output(['ps','-ef']).splitlines()\n name_procs = [proc for proc in procs if proc_name.encode() in proc]\n return len(name_procs)",
"def n_tasks(self) -> int:\n pass",
"def num_trials(self):",
"def procs_running():\n \n return __proc_stat('procs_running')",
"def numProcs(reportname):\n with open(reportname, \"rb\") as f:\n data = json.load(f)\n numProcesses = len(data[\"behavior\"][\"processes\"])\n return numProcesses"
] | [
"0.7056694",
"0.6808125",
"0.6725539",
"0.66032845",
"0.64222586",
"0.6406968",
"0.6369945",
"0.6350684",
"0.6319151",
"0.6316894",
"0.631224",
"0.62300086",
"0.62215966",
"0.6156739",
"0.6135421",
"0.6128718",
"0.61258286",
"0.611203",
"0.6101045",
"0.60927176",
"0.6082708",
"0.6049304",
"0.60454756",
"0.60439026",
"0.6039683",
"0.60033643",
"0.5998477",
"0.5984397",
"0.59553885",
"0.5938129"
] | 0.73324955 | 0 |
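A hedged sketch of the same process-counting idea without the ps/grep pipeline, assuming pgrep is available on the host; the function name and the name_fragment parameter are illustrative, not part of the record above:

import subprocess

def count_matching_processes(name_fragment):
    # Count running processes whose full command line contains name_fragment.
    # pgrep -f -c does the counting itself, so there is no grep or shell
    # wrapper process to subtract the way a "ps | grep" pipeline requires.
    proc = subprocess.run(
        ["pgrep", "-f", "-c", name_fragment],
        stdout=subprocess.PIPE,
        text=True,
    )
    try:
        return int(proc.stdout.strip() or 0)
    except ValueError:
        return 0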
Does everything needed to transcribe a podcast given the filename:\n download the podcast, wait 40 seconds, convert it to .wav, wait 10 seconds, remove the .mp3 file, then run the transcription | def transcribeAll(service, url, fileName):
if(service == "omny.fm"):
url = url.replace(".mp3","") + ".mp3"
subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
+ fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
+ fileName + ".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false "
+ "--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 "
+ "--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id" + fileName
+ " utterance-id" + fileName + "|' 'scp:echo utterance-id" + fileName + " ./podcasts/" + fileName + ".wav|' 'ark:/dev/null' &", shell=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transcribe_audio_file(filename):\n url = 'https://api.nexiwave.com/SpeechIndexing/file/storage/' + USERNAME +'/recording/?authData.passwd=' + PASSWORD + '&auto-redirect=true&response=application/json'\n\n # To receive transcript in plain text, instead of html format, comment this line out (for SMS, for example)\n #url = url + '&transcriptFormat=html'\n\n\n # Ready to send:\n sys.stderr.write(\"Send audio for transcript with \" + url + \"\\n\")\n r = requests.post(url, files={'mediaFileData': open(filename,'rb')})\n data = r.json()\n transcript = data['text']\n foo = data['text']\n f = open('newf.txt', 'w')\n f.write(foo)\n f.close() \n # Perform your magic here:\n print \"Transcript for \"+filename+\"=\" + transcript",
"async def download_audio(event):\n url = event.pattern_match.group(1)\n rmsg = await event.get_reply_message()\n if not url and rmsg:\n myString = rmsg.text\n url = re.search(\"(?P<url>https?://[^\\s]+)\", myString).group(\"url\")\n if not url:\n return await edit_or_reply(event, \"`What I am Supposed to find? Give link`\")\n codevent = await edit_or_reply(event, \"`Preparing to download...`\")\n reply_to_id = await reply_id(event)\n ytdl_data = await ytdl_down(codevent, audio_opts, url)\n if ytdl_data is None:\n return\n await codevent.edit(\n f\"`Preparing to upload song:`\\\n \\n**{ytdl_data['title']}**\\\n \\nby *{ytdl_data['uploader']}*\"\n )\n f = pathlib.Path(f\"{ytdl_data['title']}.mp3\".replace(\"|\", \"_\"))\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.mp3.jpg\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.mp3.webp\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = None\n c_time = time.time()\n ul = io.open(f, \"rb\")\n uploaded = await event.client.fast_upload_file(\n file=ul,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(d, t, codevent, c_time, \"upload\", file_name=f)\n ),\n )\n ul.close()\n attributes, mime_type = await fix_attributes(f, ytdl_data, supports_streaming=True)\n media = types.InputMediaUploadedDocument(\n file=uploaded,\n mime_type=mime_type,\n attributes=attributes,\n thumb=await event.client.upload_file(codthumb) if codthumb else None,\n )\n await event.client.send_file(\n event.chat_id,\n file=media,\n reply_to=reply_to_id,\n caption=ytdl_data[\"title\"],\n supports_streaming=True,\n force_document=False,\n )\n os.remove(f)\n if codthumb:\n os.remove(codthumb)\n await codevent.delete()",
"def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None",
"def main():\n # transcribe_audio()\n summarize()",
"def subprocess_transcribe_function( fname, voicenote_filename_regex ):\n if not hasattr( subprocess_transcribe_function, \"client\" ):\n # Init function failed.\n return None\n if subprocess_transcribe_function.verbose:\n # TODO: We should (probably?) queue these messages and print() on a single thread/process...but....\n print( \"Transcribing {}...\".format( fname ) )\n try:\n ret = ( recording_date_from_full_path( fname, voicenote_filename_regex ), fname, transcribe_wav( fname, client=subprocess_transcribe_function.client ) )\n except BaseException as e:\n # Do NOT kill the program. We'll leave the audio file in the unprocessed directory.\n print( \"ERROR:\" )\n print( e )\n ret = None\n return ret",
"def main():\n st.info(\n \"This webpage lets you upload wav audio file and transribe it to Amharic, CHECK THAT OUT !!\")\n st.markdown(STYLE, unsafe_allow_html=True)\n st.header(\"Upload audio file\")\n file = st.file_uploader(\"Audio file\", type=FILE_TYPES)\n show_file = st.empty()\n if not file:\n show_file.info(\"Please upload a file of type: \" +\n \", \".join(FILE_TYPES))\n return\n\n file_type = get_file_type(file)\n if file_type == FileType.PYTHON:\n st.code(file.getvalue())\n\n elif file_type == FileType.SOUND:\n # st.code(file.getvalue())\n audio_bytes = file.read()\n st.audio(audio_bytes, format=\"audio/ogg\")\n\n else:\n data = pd.read_csv(file)\n st.dataframe(data.head(10))\n\n with open(os.path.join(\"./tempfile\", file.name), \"wb\") as f:\n f.write(file.getbuffer())\n st.success(\"Processing File..\")\n\n st.header(\"Transcribe audio\")\n if st.button('Transcribe'):\n st.write(\"\")\n with st.spinner('wait for it ...'):\n time.sleep(60)\n st.success('Done!')\n else:\n st.write('')\n\n # if file:\n # token, t_id = upload_file(file)\n # result = {}\n # #polling\n # sleep_duration = 1\n # percent_complete = 0\n # progress_bar = st.progress(percent_complete)\n # st.text(\"Currently in queue\")\n # while result.get(\"status\") != \"processing\":\n # percent_complete += sleep_duration\n # time.sleep(sleep_duration)\n # progress_bar.progress(percent_complete/10)\n # result = get_text(token,t_id)\n\n # sleep_duration = 0.01\n\n # for percent in range(percent_complete,101):\n # time.sleep(sleep_duration)\n # progress_bar.progress(percent)\n\n # with st.spinner(\"Processing.....\"):\n # while result.get(\"status\") != 'completed':\n # result = get_text(token,t_id)\n\n # st.balloons()\n # st.header(\"Transcribed Text\")\n # st.subheader(result['text'])\n\n file.close()",
"def main():\n\n # Parse arguments\n parser = OptionParser()\n parser.add_option('-n', '--subscription_key', dest='subscription_key',\n help='subscription_key for authentication')\n parser.add_option('-t', '--text', dest='text',\n help='text to synthesize')\n parser.add_option('-l', '--language', dest='language',\n help='language')\n parser.add_option('-g', '--gender', dest='gender',\n help='gender')\n parser.add_option('-d', '--directory', dest='directory',\n help='directory to store the file')\n (options, args) = parser.parse_args()\n subscription_key = options.subscription_key\n text = options.text\n language = options.language\n gender = options.gender\n directory = options.directory\n\n # Perform sanity checks on options\n validate_options(subscription_key, text)\n\n if not directory:\n directory = default_directory\n\n if not language:\n language = default_language\n\n if not gender:\n gender = default_gender\n\n # format = 'riff-16khz-16bit-mono-pcm'\n format = 'riff-8khz-8bit-mono-mulaw'\n\n # lang = 'en-AU'\n # gender = 'Female'\n tts_msspeak = MSSpeak(subscription_key, '/tmp/')\n tts_msspeak.set_cache(False)\n output_filename = tts_msspeak.speak(text, language, gender, format)\n\n print 'Recorded TTS to %s%s' % (directory, output_filename)",
"def process_speak_listen(device_index, mp3_filename, text, record, flag):\n\n mp3_filename = mp3_filename + \".mp3\"\n try:\n tts = gTTS(text=text, lang='en', slow=False)\n tts.save(mp3_filename)\n playsound(mp3_filename)\n os.remove(mp3_filename)\n\n if flag != 1:\n with sr.Microphone(device_index=device_index) as source:\n record.adjust_for_ambient_noise(source, duration=1)\n print(\"Speak:\")\n os.system(\"zenity --progress --width=400 --height=200 --title='Speak Now' \"\n \"--text='Speak Now......No need to click OK button' --no-cancel &\")\n try:\n audio = record.listen(source, timeout=5)\n text = record.recognize_google(audio)\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(text)\n except LookupError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : LookupError - Could not able to understand\")\n text = None\n except speech_recognition.WaitTimeoutError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : WaitTimeoutError - Could not able to listen anything for 5 seconds\")\n text = None\n except speech_recognition.UnknownValueError:\n os.system(\"ps -ef|grep zenity|awk '{print $2}'|head -1|xargs kill -9\")\n print(\"ERROR : UnknownValueError - Could not able to listen anything for 5 seconds\")\n text = None\n except gtts.tts.gTTSError:\n print(\"ERROR : Connection Error : No internet connection.\")\n exit_program()\n except PermissionError:\n print(\"ERROR : No permission\")\n exit_program()\n\n return text",
"def execute(self, **kwargs):\n if \"text\" not in kwargs:\n return ''\n phrase = str(kwargs[\"text\"])\n \n names = {\n \"callie\": \"6.5\",\n \"lawrence\": \"8.5\"\n }\n name = \"callie\"\n\n #TODO find a better way of implementing TTS\n ttsfd, ttsfile = tempfile.mkstemp(\".wav\")\n outfile, outname = tempfile.mkstemp(\".wav\")\n try:\n \n tts = sp.Popen(['/opt/swift/bin/swift', '-o', ttsfile, '-n', name, phrase], stdout=sp.PIPE, stderr=sp.PIPE)\n# cmd = ('/opt/swift/bin/swift \"' + phrase + '\" -o ' + ttsname + ' && sox -V1 ' +\n# tmp + ' -t wav ' + tmp2 + ' trim 8 ;')\n# p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n# out, err = p.communicate()\n# if len(err) > 0:\n# return err\n\n out, err = tts.communicate()\n if not err:\n sox = sp.Popen(['sox', '-V1', ttsfile, '-t', 'wav', outname, 'trim', names[name]], stdout=sp.PIPE, stderr=sp.PIPE)\n out, err = sox.communicate()\n\n player = gst.element_factory_make(\"playbin2\", \"player\")\n bus = player.get_bus()\n bus.add_signal_watch()\n\n mainloop = gobject.MainLoop()\n\n def quit(bus, message):\n mainloop.quit()\n\n bus.connect(\"message::eos\", quit)\n bus.connect(\"message::error\", quit)\n player.set_property(\"uri\", 'file://' + outname)\n player.set_state(gst.STATE_PLAYING)\n\n try:\n mainloop.run()\n finally:\n player.set_state(gst.STATE_NULL)\n\n finally:\n try:\n os.remove(ttsfile)\n except OSError as err:\n print e\n try:\n os.remove(outname)\n except IOError as err:\n print err",
"def transcribe_streaming_voice_activity_timeouts(\n project_id: str,\n speech_start_timeout: int,\n speech_end_timeout: int,\n audio_file: str,\n) -> cloud_speech.StreamingRecognizeResponse:\n # Instantiates a client\n client = SpeechClient()\n\n # Reads a file as bytes\n with open(audio_file, \"rb\") as f:\n content = f.read()\n\n # In practice, stream should be a generator yielding chunks of audio data\n chunk_length = len(content) // 20\n stream = [\n content[start : start + chunk_length]\n for start in range(0, len(content), chunk_length)\n ]\n audio_requests = (\n cloud_speech.StreamingRecognizeRequest(audio=audio) for audio in stream\n )\n\n recognition_config = cloud_speech.RecognitionConfig(\n auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),\n language_codes=[\"en-US\"],\n model=\"long\",\n )\n\n # Sets the flag to enable voice activity events and timeout\n speech_start_timeout = duration_pb2.Duration(seconds=speech_start_timeout)\n speech_end_timeout = duration_pb2.Duration(seconds=speech_end_timeout)\n voice_activity_timeout = (\n cloud_speech.StreamingRecognitionFeatures.VoiceActivityTimeout(\n speech_start_timeout=speech_start_timeout,\n speech_end_timeout=speech_end_timeout,\n )\n )\n streaming_features = cloud_speech.StreamingRecognitionFeatures(\n enable_voice_activity_events=True, voice_activity_timeout=voice_activity_timeout\n )\n\n streaming_config = cloud_speech.StreamingRecognitionConfig(\n config=recognition_config, streaming_features=streaming_features\n )\n\n config_request = cloud_speech.StreamingRecognizeRequest(\n recognizer=f\"projects/{project_id}/locations/global/recognizers/_\",\n streaming_config=streaming_config,\n )\n\n def requests(config: cloud_speech.RecognitionConfig, audio: list) -> list:\n yield config\n for message in audio:\n sleep(0.5)\n yield message\n\n # Transcribes the audio into text\n responses_iterator = client.streaming_recognize(\n requests=requests(config_request, audio_requests)\n )\n\n responses = []\n for response in responses_iterator:\n responses.append(response)\n if (\n response.speech_event_type\n == cloud_speech.StreamingRecognizeResponse.SpeechEventType.SPEECH_ACTIVITY_BEGIN\n ):\n print(\"Speech started.\")\n if (\n response.speech_event_type\n == cloud_speech.StreamingRecognizeResponse.SpeechEventType.SPEECH_ACTIVITY_END\n ):\n print(\"Speech ended.\")\n for result in response.results:\n print(f\"Transcript: {result.alternatives[0].transcript}\")\n\n return responses",
"def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)",
"def do_play(*_args):\n print(last_wav_path)\n if last_wav_path and last_wav_path.is_file():\n threading.Thread(\n target=lambda: subprocess.check_call(\n [\"aplay\", \"-q\", str(last_wav_path)]\n )\n ).start()",
"async def play(self, ctx, *, filename: str):\r\n if not ctx.voice_client:\r\n await self.connect(ctx)\r\n if filename not in self.audio_files:\r\n await ctx.send(\"File {0} not found\".format(filename))\r\n await self.audiofiles(ctx)\r\n else:\r\n ctx.voice_client.play(discord.FFmpegPCMAudio(source=\"{0}{1}.mp3\".format(self.audio_base_dir, filename)))\r\n await ctx.message.delete()",
"def playMessage(msg):\n tts = gTTS(msg, lang=\"pt-br\")\n file = \"./audios/temp.mp3\"\n\n tts.save(file)\n player = MediaPlayer(file)\n player.play()\n sleep(10)\n os.remove(file)",
"def transcribe(config):\n\n long_mode = True\n\n if 'audio_data' not in config:\n raise KeyError(\"`audio_data` not specified for transcription operation.\")\n\n if 'timeout' not in config:\n raise KeyError(\"`timeout` not specified for transcription operation.\")\n\n try:\n if config.pop('audio_duration') < 60: \n long_mode = False\n except KeyError:\n pass\n\n if long_mode:\n print(\"Running in long audio duration mode (audio is >60 seconds duration)...\")\n print(\"Uploading file...\")\n remote_object = gcloud_upload_file(config['audio_data'], config['storage_bucket'])\n file_name = remote_object.rsplit('/', 1)[-1]\n\n config['audio_data'] = \"gs://%s/%s\" % (config['storage_bucket'], file_name)\n storage_bucket = config.pop('storage_bucket')\n\n print(\"Transcribing file...\")\n result = gcloud_transcribe_long(config)\n\n print(\"Transcription successful, cleaning up...\")\n print(\"Deleting uploaded GCS file...\")\n gcloud_delete_file(file_name, storage_bucket)\n else:\n print(\"Transcribing file...\")\n config.pop('timeout')\n config.pop('storage_bucket')\n result = gcloud_transcribe_short(config)\n\n return result",
"def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == \"win32\":\n print(\"Running on windows... on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()",
"def act(self, audio_file=None):\n #file as source\n if self.src == 'file':\n if audio_file is None:\n raise ValueError(\"Please provide a audio_file\")\n return None\n elif not os.path.exists(audio_file):\n raise FileNotFoundError(\"Specified file not found\")\n return None\n else:\n file = speech_recognition.AudioFile(audio_file)\n with file:\n speech = self.recog_obj.record(file)\n \n #mic as source\n elif self.src == 'mic':\n if audio_file is not None:\n print(\"WARNING: source is set to device microphone. Audio file will be ignored\\n\")\n \n try:\n with self.mic_obj:\n print(\"Speak into the mic....\\n\")\n self.recog_obj.adjust_for_ambient_noise(self.mic_obj)\n speech = self.recog_obj.listen(self.mic_obj)\n #if microphone is not detected\n except OSError:\n print(\"Error: Microphone not detected\")\n return None\n \n \n try:\n print(\"Please wait while we transcribe...\\n\")\n text = self.recog_obj.recognize_google(speech, language='en', show_all=self.debug)\n \n #if audio is not detected\n except speech_recognition.UnknownValueError:\n print(\"Error: Sorry audio not detected by device microphone\")\n return None\n \n #if there is connection issue or api issue\n except speech_recognition.RequestError:\n print(\"Error: API for transcription is not reachable. There may be some connection issue or server side issue\")\n return None\n \n #for imposing various rules to text \n #But if debug mode is enabled, transcript variable will store a dictionary of various transcriptions \n #along with their confidence probabilities, so conversion rules are disabled meanwhile \n transcript = self.tcr.deconcat(text) if not self.debug else text\n return transcript",
"def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):\n speech_config = speechsdk.SpeechConfig(subscription = pa.stt_key, region = pa.stt_region)\n # If necessary, you can enable a proxy here: \n # set_proxy(hostname: str, port: str, username: str, password: str)\n if enable_proxy: \n speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])\n # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted\n speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)\n if pa.stt_endpoint != \"\": \n speech_config.endpoint_id = pa.stt_endpoint\n logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')\n results = []\n filenames = []\n for index, audio in enumerate(glob.iglob(f'{speech_files}*av')):\n result, filename = request_endpoint(audio, speech_config, output_directory, lexical)\n results.append(result)\n filenames.append(filename)\n # Check the result\n return zip(filenames, results)",
"async def transcribe_stream(args: argparse.Namespace, core: Voice2JsonCore) -> None:\n from rhasspyasr import Transcription\n from rhasspysilence import VoiceCommand, VoiceCommandResult\n\n # Make sure profile has been trained\n assert core.check_trained(), \"Not trained\"\n\n wav_sink = None\n wav_dir = None\n if args.wav_sink:\n wav_sink_path = Path(args.wav_sink)\n if wav_sink_path.is_dir():\n # Directory to write WAV files\n wav_dir = wav_sink_path\n else:\n # Single WAV file to write\n wav_sink = open(args.wav_sink, \"wb\")\n\n event_sink = None\n if args.event_sink:\n if args.event_sink == \"-\":\n event_sink = sys.stdout\n else:\n event_sink = open(args.event_sink, \"w\")\n\n # Record command\n recorder = core.get_command_recorder()\n recorder.start()\n\n voice_command: typing.Optional[VoiceCommand] = None\n\n # Expecting raw 16-bit, 16Khz mono audio\n audio_source = await core.make_audio_source(args.audio_source)\n\n # Audio settings\n sample_rate = int(pydash.get(core.profile, \"audio.format.sample-rate-hertz\", 16000))\n sample_width = (\n int(pydash.get(core.profile, \"audio.format.sample-width-bits\", 16)) // 8\n )\n channels = int(pydash.get(core.profile, \"audio.format.channel-count\", 1))\n\n # Get speech to text transcriber for profile\n transcriber = core.get_transcriber(open_transcription=args.open, debug=args.debug)\n\n # Set after a transcription has been printed\n transcription_printed = threading.Event()\n\n # Run transcription in separate thread\n frame_queue: \"Queue[typing.Optional[bytes]]\" = Queue()\n\n def audio_stream() -> typing.Iterable[bytes]:\n \"\"\"Read audio chunks from queue and yield.\"\"\"\n frames = frame_queue.get()\n while frames:\n yield frames\n frames = frame_queue.get()\n\n def transcribe_proc():\n \"\"\"Transcribe live audio stream indefinitely.\"\"\"\n while True:\n # Get result of transcription\n transcribe_result = transcriber.transcribe_stream(\n audio_stream(), sample_rate, sample_width, channels\n )\n\n _LOGGER.debug(\"Transcription result: %s\", transcribe_result)\n\n transcribe_result = transcribe_result or Transcription.empty()\n transcribe_dict = dataclasses.asdict(transcribe_result)\n transcribe_dict[\"timeout\"] = is_timeout\n\n print_json(transcribe_dict)\n transcription_printed.set()\n\n threading.Thread(target=transcribe_proc, daemon=True).start()\n\n # True if current voice command timed out\n is_timeout = False\n\n # Number of events for pending voice command\n event_count = 0\n\n # Number of transcriptions that have happened\n num_transcriptions = 0\n\n print(\"Ready\", file=sys.stderr)\n\n try:\n chunk = await audio_source.read(args.chunk_size)\n while chunk:\n # Reset event\n transcription_printed.clear()\n\n # Look for speech/silence\n voice_command = recorder.process_chunk(chunk)\n\n if event_sink:\n # Print outstanding events\n for event in recorder.events[event_count:]:\n print_json(dataclasses.asdict(event), out_file=event_sink)\n\n event_count = len(recorder.events)\n\n if voice_command:\n is_timeout = voice_command.result == VoiceCommandResult.FAILURE\n\n # Force transcription\n frame_queue.put(None)\n\n # Reset\n audio_data = recorder.stop()\n if wav_dir:\n # Write WAV to directory\n wav_path = (wav_dir / time.strftime(args.wav_filename)).with_suffix(\n \".wav\"\n )\n wav_bytes = core.buffer_to_wav(audio_data)\n wav_path.write_bytes(wav_bytes)\n _LOGGER.debug(\"Wrote %s (%s byte(s))\", wav_path, len(wav_bytes))\n elif wav_sink:\n # Write to WAV file\n wav_bytes = core.buffer_to_wav(audio_data)\n 
wav_sink.write(wav_bytes)\n _LOGGER.debug(\n \"Wrote %s (%s byte(s))\", args.wav_sink, len(wav_bytes)\n )\n\n num_transcriptions += 1\n\n # Wait for transcription to be printed\n transcription_printed.wait(timeout=args.timeout)\n\n # Check exit count\n if (args.exit_count is not None) and (\n num_transcriptions >= args.exit_count\n ):\n _LOGGER.debug(\"Exit count reached\")\n break\n\n recorder.start()\n else:\n # Add to current command\n frame_queue.put(chunk)\n\n # Next audio chunk\n chunk = await audio_source.read(args.chunk_size)\n finally:\n transcriber.stop()\n\n try:\n await audio_source.close()\n except Exception:\n pass",
"def main():\n\n start_program()\n yes_syn_words, no_syn_words, stop_words, record, mp3_filename, text, device_index, output_file = \\\n process_parameter_set()\n stand_alone_flag = process_check_input_argument()\n process_speak_listen(device_index, mp3_filename, text, record, flag=1)\n text = process_name(device_index, mp3_filename, record)\n input_details = process_speak_listen(device_index, mp3_filename, text, record, flag=0)\n response = process_input_details(device_index, input_details, mp3_filename, record, yes_syn_words, no_syn_words,\n stop_words)\n process_output_file_write(output_file, response)\n process_delete_mp3_output_files(stand_alone_flag)\n exit_program()",
"def pron(word):\n\n return send_from_directory('prons', word + \".mp3\", mimetype=\"audio/mpeg\")",
"def transcribe_gcs(gcs_uri):\n from google.cloud import speech\n from google.cloud.speech import enums\n from google.cloud.speech import types\n client = speech.SpeechClient()\n\n audio = types.RecognitionAudio(uri=gcs_uri)\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.FLAC,\n enable_word_time_offsets=True,\n #sample_rate_hertz=32000,\n language_code='en-US')\n\n operation_start_time = time.time()\n operation = client.long_running_recognize(config, audio)\n\n\n print('Waiting for operation to complete...')\n response = operation.result(timeout=None)\n operation_end_time = time.time()\n operation_elapsed_time = operation_end_time - operation_start_time\n operation_time_string = format_time_string(operation_elapsed_time)\n\n last_result_index = len(response.results)-1\n last_word_index = len(response.results[last_result_index].alternatives[0].words)-1\n audio_duration = response.results[last_result_index].alternatives[0].words[last_word_index].end_time.seconds\n audio_duration_string = format_time_string(audio_duration)\n\n counter = 1\n srt_file_name = gcs_uri[gcs_uri.rfind(\"/\")+1:gcs_uri.rfind(\".mp4-audio.\")]+\".srt\"\n srt_file = open(srt_file_name, \"w\")\n\n srt_file_name2 = gcs_uri[gcs_uri.rfind(\"/\") + 1:gcs_uri.rfind(\".mp4-audio.\")] + \"2.srt\"\n srt_file2 = open(srt_file_name2, \"w\")\n\n transcription_file_name = gcs_uri[gcs_uri.rfind(\"/\") + 1:gcs_uri.rfind(\"-audio.\")] + \"-transcription.txt\"\n transcription_file = open(transcription_file_name, \"w\")\n\n word_list = concat_word_list(response.results)\n phrase_list = make_phrase_list(word_list)\n write_srt_file(srt_file2, phrase_list)\n\n # Print the first alternative of all the consecutive results.\n for result in response.results:\n transcript = result.alternatives[0].transcript.strip()\n seconds = result.alternatives[0].words[0].start_time.seconds\n last_word_index = len(result.alternatives[0].words)-1\n end_seconds = result.alternatives[0].words[last_word_index].end_time.seconds\n outstring = format_time_string(seconds) + \" - \" +transcript\n print(outstring + \"\\n\")\n transcription_file.write(outstring + \"\\n\\n\")\n\n # now write to srt file\n srt_file.write(str(counter)+\"\\n\")\n start_time_code = format_time_string(seconds) + \",000\"\n\n end_time_code = format_time_string(end_seconds) + \",000\"\n time_code = start_time_code + \" --> \" + end_time_code\n srt_file.write(time_code + \"\\n\")\n srt_file.write(transcript + \"\\n\\n\")\n counter += 1\n #print('Confidence: {}'.format(result.alternatives[0].confidence))\n srt_file.close()\n srt_file2.close()\n transcription_file.close()\n print(\"\\n------------------------------------------------\")\n print(\"Audio file length: \" + audio_duration_string)\n print(\"Transcribe operation running time: \" + operation_time_string)\n print(\"------------------------------------------------\")",
"def process_transcript(transcript_label):\n transcript_key = f\"{transcript_label}.json\"\n\n # Load Transcribe output from S3.\n raw_transcript = get_transcribe_output(transcript_key)\n\n # Parse to assign speaker parts.\n speaker_parts = assign_speakers(raw_transcript)\n\n # Identify Karen and Georgia.\n assigned = karen_or_georgia(speaker_parts)\n\n # Update the full transcript.\n build_transcript(assigned)\n\n # Upload the latest transcript to S3.\n s3 = boto3.resource(\"s3\")\n s3.Bucket(os.getenv(\"S3_BUCKET\")).upload_file(\"main_transcript.txt\", \"main_transcript.txt\")",
"def wavplay(filename):\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\tprint(\"Input file does not exist. Make sure you computed the analysis/synthesis\")\n\telse:\n\t\tif sys.platform == \"linux\" or sys.platform == \"linux2\":\n\t\t # linux\n\t\t subprocess.call([\"aplay\", filename])\n\n\t\telif sys.platform == \"darwin\":\n\t\t\t# OS X\n\t\t\tsubprocess.call([\"afplay\", filename])\n\t\telse:\n\t\t\tprint(\"Platform not recognized\")",
"def gravar():\n frase = input(\"Digite a frase a ser gravada: \")\n filename = frase.replace(\" \", \"\").lower() + '.mp3'\n txt = \"{};{}\\n\".format(frase, filename)\n\n # adiciona texto ao arquivo\n with open('frases', 'a') as file:\n file.write(txt)\n\n play_async(text_to_file(frase, filename))",
"def track_01():\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title='FM4.ORF.AT', force_radio=True)\n return \"Ok\"",
"def transcribe_recording(file_name, transcript_label):\n s3_key = f\"recordings/{file_name}\"\n\n # Load to S3.\n load_recording_to_s3(file_name)\n\n # Start the transcription job.\n start_transcribe_recording_job(s3_key, transcript_label)",
"def synthesize_text_file(text_file):\n from google.cloud import texttospeech\n client = texttospeech.TextToSpeechClient()\n\n with open(text_file, 'r') as f:\n text = f.read()\n input_text = texttospeech.types.SynthesisInput(text=text)\n\n # Note: the voice can also be specified by name.\n # Names of voices can be retrieved with client.list_voices().\n voice = texttospeech.types.VoiceSelectionParams(\n language_code='en-AU',\n name='en-AU-Wavenet-C',\n ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)\n\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3,\n speaking_rate=0.80)\n\n response = client.synthesize_speech(input_text, voice, audio_config)\n\n # The response's audio_content is binary.\n filename = text_file\n try:\n filename = filename.replace('.txt', '.mp3')\n filename = filename.replace('../Articles/', '')\n filename = filename.replace(';', ' ')\n filename = filename.replace(\"'\", \" \")\n except Exception as e:\n print(e)\n print('Check replace command in synthesize_file.py file')\n\n with open(filename, 'wb') as out:\n out.write(response.audio_content)\n print(f'Audio content written to file: \\n{filename}\\n')",
"def async_transcribe(audio_file_paths,\n bucket_name,\n output_tsv_path,\n sample_rate,\n language_code,\n speaker_count=0,\n begin_sec=0.0):\n tmp_audio_file = tempfile.mktemp(suffix=\".flac\")\n print(\"Temporary audio file: %s\" % tmp_audio_file)\n audio_duration_s = concatenate_audio_files(audio_file_paths, tmp_audio_file)\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n destination_blob_name = os.path.basename(tmp_audio_file)\n blob = bucket.blob(destination_blob_name)\n print(\"Uploading %s to GCS bucket %s\" % (tmp_audio_file, bucket_name))\n blob.upload_from_filename(tmp_audio_file)\n gcs_uri = \"gs://%s/%s\" % (bucket_name, destination_blob_name)\n print(\"Uploaded to GCS URI: %s\" % gcs_uri)\n\n client = speech.SpeechClient()\n audio = speech.RecognitionAudio(uri=gcs_uri)\n enable_speaker_diarization = speaker_count > 0\n config = speech.RecognitionConfig(\n encoding=speech.RecognitionConfig.AudioEncoding.FLAC,\n sample_rate_hertz=sample_rate,\n language_code=language_code,\n enable_speaker_diarization=enable_speaker_diarization,\n diarization_speaker_count=speaker_count)\n\n operation = client.long_running_recognize(config=config, audio=audio)\n timeout_s = int(audio_duration_s * 0.25)\n print(\n \"Waiting for async ASR operation to complete \"\n \"(audio duration: %.3f s; ASR timeout: %d s)...\" %\n (audio_duration_s, timeout_s))\n response = operation.result(timeout=timeout_s)\n blob.delete()\n os.remove(tmp_audio_file)\n\n utterances = []\n for result in response.results:\n # The first alternative is the most likely one for this portion.\n alt = result.alternatives[0]\n utterances.append(alt.transcript)\n print(u\"Transcript: {}\".format(alt.transcript))\n diarized_words = [(\n word.word, word.speaker_tag, word.start_time.total_seconds(),\n word.end_time.total_seconds()) for word in alt.words]\n # print(\"Confidence: {}\".format(result.alternatives[0].confidence))\n\n regrouped_utterances = regroup_utterances(utterances, diarized_words)\n with open(output_tsv_path, \"w\" if not begin_sec else \"a\") as f:\n if not begin_sec:\n # Write the TSV header.\n f.write(tsv_data.HEADER + \"\\n\")\n utterance_counter = 0\n for (regrouped_utterance,\n speaker_index, start_time_sec, end_time_sec) in regrouped_utterances:\n utterance_counter += 1\n line = \"%.3f\\t%.3f\\t%s\\t%s [U%d] [Speaker #%d]\" % (\n start_time_sec + begin_sec,\n end_time_sec + begin_sec,\n tsv_data.SPEECH_TRANSCRIPT_TIER,\n regrouped_utterance,\n utterance_counter,\n speaker_index)\n print(line)\n f.write(line + \"\\n\")",
"def main():\n input_video = sys.argv[1]\n input_audio = sys.argv[2]\n output_video = sys.argv[3]\n set_audio(input_video, input_audio, output_video)"
] | [
"0.66069883",
"0.6560382",
"0.6439472",
"0.626135",
"0.61909765",
"0.6083509",
"0.6073147",
"0.5968924",
"0.5962577",
"0.5942388",
"0.58873165",
"0.58815217",
"0.5879562",
"0.5855016",
"0.5853225",
"0.5847647",
"0.58247757",
"0.5802723",
"0.5779541",
"0.5740425",
"0.57340455",
"0.57230544",
"0.5722402",
"0.57103133",
"0.5694708",
"0.5692375",
"0.5677796",
"0.56541234",
"0.56260026",
"0.56227624"
] | 0.7504941 | 0 |
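Several of the negative snippets in the row above (play_audio, do_play, wavplay) shell out to a system audio player, each in a slightly different way. Below is a minimal, hedged sketch of that pattern — platform-aware playback through subprocess rather than os.system. The helper name play_wav and the fallback message are illustrative additions, not taken from any of the snippets.

import subprocess
import sys
from pathlib import Path

def play_wav(path: str) -> None:
    """Play a WAV file with the platform's stock CLI player (sketch only)."""
    wav = Path(path)
    if not wav.is_file():
        # Mirrors the existence check in the wavplay snippet
        print(f"Input file does not exist: {wav}")
        return
    if sys.platform.startswith("linux"):
        subprocess.call(["aplay", "-q", str(wav)])   # ALSA player, as in do_play/wavplay
    elif sys.platform == "darwin":
        subprocess.call(["afplay", str(wav)])        # macOS player, as in wavplay
    else:
        print("Platform not recognized; no player configured")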
audiourl > url where the transcription's mp3 is stored (NOT NULL)\n PodcastName > The name of the show (references podcast(name))\n Description > The provided summary of that day's podcast\n Date > The date that podcast aired (parsed to mmddyyyy)\n Title > The title of that specific podcast\n Duration > the running time of that podcast (use strptime to parse; needs mmddyyyy)\n pending > right now will be false because we're not transcribing\n (dateTranscribed) > date of transcription (updated later)\n | def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):
try:
cursor = dbConnection.cursor()
title = title.replace("'", "''")
cursor.execute("INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('" + audiourl + "', NULL, '" + podcastName + "', NULL, '" + description + "', '" + parsedDate + "', '" + title + "', FALSE, NULL);")
dbConnection.commit()
cursor.close()
return True
except:
return False
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tokenize_podcast_transcript(args):\n DATA_DIR = os.path.join(os.getcwd(), 'data', args.project_id)\n story_file = os.path.join(DATA_DIR, 'podcast-transcription.txt')\n\n # Read all words and tokenize them\n with open(story_file, 'r') as fp:\n data = fp.readlines()\n\n data = [item.split(' ') for item in data]\n data = [\n item[:-2] + [' '.join(item[-2:])] if item[-1] == '\\n' else item\n for item in data\n ]\n data = [item for sublist in data for item in sublist]\n\n df = pd.DataFrame(data, columns=['word'])\n df['conversation_id'] = 1\n\n return df",
"def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None",
"def construct_metadata(song):\n print(song) #temp",
"def podcast_show(url, name):\n for recording in scraper.get_podcast_episodes(url):\n INTERFACE.add_item(recording['title'],\n 'play_podcast',\n recording['url'],\n extra_info=recording)",
"def get_podcast_episodes(url):\n\n def parse_pubdate(date_string):\n \"\"\"\n Change pubdate string to datetime object. Tries a bunch of\n possible formats, but if none of them is a match, it will\n return a epoch = 0 datetime object\n\n :param date_string: A string representing a date\n :return: datetime object\n \"\"\"\n date_formats = (\n '%a, %d %b %Y %H:%M:%S +0000',\n '%a, %d %b %Y',\n '%a, %d %b %Y%H:%M:%S +0000',\n '%a, %d %b %Y %H:%M',\n '%a, %d %b %Y %H.%M'\n )\n df_generator = (format for format in date_formats)\n\n date = None\n while date is None:\n try:\n date = datetime.strptime(date_string, next(df_generator))\n except ValueError:\n pass\n except StopIteration:\n date = datetime.fromtimestamp(0)\n\n return date\n\n doc = get_document(url)\n\n return (\n {\n 'url': item.select('guid')[0].text,\n 'Premiered': parse_pubdate(\n item.select('pubdate')[0].text\n ).strftime(\"%d.%m.%Y\"),\n # 'Duration': duration_to_seconds(item.find('itunes:duration').text),\n 'title': item.title.text,\n 'Plot': item.description.text\n }\n for item in doc.find_all(\"item\")\n )",
"def parse_description(self, track: dict):\n try:\n album = track['album']\n link = album['external_urls'].get('spotify')\n preview = track.get('preview_url')\n return (f\"<p>Song from album <a href='{link}'>{album.get('name')}</a><p>\" +\n f\"<audio controls><source src='{preview}' type='audio/mp3'></audio>\")\n except KeyError:\n return \"\"",
"def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")",
"def track_info(filename):\n tag = id3.Tag()\n tag.parse(filename)\n a = load(filename)\n print(\"# {}\".format('=' * 78))\n print(\"Track Name: {}\".format(tag.title))\n print(\"Track Artist: {}\".format(tag.artist))\n print(\"Track Album: {}\".format(tag.album))\n print(\"Track Duration: {}\".format(duration_from_seconds(a.info.time_secs)))\n print(\"Track Number: {}\".format(tag.track_num))\n print(\"Track BitRate: {}\".format(a.info.bit_rate))\n print(\"Track BitRate: {}\".format(a.info.bit_rate_str))\n print(\"Sample Rate: {}\".format(a.info.sample_freq))\n print(\"Mode: {}\".format(a.info.mode))\n print(\"# {}\".format('=' * 78))\n print(\"Album Artist: {}\".format(tag.album_artist))\n print(\"Album Year: {}\".format(tag.getBestDate()))\n print(\"Album Recording Date: {}\".format(tag.recording_date))\n print(\"Album Type: {}\".format(tag.album_type))\n print(\"Disc Num: {}\".format(tag.disc_num))\n print(\"Artist Origin: {}\".format(tag.artist_origin))\n print(\"# {}\".format('=' * 78))\n print(\"Artist URL: {}\".format(tag.artist_url))\n print(\"Audio File URL: {}\".format(tag.audio_file_url))\n print(\"Audio Source URL: {}\".format(tag.audio_source_url))\n print(\"Commercial URL: {}\".format(tag.commercial_url))\n print(\"Copyright URL: {}\".format(tag.copyright_url))\n print(\"Internet Radio URL: {}\".format(tag.internet_radio_url))\n print(\"Publisher URL: {}\".format(tag.publisher_url))\n print(\"Payment URL: {}\".format(tag.payment_url))\n print(\"# {}\".format('=' * 78))\n print(\"Publisher: {}\".format(tag.publisher))\n print(\"Original Release Date: {}\".format(tag.original_release_date))\n print(\"Play Count: {}\".format(tag.play_count))\n print(\"Tagging Date: {}\".format(tag.tagging_date))\n print(\"Release Date: {}\".format(tag.release_date))\n print(\"Terms Of Use: {}\".format(tag.terms_of_use))\n print(\"isV1: {}\".format(tag.isV1()))\n print(\"isV2: {}\".format(tag.isV2()))\n print(\"BPM: {}\".format(tag.bpm))\n print(\"Cd Id: {}\".format(tag.cd_id))\n print(\"Composer: {}\".format(tag.composer))\n print(\"Encoding date: {}\".format(tag.encoding_date))\n print(\"# {}\".format('=' * 78))\n print(\"Genre: {}\".format(tag.genre.name))\n print(\"Non Std Genre Name: {}\".format(tag.non_std_genre.name))\n print(\"Genre ID: {}\".format(tag.genre.id))\n print(\"Non Std Genre ID: {}\".format(tag.non_std_genre.id))\n print(\"LAME Tag: {}\".format(a.info.lame_tag))\n print(\"# {}\".format('=' * 78))\n print(\"Header Version: {}\".format(tag.header.version))\n print(\"Header Major Version: {}\".format(tag.header.major_version))\n print(\"Header Minor Version: {}\".format(tag.header.minor_version))\n print(\"Header Rev Version: {}\".format(tag.header.rev_version))\n print(\"Header Extended: {}\".format(tag.header.extended))\n print(\"Header Footer: {}\".format(tag.header.footer))\n print(\"Header Experimental: {}\".format(tag.header.experimental))\n print(\"Header SIZE: {}\".format(tag.header.SIZE))\n print(\"Header Tag Size: {}\".format(tag.header.tag_size))\n print(\"Extended Header Size: {}\".format(tag.extended_header.size))\n print(\"# {}\".format('=' * 78))\n print(\"File Name: {}\".format(tag.file_info.name))\n print(\"File Tag Size: {}\".format(tag.file_info.tag_size))\n print(\"File Tag Padding Size: {}\".format(tag.file_info.tag_padding_size))\n print(\"File Read Only: {}\".format(tag.read_only))\n print(\"File Size: {}\".format(a.info.size_bytes))\n print(\"Last Modified: {}\".format(time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(tag.file_info.mtime))))\n 
print(\"Last Accessed: {}\".format(time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(tag.file_info.atime))))\n print(\"# {}\".format('=' * 78))",
"def get_track_info_mp3(filepath, tags, stream, cover):\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID', tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc == 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz: musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n\n track = sanitize_track(extract(tag('TRCK')))\n\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {\n \"title\": extract(tag('TIT2')),\n \"track\": track,\n \"artists\": tag('TPE1'),\n \"albumartist\": extract(tag('TPE2')) or extract(tags.get('TPE1')),\n \"album\": extract(tag('TALB')),\n \"discogs_id\": bytes(discogs).decode('utf-8') if discogs else None,\n \"musicbrainz_id\": musicbrainz,\n \"disk\": sanitize_disk(extract(tag('TPOS'))),\n \"year\": sanitize_year(extract(date)),\n \"genres\": sanitize_genres(tag('TCON')),\n \"length\": stream.length,\n \"bitrate\": stream.bitrate,\n \"size\": os.path.getsize(filepath),\n \"cover\": cover,\n \"filepath\": filepath,\n }",
"def transcribeAll(service, url, fileName):\n if(service == \"omny.fm\"):\n url = url.replace(\".mp3\",\"\") + \".mp3\"\n subprocess.Popen(\"wget -c -O ./podcasts/\" + fileName + \".mp3 \" + url + \" && sleep 40 && ffmpeg -i ./podcasts/\"\n + fileName + \".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/\" + fileName + \".wav && sleep 10 && rm ./podcasts/\" \n + fileName + \".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false \"\n + \"--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 \"\n + \"--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id\" + fileName \n + \" utterance-id\" + fileName + \"|' 'scp:echo utterance-id\" + fileName + \" ./podcasts/\" + fileName + \".wav|' 'ark:/dev/null' &\", shell=True)",
"def test_gathering_links_for_audio_track(\n lep_dl: LepDL,\n) -> None:\n json_test = \"\"\"\\\n [\n {\n \"episode\": 3,\n \"date\": \"2000-01-01T00:00:00+00:00\",\n \"url\": \"https://teacherluke.co.uk/2009/04/15/episode-3-musicthe-beatles/\",\n \"post_title\": \"3. Music/The Beatles\",\n \"post_type\": \"\",\n \"files\": {\n \"audios\": [],\n \"atrack\": [\n [\n \"https://someurl1.local\", \"https://someurl2.local\", \"https://someurl3.local\"\n ]\n ]\n },\n \"parsed_at\": \"2021-10-14T07:35:24.575575Z\",\n \"index\": 2009041501,\n \"admin_note\": \"Check audio track.\"\n }\n ]\n \"\"\" # noqa: E501,B950\n db_episodes = Lep.extract_only_valid_episodes(json_test)\n lep_dl.files = downloader.gather_all_files(db_episodes)\n assert len(lep_dl.files) == 2\n assert lep_dl.files[0].primary_url == \"https://someurl1.local\"\n assert lep_dl.files[0].secondary_url == \"https://someurl2.local\"\n assert lep_dl.files[0].tertiary_url == \"https://someurl3.local\"\n assert isinstance(lep_dl.files[0], ATrack)\n assert (\n lep_dl.files[0].filename == \"[2000-01-01] # 3. Music/The Beatles _aTrack_.mp3\"\n )",
"def set_meta_mp3(file):\n\n list_str_prop_mp3 = ['album', 'artist', 'title']\n list_other_prop_mp3 = ['comment', 'genre', 'year']\n dict_file_mp3 = {}\n # For each string properties into the tag\n for prop in list_str_prop_mp3:\n # If the tag exist (i.e it's not empty for the music file)\n if file.tag.d.has_key(prop.upper()):\n # We delete spe char and we format it\n dict_file_mp3[prop] = delete_spe_char_and_format(file.tag[prop.upper()])\n else:\n # Or we define it's value as 'Unknow ' + prop\n # For instance 'Unknow Artist'\n dict_file_mp3[prop] = 'Unknow ' + prop.capitalize()\n # For each other properties\n for prop in list_other_prop_mp3:\n if file.tag.d.has_key(prop.upper()):\n # We just copy them\n dict_file_mp3[prop] = file.tag[prop.upper()]\n else:\n dict_file_mp3[prop] = ''\n # To try to find the tracknumber, we need 'title'\n if dict_file_mp3.has_key('title'): \n # But before, we delete the duplicate\n list_duplicate = [dict_file_mp3['artist'], dict_file_mp3['album']]\n # Now we delete the duplicates\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], list_duplicate)\n # So we are able to find the tracknumber\n number = ''\n # If ID3 already find it\n if file.tag.d.has_key(\"TRACKNUMBER\"):\n number = file.tag[\"TRACKNUMBER\"]\n # Else we try to find by ourself\n else:\n number = find_tracknumber(dict_file_mp3['title'])\n # If we found a tracknumber, we delete it from 'title'\n if number:\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], [number])\n dict_file_mp3['tracknumber'] = number\n # And we format the new title\n dict_file_mp3['title'] = build_track_name(dict_file_mp3['title'], number)\n dict_file_mp3['name'] = dict_file_mp3['title'] + '.mp3'\n dict_file_mp3['path'] = build_path([dict_file_mp3['artist'], dict_file_mp3['album']])\n return dict_file_mp3",
"def get_audio_data(filename):\n\n audio_file = eyed3.load(filename)\n artist = audio_file.tag.artist\n title = audio_file.tag.title\n time = audio_file.info.time_secs\n album = audio_file.tag.album\n genre = re.sub('^\\(.*\\)', '', str(audio_file.tag._getGenre().name).lower().replace('|', ',').replace('/', ','))\n\n try:\n year = audio_file.tag.getBestDate().year\n except:\n year = None\n\n comments = []\n for i in audio_file.tag.comments:\n comment = correct_playlist_names(i.text.lower().strip())\n comments += comment.replace('|', ',').replace('/', ',').strip('|').split(',')\n\n return {\n 'artist' : artist,\n 'title' : title,\n 'album' : album,\n 'time' : time,\n 'comments' : filter(None, comments),\n 'genre' : genre.split(','),\n 'year' : year\n }",
"def news_speech():\n #Fetches data from API and creates global varibles.\n news_handle(news_fetch(config_fetcher('news_region'), config_fetcher('news_key')))\n #Creates a daily breifing using varibles\n news_daily_news = Markup((f\"The top headline for today is entitled: {title_1}, and was \\\nwritten by {author_1}. Here is a second headline, entitled: {title_2}, written by {author_2}.\"))\n return news_daily_news",
"def subject_item(url):\n soup = abcradionational.get_soup(url)\n \n playable_podcast = abcradionational.get_playable_podcast(soup)\n\n items = abcradionational.compile_playable_podcast(playable_podcast)\n\n\n return items",
"def transcribe_audio_file(filename):\n url = 'https://api.nexiwave.com/SpeechIndexing/file/storage/' + USERNAME +'/recording/?authData.passwd=' + PASSWORD + '&auto-redirect=true&response=application/json'\n\n # To receive transcript in plain text, instead of html format, comment this line out (for SMS, for example)\n #url = url + '&transcriptFormat=html'\n\n\n # Ready to send:\n sys.stderr.write(\"Send audio for transcript with \" + url + \"\\n\")\n r = requests.post(url, files={'mediaFileData': open(filename,'rb')})\n data = r.json()\n transcript = data['text']\n foo = data['text']\n f = open('newf.txt', 'w')\n f.write(foo)\n f.close() \n # Perform your magic here:\n print \"Transcript for \"+filename+\"=\" + transcript",
"def dummy_add_transcript():\n return {\n \"message\": \"AddTranscript\",\n \"format\": \"2.1\",\n \"metadata\": {\n \"start_time\": 0.0, \"end_time\": 2.0, \"transcript\": \"Foo\\nBar.\"},\n \"results\": [\n {\n \"type\": \"word\",\n \"start_time\": 0.0,\n \"end_time\": 1.0,\n \"alternatives\": [\n {\"content\": \"foo\", \"confidence\": 1.0, \"language\": \"en\"},\n ],\n },\n {\n \"type\": \"speaker_change\",\n \"start_time\": 1.0,\n \"end_time\": 1.0,\n \"score\": 0.8,\n },\n {\n \"type\": \"word\",\n \"start_time\": 1.0,\n \"end_time\": 2.0,\n \"alternatives\": [\n {\"content\": \"bar\", \"confidence\": 1.0, \"language\": \"en\"},\n ],\n },\n {\n \"type\": \"punctuation\",\n \"start_time\": 2.0,\n \"end_time\": 2.0,\n \"alternatives\": [{\"content\": \".\", \"confidence\": 1.0}],\n },\n ],\n }",
"def gen_dl_text(ddata, song, p):\n hdr = []\n hdr.append(\" %s%s%s\" % (c.r, song.title, c.w))\n author = p.author\n hdr.append(c.r + \" Uploaded by \" + author + c.w)\n hdr.append(\" [\" + fmt_time(song.length) + \"]\")\n hdr.append(\"\")\n\n heading = tuple(\"Item Format Quality Media Size Notes\".split())\n fmt = \" {0}%-6s %-8s %-13s %-7s %-5s %-16s{1}\"\n heading = [fmt.format(c.w, c.w) % heading]\n heading.append(\"\")\n\n content = []\n\n for n, d in enumerate(ddata):\n row = (n + 1, d['ext'], d['quality'], d['mediatype'], d['size'],\n d['notes'])\n fmt = \" {0}%-6s %-8s %-13s %-7s %5s Mb %-16s{1}\"\n row = fmt.format(c.g, c.w) % row\n content.append(row)\n\n content.append(\"\")\n\n footer = \"Select [%s1-%s%s] to download or [%sEnter%s] to return\"\n footer = [footer % (c.y, len(content) - 1, c.w, c.y, c.w)]\n return(content, hdr, heading, footer)",
"def add_simple_metadata(file_path, artist='', title='', album='', albumartist='', override=False):\r\n try:\r\n audio = EasyID3(file_path)\r\n except mutagen.id3.ID3NoHeaderError:\r\n audio = File(file_path)\r\n audio.add_tags()\r\n audio.save()\r\n audio = EasyID3(file_path)\r\n filename = pathlib.Path(file_path).name\r\n advanced_audio = File(file_path)\r\n try:\r\n if (not override and audio.get('title', '') and audio.get('artist', '')\r\n and audio.get('albumartist', '') and has_album_cover(file_path)) and 'TDRC' in advanced_audio: return False\r\n if not artist: artist = get_artist(filename)\r\n else:\r\n if artist.count(' , '): artist.split(' , ')\r\n elif artist.count(' ,'): artist = artist.split(' ,')\r\n elif artist.count(', '): artist = artist.split(', ')\r\n elif artist.count(','): artist = artist.split(',')\r\n if not title: title = filename.split(' - ')[-1][:-4]\r\n if override:\r\n audio['title'] = title\r\n audio['artist'] = artist\r\n if album: audio['album'] = album\r\n if albumartist: audio['albumartist'] = albumartist\r\n else:\r\n if 'album' not in audio:\r\n if album == '': audio['album'] = title\r\n else: audio['album'] = album\r\n if 'title' not in audio: audio['title'] = title\r\n if 'artist' not in audio: audio['artist'] = artist\r\n if 'albumartist' not in audio:\r\n if albumartist: audio['albumartist'] = albumartist\r\n else: audio['albumartist'] = artist\r\n audio.save()\r\n audio = MP3(file_path)\r\n # if artist and title and override or audio.get('TDRC', False):\r\n # auto_set_year(audio, artist, title)\r\n if not has_album_cover(file_path):\r\n if not set_album_cover(file_path):\r\n print(f'Album art not found for {file_path}')\r\n except MutagenError:\r\n print(f'{filename} in use')\r\n return False\r\n except ValueError as e:\r\n print(e)\r\n print('Error adding metadata to', filename)\r\n return False\r\n return True",
"def test_extracting_audio_data(\n only_audio_episodes: LepEpisodeList,\n lep_dl: LepDL,\n) -> None:\n expected_audio = Audio(\n ep_id=2009101908, # many posts in that day\n name=\"15. Extra Podcast – 12 Phrasal Verbs\",\n short_date=\"2009-10-19\",\n filename=\"[2009-10-19] # 15. Extra Podcast – 12 Phrasal Verbs\",\n primary_url=\"http://traffic.libsyn.com/teacherluke/15-extra-podcast-12-phrasal-verbs.mp3\", # noqa: E501,B950\n )\n lep_dl.files = downloader.gather_all_files(only_audio_episodes)\n audio_files = lep_dl.files.filter_by_type(Audio)\n assert audio_files[1] == expected_audio",
"def oc_metadata(row):\n t = _parse_date(row['startTime'])\n\n def _make_field(id_, value):\n return {'id': id_, 'value': value}\n\n return [\n {\n 'flavor': 'dublincore/episode',\n 'fields': [\n _make_field('title', row['title']),\n _make_field('description', row['courseDescription']),\n _make_field('startDate', t.strftime(\"%Y-%m-%d\")),\n _make_field('startTime', t.strftime(\"%H:%M:%SZ\")),\n ],\n }\n ]",
"def checkPre(dbConnection):\n cursor = dbConnection.cursor()\n cursor.execute(\"SELECT audiourl, T.id, podcastName, source FROM transcriptions AS T JOIN podcasts as P ON P.name = T.podcastname WHERE COALESCE(T.transcription, '') = '' AND pending = FALSE LIMIT 1;\")\n entry = cursor.fetchone()\n cursor.close()\n return entry",
"def test_gathering_multi_part_audio_track(\n lep_dl: LepDL,\n) -> None:\n json_test = \"\"\"\\\n [\n {\n \"episode\": 3,\n \"date\": \"2000-01-01T00:00:00+00:00\",\n \"url\": \"https://teacherluke.co.uk/2009/04/15/episode-3-musicthe-beatles/\",\n \"post_title\": \"3. Music/The Beatles\",\n \"post_type\": \"\",\n \"files\": {\n \"audios\": [],\n \"atrack\": [\n [\n \"https://someurl1.local\", \"https://someurl2.local\", \"https://someurl3.local\"\n ],\n [\n \"https://part2-someurl1.local\", \"https://part2-someurl2.local\"\n ]\n ]\n },\n \"parsed_at\": \"2021-10-14T07:35:24.575575Z\",\n \"index\": 2009041501,\n \"admin_note\": \"Check audio track.\"\n }\n ]\n \"\"\" # noqa: E501,B950\n db_episodes = Lep.extract_only_valid_episodes(json_test)\n lep_dl.files = downloader.gather_all_files(db_episodes)\n assert len(lep_dl.files) == 3\n assert lep_dl.files[0].secondary_url == \"https://someurl2.local\"\n assert lep_dl.files[0].tertiary_url == \"https://someurl3.local\"\n assert lep_dl.files[1].secondary_url == \"https://part2-someurl2.local\"\n assert isinstance(lep_dl.files[0], ATrack)\n assert isinstance(lep_dl.files[1], ATrack)\n assert (\n lep_dl.files[0].filename\n == \"[2000-01-01] # 3. Music/The Beatles [Part 01] _aTrack_.mp3\"\n )\n assert (\n lep_dl.files[1].filename\n == \"[2000-01-01] # 3. Music/The Beatles [Part 02] _aTrack_.mp3\"\n )",
"def to_m3u_track(record: Dict[str, str]) -> str:\n\n location = normalize(unquote(record.get(\"Location\")))\n\n # m3u duration in seconds, not ms\n duration = int(record.get(\"Total Time\")) // 1000\n name = normalize(unquote(record.get(\"Name\")))\n artist = normalize(unquote(\n record.get(\"Artist\") or\n record.get(\"Album Artist\") or\n record.get(\"Composer\", \"\")\n ))\n # print(\"Location {}\".format(location))\n return M3U_TRACK_TEMPLATE.format(\n length=duration,\n artist=artist,\n title=name,\n path=location\n )",
"def read_transcription_file(file_path, audio_file_path):\n with open(file_path) as in_file:\n last_timestamp = 0\n res = []\n transcription = \"\"\n for line in in_file:\n time_stamp_match = re.match(\"\\[([0-9\\]+\\.[0-9]+)\\]\", line)\n #if this regex matched then the line is a timestamp\n if time_stamp_match:\n timestamp = float(time_stamp_match.group(1))\n if transcription and transcription.strip() not in ['(())', \"<no-speech>\"]:\n single_instance = {\"start_time\": last_timestamp, \n \"end_time\": timestamp,\n \"transcription\": transcription,\n \"audio_file\" : audio_file_path}\n res.append(single_instance)\n last_timestamp = timestamp\n else:\n last_timestamp = timestamp # this handles silence at beginning\n else:\n transcription = line.strip()\n \n return res",
"def process_transcript(transcript_label):\n transcript_key = f\"{transcript_label}.json\"\n\n # Load Transcribe output from S3.\n raw_transcript = get_transcribe_output(transcript_key)\n\n # Parse to assign speaker parts.\n speaker_parts = assign_speakers(raw_transcript)\n\n # Identify Karen and Georgia.\n assigned = karen_or_georgia(speaker_parts)\n\n # Update the full transcript.\n build_transcript(assigned)\n\n # Upload the latest transcript to S3.\n s3 = boto3.resource(\"s3\")\n s3.Bucket(os.getenv(\"S3_BUCKET\")).upload_file(\"main_transcript.txt\", \"main_transcript.txt\")",
"def track_04():\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)\n return \"Ok\"",
"def uploadPodcast(dbConnection, homepage, name, description, category, source, imageurl, web, twitter, facebook, rss):\n try:\n cursor = dbConnection.cursor()\n name = name.replace(\"'\", \"''\")\n description = description.replace(\"'\", \"''\")\n cursor.execute(\"\"\"INSERT INTO podcasts(homepage, name, description, category, source, imageuri, web, twitter, Facebook, rss) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\"\", (homepage, name, description, category, source, imageurl, web, twitter, facebook, rss))\n dbConnection.commit()\n cursor.close()\n return True\n except Exception as e:\n\t\t Tools.writeException(\"insertHeader\", \"e\")\n return False",
"def parse_song_data(data):\r\n song_title_regex = re.compile(r'<title>([\\S\\s]+)</title>')\r\n\r\n match = song_title_regex.search(data)\r\n\r\n song_title = match.groups(0)[0]\r\n\r\n # Replaces the HTML code for apostrophe with the symbol\r\n return re.sub(r''', \"\\'\", song_title)",
"async def download_audio(event):\n url = event.pattern_match.group(1)\n rmsg = await event.get_reply_message()\n if not url and rmsg:\n myString = rmsg.text\n url = re.search(\"(?P<url>https?://[^\\s]+)\", myString).group(\"url\")\n if not url:\n return await edit_or_reply(event, \"`What I am Supposed to find? Give link`\")\n codevent = await edit_or_reply(event, \"`Preparing to download...`\")\n reply_to_id = await reply_id(event)\n ytdl_data = await ytdl_down(codevent, audio_opts, url)\n if ytdl_data is None:\n return\n await codevent.edit(\n f\"`Preparing to upload song:`\\\n \\n**{ytdl_data['title']}**\\\n \\nby *{ytdl_data['uploader']}*\"\n )\n f = pathlib.Path(f\"{ytdl_data['title']}.mp3\".replace(\"|\", \"_\"))\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.mp3.jpg\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.mp3.webp\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = None\n c_time = time.time()\n ul = io.open(f, \"rb\")\n uploaded = await event.client.fast_upload_file(\n file=ul,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(d, t, codevent, c_time, \"upload\", file_name=f)\n ),\n )\n ul.close()\n attributes, mime_type = await fix_attributes(f, ytdl_data, supports_streaming=True)\n media = types.InputMediaUploadedDocument(\n file=uploaded,\n mime_type=mime_type,\n attributes=attributes,\n thumb=await event.client.upload_file(codthumb) if codthumb else None,\n )\n await event.client.send_file(\n event.chat_id,\n file=media,\n reply_to=reply_to_id,\n caption=ytdl_data[\"title\"],\n supports_streaming=True,\n force_document=False,\n )\n os.remove(f)\n if codthumb:\n os.remove(codthumb)\n await codevent.delete()"
] | [
"0.5720898",
"0.56445146",
"0.5573998",
"0.55513567",
"0.55373365",
"0.5462284",
"0.5397166",
"0.53909075",
"0.5349029",
"0.53120935",
"0.51405776",
"0.51242185",
"0.51071036",
"0.50734013",
"0.5052101",
"0.50449437",
"0.50282156",
"0.5022951",
"0.49973148",
"0.49737933",
"0.49661297",
"0.49635798",
"0.49503383",
"0.49322152",
"0.49305058",
"0.49285465",
"0.49257424",
"0.48962638",
"0.4868232",
"0.48550746"
] | 0.5656862 | 1 |
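The insertClip document in the row above builds its INSERT by string concatenation and only escapes the title, so apostrophes in the description or podcast name would break the statement. Below is a small sketch of a parameterized variant, assuming a DB-API connection with %s placeholders as already used by the uploadPodcast negative in the same row; it is an illustration, not the dataset's original code.

def insert_clip_parameterized(dbConnection, audiourl, podcastName, description, parsedDate, title):
    """Sketch: same INSERT as insertClip, but with bound parameters instead of concatenation."""
    try:
        cursor = dbConnection.cursor()
        cursor.execute(
            "INSERT INTO transcriptions("
            "audiourl, realtimefactor, podcastname, transcription, description, "
            "date, title, pending, datetranscribed) "
            "VALUES(%s, NULL, %s, NULL, %s, %s, %s, FALSE, NULL);",
            (audiourl, podcastName, description, parsedDate, title),
        )
        dbConnection.commit()
        cursor.close()
        return True
    except Exception:
        # Same failure contract as the original: swallow the error and report False
        return False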
Checks the database for empty transcription entries and returns a single row with\n\n index 0 audiourl\n index 1 id\n index 2 podcast name\n index 3 source (service) of the podcast | def checkPre(dbConnection):
cursor = dbConnection.cursor()
cursor.execute("SELECT audiourl, T.id, podcastName, source FROM transcriptions AS T JOIN podcasts as P ON P.name = T.podcastname WHERE COALESCE(T.transcription, '') = '' AND pending = FALSE LIMIT 1;")
entry = cursor.fetchone()
cursor.close()
return entry | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listMissingItems():\n global doc\n texts = doc.getElementsByTagName(\"text\")\n for t in texts:\n xmlid = t.getAttribute(\"id\")\n for table in langtables:\n execute(\"SELECT * FROM %s WHERE xmlid=\\\"%s\\\"\" % (table, xmlid))\n rows = cursor.fetchall()\n if rows == None or len(rows) == 0:\n warn(t.toxml() + \" missing in %s\" % table)",
"def test_filter_messages_empty_data(self):\n pass",
"def test_170417_empty(self):\n spc = parser(get_file('PTSD48_empty.txt'))\n # spc.draw_outlooks()\n spc.sql(self.txn)\n jabber = spc.get_jabbers('')\n self.assertEquals(jabber[0][0],\n (\"The Storm Prediction Center issues Days 4-8 \"\n \"Convective Outlook at Dec 25, 9:41z \"\n \"http://www.spc.noaa.gov/products/exper/day4-8/\"\n \"archive/2008/day4-8_20081225.html\"))",
"def nohupTranscriptionContent(filePath):\n try:\n continu = True\n fileContent = \"\"\n f = open(filePath, 'r')\n while (continu):\n temp = f.readline(900000)\n if(len(temp) == 0):\n continu = False\n else:\n fileContent += temp\n results = []\n realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)\n results.append(realTimeFactor)\n transcription = re.findall(r'utterance-id(.*?) (.*?)\\n', fileContent)\n transcriptionList = []\n transcriptionIDList = []\n for item in transcription:\n if(len(item[1]) > 1000):\n transcriptionIDList.append(item[0])\n transcriptionList.append(item[1])\n results.append(transcriptionList)\n results.append(transcriptionIDList)\n transcriptionTime = re.findall(r'seconds / (.*?) seconds\\.', fileContent)\n results.append(transcriptionTime)\n return results\n except Exception as e:\n Tools.writeException(\"nohupTranscriptionContent\", e)\n return False",
"def filter_empty(word_list):\n new_list = []\n for x in word_list:\n if(x):\n new_list.append(x)\n return new_list",
"def __look_for_missing_pseudotext_info(self, force_update=False):\n logging.debug('Starting method that looks for a missing pseudo-text info')\n counter = 0\n max_vids_to_process = self.num_vids_to_use\n logging.info('Examining ' + str(max_vids_to_process) + ' records.')\n list_vids_no_pt_data = []\n percent_tracker = PercentTracker(max_vids_to_process, int_output_every_x_percent=10)\n for vid_id in self.transcripts_ds:\n execution_should_continue = self.var_mgr.var_retrieve(my_globals.str_execution_may_go_on)\n if (not execution_should_continue) or (counter >= max_vids_to_process):\n break\n\n need_to_append_to_list = False\n if force_update:\n need_to_append_to_list = True\n if not need_to_append_to_list:\n # if we already found that the video should be appended to the list,\n # then there is no need for further checks. But if NOT, then\n # the following should still be performed\n transcript = Transcript(vid_id)\n transcript.set_transcript_directory(self.str_path_to_transcripts_files)\n transcript.load_transcript_object_from_dictionary(self.transcripts_ds.fetch_data(vid_id))\n has_pseudotranscript_data = transcript.is_pseudotranscript_filename_populated()\n if not has_pseudotranscript_data:\n # we are here if the video has a transcript (it exists in the transcripts SimpleDS),\n # but the field for the filename of the TranscriptAnalysis file has never been populated.\n need_to_append_to_list = True\n\n if need_to_append_to_list:\n list_vids_no_pt_data.append(vid_id)\n counter += 1\n percent_tracker.update_progress(counter,\n str_description_to_include_in_logging='Finding missing pseudotext files.')\n return list_vids_no_pt_data",
"def test_get_all_unassociated_no_tracks(self):\n self.assertEqual(self.get_track_count(), 0)\n tracks = Track.get_all_unassociated(self.app.curs)\n self.assertEqual(tracks, [])",
"def __look__missing_termcount_info(self):\n logging.debug('Starting method that looks for missing Term Count data.')\n counter = 0\n max_vids_to_process = self.num_vids_to_use\n logging.info('Examining ' + str(max_vids_to_process) + ' records.')\n list_vids_no_tc_data = []\n percent_tracker = PercentTracker(max_vids_to_process, int_output_every_x_percent=10)\n for vid_id in self.transcripts_ds:\n execution_should_continue = self.var_mgr.var_retrieve(my_globals.str_execution_may_go_on)\n if (not execution_should_continue) or (counter >= max_vids_to_process):\n break\n transcript = Transcript(vid_id)\n transcript.set_transcript_directory(self.str_path_to_transcripts_files)\n transcript.load_transcript_object_from_dictionary(self.transcripts_ds.fetch_data(vid_id))\n has_tc_data = transcript.is_termcount_filename_populated()\n if not has_tc_data:\n # we are here if the video has a transcript (it exists in the transcripts SimpleDS),\n # but the field for the filename of the Term Count file has never been populated.\n list_vids_no_tc_data.append(vid_id)\n counter += 1\n percent_tracker.update_progress(counter,\n str_description_to_include_in_logging='Finding missing term-count files.')\n return list_vids_no_tc_data",
"def _checkForBlankLines(self, datalines):\n empties = None\n count = 0\n rtlines = []\n for line in datalines:\n if line.strip() == \"\":\n empties = 1\n else:\n if empties == 1: # If data line found after empty line then raise\n raise Exception(\"Empty line found in data section at line: \" + str(count))\n else:\n rtlines.append(line)\n count = count + 1\n return rtlines",
"def test_get_all_unassociated_single_track_without_album(self):\n track = Track(artist='Artist', title='Title')\n track.insert(self.app.db, self.app.curs,\n 'xmms', datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n tracks = Track.get_all_unassociated(self.app.curs)\n self.assertEqual(len(tracks), 0)",
"def not_empty(cur, conn, v=False):\n logs = []\n print(\"Checking that all tables have at least 10 rows...\")\n for t in TABLES:\n try:\n cur.execute(f\"SELECT COUNT(*) FROM {t}\")\n conn.commit()\n row = cur.fetchone()\n except:\n raise ValueError(f\"Something went wrong executing `not_empty` query on table {t}\")\n\n if row == None:\n text = f\"!!Data quality check on table {t} failed, no data fetched.\"\n elif row[0] <= 10:\n text = f\"!!Data quality check on table {t} failed with {row[0]} rows.\"\n else:\n text = f\"Data quality check on table {t} passed with {row[0]} rows.\"\n if v: print(text)\n logs.append(f\"{text}\\n\")\n\n return logs",
"def test_query_with_no_matches_returns_nothing(test_store):\n items = list(test_store.get_by(name=\"Sugar\"))\n\n assert len(items) == 0",
"def check_txt_ids(self):\n for awi in self:\n if not awi.txt_ids:\n raise exceptions.except_orm(\n _(\"Missing Values !\"),\n _(\"Missing VAT TXT Lines!!!\"))\n return True",
"def missing_samples(self):\n missing = [s for s in self.subjects if len(s.samples) == 0]\n if len(missing) == 0:\n return None\n return missing",
"def get_transcription(url):\n\n # Checks the format of the URL\n if \"https://www.youtube.com/watch?v=\" in url:\n input_url_id = url.replace(\"https://www.youtube.com/watch?v=\", \"\")\n elif \"https://youtu.be/\" in url:\n input_url_id = url.replace(\"https://youtu.be/\", \"\")\n\n # Creates a blank list to iterate over\n text_parts = []\n\n # Gets a list of all available transcripts\n try:\n\n list_of_transcripts = YouTubeTranscriptApi.list_transcripts(input_url_id)\n print(\"Checking for Transcriptions...\")\n\n # Checks to see if a manual transcript is created if not, checks to see if a generated one is created\n if 'en-US' in list_of_transcripts._manually_created_transcripts:\n print(\"Manual Transcription Found.\")\n transcript = list_of_transcripts.find_manually_created_transcript(['en-US'])\n elif 'en' in list_of_transcripts._manually_created_transcripts:\n print(\"Manual Transcription Found.\")\n transcript = list_of_transcripts.find_manually_created_transcript(['en'])\n elif 'en' in list_of_transcripts._generated_transcripts:\n print(\"Auto-Generated Transcription Found.\")\n transcript = list_of_transcripts.find_generated_transcript(['en'])\n\n # Saves the transcript into a variable to iterate over\n raw_transcription = transcript.fetch()\n\n # Indexing of raw transcripts\n iteration_of_raw = 0\n\n # Iterates over each dictionary and extracts 'text' key then appends the blank text_parts list\n for i in raw_transcription:\n indexed_dictionary = raw_transcription[iteration_of_raw]\n text_from_dictionary = indexed_dictionary['text']\n text_parts.append(text_from_dictionary)\n iteration_of_raw += 1\n # Defines how we want each text element to be separated with\n separator_for_each_text = \" \"\n\n # Joins the separator with the text_parts\n clean_transcription = separator_for_each_text.join(text_parts)\n\n # Returns the cleaned transcripts\n return clean_transcription\n\n except:\n print(\"No Transcriptions Found\")\n clean_transcription = \"No Transcriptions Found\"\n return clean_transcription",
"def emptyDVDColection(self):\r\n if (len(self.DVDColectionlist))==0:\r\n print(\"DVD collection is empty!\")\r\n else:\r\n print(\"DVD collection is not empty!\")",
"def nonempty_lines(text):\n return [line for line in text.split('\\n') if line]",
"def test_empty_transformlist(self):\n tflist = TransformList()\n self.assertEqual(len(tflist), 0)",
"def addMissingData():\n\n conn = sqlite3.connect(\"./transactions.db\")\n\n person = pd.read_sql(\n \"\"\"\n select * from person;\n \"\"\",\n conn,\n )\n\n record = pd.read_sql(\n \"\"\"\n select * from record;\n \"\"\",\n conn,\n )\n\n tracked = set([_id for _id in record[\"doc_id\"]])\n\n untracked = []\n\n for url, _id in zip(person[\"url\"], person[\"doc_id\"]):\n if not _id in tracked:\n untracked.append((url[-17:-13], _id))\n\n untracked = pd.DataFrame(untracked, columns=[\"date\", \"doc_id\"])\n\n res = transaction.extractData(untracked)\n\n res.to_sql(\"record\", conn, index=False, if_exists=\"append\")\n\n conn.close()",
"def clean_tweets(data):\n count = 0\n f = open(os.path.dirname(__file__) + '/../tweet_output/ft1.txt','w')\n for item in data:\n if item.get('text'):\n string=item['text'].encode('ascii','ignore')+' (timestamp: '+item['created_at']+')\\n'\n f.write(string)\n if item['text'].encode('ascii','ignore')!=item['text']:\n count=count+1\n f.write('\\n')\n string=str(count)+' tweets contained unicode.'\n f.write(string)\n f.close()",
"def test_nil_results(self):\n class Test(pyperry.Base):\n def _config(cls):\n cls.attributes('id')\n cls.configure('read', adapter=TestAdapter)\n TestAdapter.data = None\n TestAdapter.count = 3\n result = Test.fetch_records(Test.scoped())\n self.assertEqual(len(result), 0)",
"def tokenize_podcast_transcript(args):\n DATA_DIR = os.path.join(os.getcwd(), 'data', args.project_id)\n story_file = os.path.join(DATA_DIR, 'podcast-transcription.txt')\n\n # Read all words and tokenize them\n with open(story_file, 'r') as fp:\n data = fp.readlines()\n\n data = [item.split(' ') for item in data]\n data = [\n item[:-2] + [' '.join(item[-2:])] if item[-1] == '\\n' else item\n for item in data\n ]\n data = [item for sublist in data for item in sublist]\n\n df = pd.DataFrame(data, columns=['word'])\n df['conversation_id'] = 1\n\n return df",
"def remove_empty(data):\n out = []\n for item in data:\n if item == '':\n continue\n out.append(item)\n return out",
"def test_get_all_need_transform_no_tracks(self):\n self.assertEqual(Track.get_all_need_transform(self.app.curs, 1), [])",
"def test_returns_all_studies_with_no_query(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_all_studies_with_no_query(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_valid_phrases():\n return [x[0] for x in all_topics if x[1] == \"1\"]",
"def empty(self):\n return [cell for cell in self.compact if not cell.peg]",
"def no_txs(self):\n return self._no_txs",
"def find_add_audio_messages(self):\n return [\n msg for msg in self.messages_received if not isinstance(msg, dict)]"
] | [
"0.5676291",
"0.5263004",
"0.5223297",
"0.52066535",
"0.5155342",
"0.51544493",
"0.51198375",
"0.50936806",
"0.50230044",
"0.49982882",
"0.49966055",
"0.49849313",
"0.4925892",
"0.48985046",
"0.48830864",
"0.48502585",
"0.48242763",
"0.48231605",
"0.48141956",
"0.4793201",
"0.47868186",
"0.4778326",
"0.47765735",
"0.4775492",
"0.4756699",
"0.4756699",
"0.4752159",
"0.47504467",
"0.4750284",
"0.4746151"
] | 0.6268205 | 0 |
given title, return true if the podcast is already in the database, false if the podcast does not exist in the database | def checkIfExists(dbconnection, title):
cursor = dbconnection.cursor()
output = ""
title = title.replace("'", "''")
try:
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
except:
dbconnection.rollback()
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_if_entry_exists(title: str) -> bool:\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n try:\n c.execute(\n \"\"\"select * from entries where title = ?\"\"\",\n (title,)\n )\n records = c.fetchall()\n return len(records) > 0\n except sqlite3.OperationalError as e:\n print(f'Exception {e} caught. Recreating database.')\n c.execute('drop table if exists entries')\n conn.commit()\n conn.close()\n create()\n return False",
"def exists(self):\n self.cursor.execute(f\"\"\"\n SELECT 1\n FROM {self.table_name}\n WHERE {self.lookup_type}='{self.word}'\n \"\"\")\n return True if self.cursor.fetchone() else False",
"def entry_exists(title):\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n return True\n\n except FileNotFoundError:\n return False",
"def presentation_exists(self, presentation):\r\n result = QtSql.QSqlQuery('''SELECT * FROM presentations''')\r\n while result.next():\r\n if (unicode(presentation.title) == unicode(result.value(1).toString())\r\n and unicode(presentation.speaker) == unicode(result.value(2).toString())):\r\n return True\r\n return False",
"def check_story_exists(self) -> bool:\n title_check = self._soup.find(\"title\").string\n if title_check == u'FicWad: fresh-picked original and fan fiction':\n return False\n return True",
"def get_movie_if_exist(item):\n query = Session.query(Movie).filter(Movie.title == item.title)\n result = query.first()\n return result",
"def check_repeat(db, record):\n models = [TechRepublicData, SecurityNewsData, PyjobData, RedditData]\n temp = db.query(*models)\n\n for model in models:\n if temp.filter(model.title == record.title).count():\n return True",
"def book_exist(author, title, edition):\n book = Book.query.filter_by(\n author=author,\n book_title=title,\n edition=edition).first()\n if book:\n return True\n return False",
"def title_exists(form, field):\n if Entry.select().where(Entry.title ** field.data).exists():\n raise ValidationError('That title is already in use.')",
"def exists(self):\n return True",
"def exists(self):\n return True",
"def url_is_in_db(url):\n return bool(find_url(url).first())",
"def if_already_present(video_id: str) -> bool:\n return Video.objects.filter(video_id=video_id).exists()",
"def check_db_for_vid(self):\n with db.cursor() as cursor:\n if self.videoId in db.\n pass",
"def check_repost_exists(type, id):\n \n try:\n soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))\n return True\n except HTTPError as e:\n if e.response.status_code == 404:\n db.mark_as_deleted(type, id)\n return False\n else:\n raise",
"def player_exists_in_db(name: str):\n with open('db.json') as fo:\n data = loads(fo.read())\n return name in data",
"def valid_title(self, title):\n if title in self.timers.keys() and isinstance(title, str) and self.timers[title]['count']>0:\n return True\n else:\n return False",
"def insertEpisode(ep):\n if check(\"episodes\", ep):\n return \"episode exists\"\n else:\n engine.execute(f\"INSERT INTO episodes (episode) VALUES ('{ep}');\")",
"def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):\n try:\n cursor = dbConnection.cursor()\n title = title.replace(\"'\", \"''\")\n cursor.execute(\"INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('\" + audiourl + \"', NULL, '\" + podcastName + \"', NULL, '\" + description + \"', '\" + parsedDate + \"', '\" + title + \"', FALSE, NULL);\")\n dbConnection.commit()\n cursor.close()\n return True\n except:\n return False\n return False",
"def _item_exists(self, item):\n cursor = self.conn.cursor()\n cursor.execute(\n 'SELECT * FROM Members where first_name = ?;',\n (item['first_name'])\n )\n return True if len(cursor.fetchall()) else False",
"def exists(self, answer):\n return self.find(answer) is not None",
"def try_create_uniqe_title(self,title,owner):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,owner):\n return new_title\n return False\n else:\n return False",
"def exist(self, product_item):\n cursor = self.database.cursor(named_tuple=True, buffered=True)\n sql = \"SELECT * FROM favoris WHERE produit_id = '{}' \".format(product_item.id)\n cursor.execute(sql)\n rows = cursor.fetchone()\n if not rows:\n return False\n return True",
"def exist(self):",
"def _exists (self):\n cursor = self._exec (self.select)\n return bool (cursor.fetchall ())",
"def db_exists(self):\n \n with self.connection:\n c = self.connection.cursor()\n c.execute(\"SELECT EXISTS(SELECT 1 FROM sqlite_master WHERE name=?)\", [PUBLICATIONS_TABLE])\n exists = c.fetchone()\n if(exists[0] == 1):\n return True\n else:\n return False",
"def existing_event(self, title, location, category, description):\n for event in self.event_list:\n # test to see if the user has the same event, in the same location in their list\n if event['title'] == title and event['location'] == location and event['category'] == category and event['description'] == description:\n return True\n else:\n return False",
"def database_exists (name, parent=None):\n return get_database(name, parent) is not None",
"def _check_row_exists(self, pk):\n session = self.session_factory()\n exists = session.query(PipelineRun).filter_by(id=pk).first()\n session.close()\n if exists:\n return True\n return False",
"def existsInDatabase(self, url):\n connection = pymongo.MongoClient(\n settings['MONGODB_SERVER'],\n settings['MONGODB_PORT']\n )\n db = connection[settings['MONGODB_DB']]\n collection = db[settings['MONGODB_COLLECTION']]\n\n db_comic = collection.find_one({\n 'url': url\n })\n return True if db_comic else False"
] | [
"0.7373473",
"0.63370997",
"0.6241188",
"0.62162536",
"0.6169572",
"0.6139767",
"0.6122975",
"0.5888012",
"0.5878836",
"0.58756447",
"0.58756447",
"0.58615744",
"0.58349437",
"0.583252",
"0.5811973",
"0.5811524",
"0.58094037",
"0.57614815",
"0.57470703",
"0.57385516",
"0.57263124",
"0.5658692",
"0.56586796",
"0.56561965",
"0.5651934",
"0.5638678",
"0.5634481",
"0.56085217",
"0.560124",
"0.5592616"
] | 0.7353994 | 1 |
generate the CUSPARSE FFI definition | def generate_cffi_cdef(
cuda_include_path=cuda_include_path, cusparse_header=cusparse_header,
cffi_out_file=None):
with open(cusparse_header, 'r') as f:
cusparse_hdr = f.readlines()
    # in some versions cusparse_v2.h just points to cusparse.h, so read it
# instead
for line in cusparse_hdr:
# if v2 header includes cusparse.h, read that one instead
if line.startswith('#include "cusparse.h"'):
cusparse_header = os.path.join(cuda_include_path, 'cusparse.h')
with open(cusparse_header, 'r') as f:
cusparse_hdr = f.readlines()
cusparse_hdr = [_remove_comment(l) for l in cusparse_hdr]
# skip lines leading up to first typedef
for idx, line in enumerate(cusparse_hdr):
if line.startswith('typedef'):
start_line = idx
break
# skip closing #if defined logic
for idx, line in enumerate(cusparse_hdr[start_line:]):
if line.startswith('#if defined(__cplusplus)') or \
'Define the following symbols for the new API' in line:
            # second match is to avoid CFFI compilation error due to the final
# define statements in v4.1 through v5.5
end_line = start_line + idx
break
# define other data types needed by FFI
# ... will be filled in from cuComplex.h by the C compiler
cffi_cdef = """
typedef struct CUstream_st *cudaStream_t;
typedef struct float2 {
...;
} float2;
typedef float2 cuFloatComplex;
typedef float2 cuComplex;
typedef struct double2 {
...;
} double2;
typedef double2 cuDoubleComplex;
typedef float cufftReal;
typedef double cufftDoubleReal;
typedef cuComplex cufftComplex;
typedef cuDoubleComplex cufftDoubleComplex;
typedef enum cudaDataType_t
{
CUDA_R_16F= 2, // real as a half
CUDA_C_16F= 6, // complex as a pair of half numbers
CUDA_R_32F= 0, // real as a float
CUDA_C_32F= 4, // complex as a pair of float numbers
CUDA_R_64F= 1, // real as a double
CUDA_C_64F= 5, // complex as a pair of double numbers
CUDA_R_8I= 3, // real as a signed char
CUDA_C_8I= 7, // complex as a pair of signed char numbers
CUDA_R_8U= 8, // real as a unsigned char
CUDA_C_8U= 9, // complex as a pair of unsigned char numbers
CUDA_R_32I= 10, // real as a signed int
CUDA_C_32I= 11, // complex as a pair of signed int numbers
CUDA_R_32U= 12, // real as a unsigned int
CUDA_C_32U= 13 // complex as a pair of unsigned int numbers
} cudaDataType;
typedef enum libraryPropertyType_t //GRL: added this for cuda 8.0
{
MAJOR_VERSION,
MINOR_VERSION,
PATCH_LEVEL
} libraryPropertyType;
/* definitions from cusparse header below this point */
"""
cffi_cdef += ''.join(cusparse_hdr[start_line:end_line])
"""
don't use the _v2 versions of the function names defined in CUDA v4.1
through v5.5
"""
cffi_cdef = cffi_cdef.replace('_v2(', '(')
if os.name == 'nt': # Win
cffi_cdef = cffi_cdef.replace('CUSPARSEAPI', '__stdcall')
else: # posix, etc
cffi_cdef = cffi_cdef.replace('CUSPARSEAPI', '')
if cffi_out_file is not None:
# create specified output directory if it doesn't already exist
out_dir = os.path.dirname(cffi_out_file)
if out_dir and not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(cffi_out_file, 'w') as f:
f.write(cffi_cdef)
return cffi_cdef | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)",
"def _fc_function_definitions(self) -> str:\n result = 'extern \"C\" {\\n\\n'\n for namespace in self.namespaces:\n for member in namespace.members:\n result += member.fortran_c_wrapper()\n\n result += '}\\n\\n'\n return result",
"def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res",
"def gen_capi(args):\n\n if not args.header:\n return \"\"\n\n cmd = [\"ctags\", \"-x\", \"--c-kinds=fpsgx\", args.header]\n\n process = Popen(cmd, stdout=PIPE, stderr=PIPE)\n out, err = process.communicate()\n\n if process.returncode:\n return \"\"\n\n titles = {\n \"nvm_geo\": \"Geometry\",\n \"nvm_buf\": \"Buffer Allocation\",\n \"nvm_dev\": \"Device Management\",\n \"nvm_addr\": \"Addressing\",\n \"nvm_cmd\": \"Raw Commands\",\n \"nvm_vblk\": \"Virtual Block\",\n \"nvm_bbt\": \"Bad-Block-Table\"\n }\n docs = {}\n\n lib = {}\n for line in out.split(\"\\n\"):\n parts = (\" \".join(line.split())).split(\" \")[:2]\n if len(parts) < 2:\n continue\n\n name, kind = parts\n ns = \"_\".join(name.split(\"_\")[:2])\n\n if ns not in lib:\n lib[ns] = {}\n\n if kind not in lib[ns]:\n lib[ns][kind] = []\n\n lib[ns][kind].append(name)\n\n for ns in lib:\n\n if \"prototype\" in lib[ns]:\n ordering = [\n \"bbt_get\", \"bbt_set\", \"bbt_mark\", \"bbt_flush\",\n \"addr_erase\", \"addr_read\", \"addr_write\", \"addr_check\",\n \"addr_.*2\",\n \"vblk_erase\", \"vblk_p?read\", \"vblk_p?write\", \"vblk_pad\",\n \"lba_p?read\", \"lba_p?write\",\n \"_alloc\", \"_fill\", \"_free\", \"_pr\",\n \"_get_\", \"_set_\"\n ]\n\n ordered = []\n for order in ordering:\n for func in lib[ns][\"prototype\"]:\n if re.search(order, func):\n if func not in ordered:\n ordered.append(func)\n\n lib[ns][\"prototype\"] = list(\n set(lib[ns][\"prototype\"]) -\n set(ordered)\n ) + ordered\n\n title = \"%s - %s\" % (ns, titles[ns]) if ns in titles else ns\n\n rst = \"\\n\".join([\n \".. _sec-capi-%s:\" % ns, \"\",\n title,\n \"=\" * len(title),\n \"\", \"\"\n ])\n\n if \"typedefs\" in lib[ns]:\n for typedef in lib[ns][\"typedefs\"]:\n rst += \"\\n\".join([\n typedef,\n \"-\" * len(typedef), \"\",\n \".. doxygentypedef:: %s\" % typedef,\n \"\", \"\"\n ])\n\n for mangler in [\"struct\", \"externvar\"]:\n if mangler in lib[ns]:\n for struct in lib[ns][mangler]:\n rst += \"\\n\".join([\n struct,\n \"-\" * len(struct), \"\",\n \".. doxygenstruct:: %s\" % struct,\n \" :members:\",\n \"\", \"\"\n ])\n\n if \"enum\" in lib[ns]:\n for enum in lib[ns][\"enum\"]:\n rst += \"\\n\".join([\n enum,\n \"-\" * len(enum), \"\",\n \".. doxygenenum:: %s\" % enum,\n \"\", \"\"\n ])\n\n if \"prototype\" in lib[ns]:\n for func in lib[ns][\"prototype\"]:\n rst += \"\\n\".join([\n func,\n \"-\" * len(func), \"\",\n \".. doxygenfunction:: %s\" % func,\n \"\", \"\"\n ])\n\n docs[ns] = rst\n\n return docs",
"def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = {};\\n'.format(code)\n catch += ' static std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result",
"def build_cffi():\r\n print_banner(\"Building CFFI Module\")\r\n ffi = cffi.FFI()\r\n\r\n this_dir = pathlib.Path().resolve()\r\n h_file_name = this_dir / \"cmult.h\"\r\n with open(h_file_name) as h_file:\r\n # cffi does not like our preprocessor directives, so we remove them\r\n lns = h_file.read().splitlines()\r\n flt = filter(lambda ln: not re.match(r\" *#\", ln), lns)\r\n flt = map(lambda ln: ln.replace(\"EXPORT_SYMBOL \", \"\"), flt)\r\n ffi.cdef(str(\"\\n\").join(flt))\r\n\r\n ffi.set_source(\r\n \"cffi_example\",\r\n # Since we are calling a fully built library directly no custom source\r\n # is necessary. We need to include the .h files, though, because behind\r\n # the scenes cffi generates a .c file which contains a Python-friendly\r\n # wrapper around each of the functions.\r\n '#include \"cmult.h\"',\r\n # The important thing is to include the pre-built lib in the list of\r\n # libraries we are linking against:\r\n libraries=[\"cmult\"],\r\n library_dirs=[this_dir.as_posix()],\r\n extra_link_args=[\"-Wl,-rpath,.\"],\r\n )\r\n\r\n ffi.compile()\r\n print(\"* Complete\")",
"def fortran_c_wrapper(self) -> str:\n result = banner('//')\n result += self._fc_includes()\n result += self._fc_using_statements()\n result += self._fc_function_definitions()\n return result",
"def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s",
"def gen_csource(protocol):\n\tdef format_default(reg):\n\t\t\"\"\"Given a reg, return its default value formatted as a string for inclusion in\n\t\t a C source file.\"\"\"\n\t\tif reg.size == \"accum\":\n\t\t\treturn str(float(reg.default)) + \"k\"\n\t\telse:\n\t\t\treturn str(int(reg.default)) + \"L\"\n\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <avr/interrupt.h>\n#include <util/atomic.h>\n#include \"protocol.h\"\n#include \"spi.h\"\n\n\"\"\"\n\ts += \"volatile struct comm_data_t Data = {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t.\" + r.name + \" = \" + format_default(r) + \", /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\ts += \"\\n\"\n\t\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void){ /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"\"\"\\t%s v;\n\tATOMIC_BLOCK(ATOMIC_RESTORESTATE){\n\t\tv = Data.%s;\n\t}\n\treturn v;\n}\n\"\"\"%(r.size, r.name)\n\t\ts += \"void set_%s(%s v){ /* %s */\\n\"%(r.name, r.size, r.desc)\n\t\ts += \"\"\"\\tATOMIC_BLOCK(ATOMIC_RESTORESTATE){\n\t\tData.%s = v;\n\t}\n}\n\n\"\"\"%(r.name)\n\ts += \"\"\"ISR(SPI0_STC_vect){\n\tuint8_t reg_num = SPDR0;\n\tswitch(reg_num){\n\"\"\"\n\t\n\tfor r in protocol:\n\t\tif r.write:\n\t\t\ts += \"\\t\\tcase % 2d: /* Write %s (%s) */\\n\"%(r.number, r.name, r.desc)\n\t\t\ts += \"\\t\\t\\tspi_rx((uint8_t *) &Data.%s, sizeof(Data.%s));\\n\"%(r.name, r.name)\n\t\t\ts += \"\\t\\t\\tbreak;\\n\"\n\t\tif r.read:\n\t\t\ts += \"\\t\\tcase 0x80 + % 2d: /* Read %s (%s) */\\n\"%(r.number, r.name, r.desc)\n\t\t\ts += \"\\t\\t\\tspi_tx((uint8_t *) &Data.%s, sizeof(Data.%s));\\n\"%(r.name, r.name)\n\t\t\ts += \"\\t\\t\\tbreak;\\n\"\n\ts += \"\"\"\t}\n\n\t/* Clear SPIF flag */\n\treg_num = SPSR0;\n\treg_num = SPDR0;\n}\n\"\"\"\t\n\treturn s",
"def build_func_body(func_name, arg_dict, return_type):\n body = \"\"\n arg_list = \"\"\n\n # the following are pointers to scalar outputs\n # Note: pBufferSize was renamed pBufferSizeInBytes in v6.5\n scalar_ptr_outputs = ['nnzTotalDevHostPtr',\n 'pBufferSize',\n 'pBufferSizeInBytes',\n 'resultDevHostPtr']\n\n is_creator = 'cusparseCreate' in func_name\n is_getter = 'cusparseGet' in func_name\n\n if return_type == 'cusparseStatus_t' and not (is_creator or is_getter):\n is_return = False\n else:\n is_return = True\n\n # else:\n return_str = ''\n for k, v in arg_dict.items():\n\n \"\"\"\n set some flags based on the name/type of the argument\n will use these flags to determine whether and how to call ffi.new or\n ffi.cast on each variable\n \"\"\"\n is_ptr = '*' in v\n is_cusparse_type = '_t' in v\n is_cusparse_ptr = is_ptr and is_cusparse_type\n is_output_scalar = k in scalar_ptr_outputs\n if k in ['alpha', 'beta']:\n is_scalar = True\n else:\n is_scalar = False\n if is_getter:\n is_gpu_array = False\n else:\n is_gpu_array = is_ptr and (not is_cusparse_ptr) and (not is_scalar)\n if 'Complex' in v:\n is_complex = True\n else:\n is_complex = False\n\n # convert variable to appropriate type for the FFI\n if is_output_scalar:\n # for scalar outputs make a new pointer\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_getter and is_ptr and (return_type == 'cusparseStatus_t'):\n # any pointers in cusparseGet* are new outputs to be created\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n elif is_gpu_array:\n # pass pointer to GPU array data (use either .ptr or .gpudata)\n body += \"%s = ffi.cast('%s', %s.ptr)\\n\" % (k, v, k)\n elif is_cusparse_ptr:\n if is_creator:\n # generate custom cusparse type\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n else:\n # cast to the custom cusparse type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_ptr and is_scalar:\n # create new pointer, with value initialized to scalar\n if is_complex:\n # complex case is a bit tricky. 
requires ffi.buffer\n body += \"%sffi = ffi.new('%s')\\n\" % (k, v)\n if 'cusparseC' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex64(%s).tostring()\\n\" % (k, k)\n elif 'cusparseZ' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex128(%s).tostring()\\n\" % (k, k)\n else:\n body += \"%s = ffi.new('%s', %s)\\n\" % (k, v, k)\n elif is_ptr or v == 'cudaStream_t':\n # case non-scalar pointer to appropriate type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n else:\n # don't need explicit cast for plain int, float, etc\n pass\n\n # build the list of arguments to pass to the API\n if is_ptr and is_scalar and is_complex:\n # take into account modified argument name for complex scalars\n arg_list += \"%sffi, \" % k\n else:\n arg_list += \"%s, \" % k\n\n # add the function call and optionally return the result\n last_key = k\n arg_list = arg_list[:-2] # remove trailing \", \"\n if is_getter and return_type != 'cusparseStatus_t':\n body += \"return ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n else:\n # check cusparseStatus_t state before returning\n call_str = \"status = ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n body += split_line(call_str, break_pattern=', ', nmax=76)\n body += \"cusparseCheckStatus(status)\\n\"\n if is_return:\n # len(arg_dict) == 2) is to avoid return for cusparseGetLevelInfo\n if is_creator or (is_getter and (len(arg_dict) == 2)):\n body += \"return %s[0]\\n\" % last_key\n else:\n body += \"#TODO: return the appropriate result\"\n body += '\\n\\n'\n return reindent(body, numSpaces=4, lstrip=False)",
"def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return &___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)",
"def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() 
+len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True",
"def make_ffi(module_path, crate_path, cached_header_filename=None):\n if cached_header_filename is not None and \\\n os.path.isfile(cached_header_filename):\n with open(cached_header_filename, 'rb') as f:\n header = f.read()\n if not PY2:\n header = header.decode('utf-8')\n else:\n from .bindgen import generate_header\n header = generate_header(crate_path)\n header = _directive_re.sub('', header)\n\n if os.environ.get('SNAEK_DEBUG_HEADER') == '1':\n sys.stderr.write('/* generated header for \"%s\" */\\n' % module_path)\n sys.stderr.write(header)\n sys.stderr.write('\\n')\n sys.stderr.flush()\n\n ffi = cffi.FFI()\n ffi.cdef(header)\n ffi.set_source(module_path, None)\n return ffi",
"def build(self, cres):\n _launch_threads()\n # Build wrapper for ufunc entry point\n ctx = cres.target_context\n library = cres.library\n signature = cres.signature\n llvm_func = library.get_function(cres.fndesc.llvm_func_name)\n wrapper, env = build_gufunc_wrapper(library, ctx, llvm_func,\n signature, self.sin, self.sout,\n fndesc=cres.fndesc,\n env=cres.environment)\n\n ptr = library.get_pointer_to_function(wrapper.name)\n\n # Get dtypes\n dtypenums = []\n for a in signature.args:\n if isinstance(a, types.Array):\n ty = a.dtype\n else:\n ty = a\n dtypenums.append(as_dtype(ty).num)\n\n return dtypenums, ptr, env",
"def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. #\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the 
looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, 
i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. \r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r",
"def translate_to_c(Newast):\n ast = parse_file('exampleMin.c', use_cpp=True)\n\n ast.show()\n #print(\"newast: \", Newast.ext[0].decl.type.args.params[0].type.type==ast.ext[0].decl.type.args.params[0].type.type)\n #print(\"newast2: \", Newast.ext[0].decl.type.args.params[0].type.type.coord)\n #print(\"ast2: \", ast.ext[0].decl.type.args.params[0].type.type.coord)\n\n #Newast.show()\n \n # print(ast.ext[0].decl.bitsize)\n # print(Newast.ext[0].decl.bitsize)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.coord)\n # print(Newast.ext[0].decl.type.args.coord)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params)\n # print(Newast.ext[0].decl.type.args.params)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0])\n # print(Newast.ext[0].decl.type.args.params[0])\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type)\n # print(Newast.ext[0].decl.type.args.params[0].type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type)\n # print(Newast.ext[0].decl.type.args.params[0].type.type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type.names)\n # print(Newast.ext[0].decl.type.args.params[0].type.type.names)\n # print(\"----------------------------------\")\n\n generator = c_generator.CGenerator()\n #ast.show()\n\n # tracing the generator for debugging\n # import trace\n # tr = trace.Trace(countcallers=1)\n # tr.runfunc(generator.visit, Newast)\n # tr.results().write_results()\n\n print(generator.visit(Newast))",
"def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n Py_DECREF(it);\r\n 
return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)",
"def fortran_interface(self) -> str:\n result = ''\n if self.fc_override == '':\n return result\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n # declaration\n in_parameters = self._fi_in_parameters()\n return_type, out_parameters = self._fi_out_parameters()\n if self.may_throw:\n out_parameters.append(('integer (c_int)', 'err_code'))\n out_parameters.append(('type (c_ptr)', 'err_msg'))\n out_parameters.append(('integer (c_size_t)', 'err_msg_len'))\n\n arg_list = [par_name for _, par_name in in_parameters + out_parameters]\n if len(arg_list) > 1:\n arg_vlist = ' &\\n' + indent(', &\\n'.join(arg_list), 8*' ')\n else:\n arg_vlist = ', '.join(arg_list)\n\n if return_type != '':\n result += '{} function {}({}) &\\n'.format(\n return_type, func_name, arg_vlist)\n else:\n result += 'subroutine {}({}) &\\n'.format(func_name, arg_vlist)\n result += ' bind(C, name=\"{}\")\\n'.format(func_name)\n result += '\\n'\n result += ' use iso_c_binding\\n'\n\n # parameter declarations\n for par_type, par_name in in_parameters:\n result += ' {}, intent(in) :: {}\\n'.format(\n par_type, par_name)\n for par_type, par_name in out_parameters:\n result += ' {}, intent(out) :: {}\\n'.format(par_type, par_name)\n\n # end\n if return_type != '':\n result += 'end function {}\\n\\n'.format(func_name)\n else:\n result += 'end subroutine {}\\n\\n'.format(func_name)\n return indent(result, 8*' ')",
"def cblas_header_text():\r\n\r\n return \"\"\"\r\n //#include <stddef.h>\r\n\r\n #undef __BEGIN_DECLS\r\n #undef __END_DECLS\r\n #ifdef __cplusplus\r\n #define __BEGIN_DECLS extern \"C\" {\r\n #define __END_DECLS }\r\n #else\r\n #define __BEGIN_DECLS /* empty */\r\n #define __END_DECLS /* empty */\r\n #endif\r\n\r\n __BEGIN_DECLS\r\n\r\n #define MOD %\r\n\r\n /*\r\n * Enumerated and derived types\r\n */\r\n #define CBLAS_INDEX size_t /* this may vary between platforms */\r\n\r\n enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};\r\n enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};\r\n enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};\r\n enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};\r\n enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};\r\n\r\n float cblas_sdsdot(const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY);\r\n double cblas_dsdot(const int N, const float *X, const int incX, const float *Y,\r\n const int incY);\r\n float cblas_sdot(const int N, const float *X, const int incX,\r\n const float *Y, const int incY);\r\n double cblas_ddot(const int N, const double *X, const int incX,\r\n const double *Y, const int incY);\r\n\r\n /*\r\n * Functions having prefixes Z and C only\r\n */\r\n void cblas_cdotu_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotu);\r\n void cblas_cdotc_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotc);\r\n\r\n void cblas_zdotu_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotu);\r\n void cblas_zdotc_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotc);\r\n\r\n\r\n /*\r\n * Functions having prefixes S D SC DZ\r\n */\r\n float cblas_snrm2(const int N, const float *X, const int incX);\r\n float cblas_sasum(const int N, const float *X, const int incX);\r\n\r\n double cblas_dnrm2(const int N, const double *X, const int incX);\r\n double cblas_dasum(const int N, const double *X, const int incX);\r\n\r\n float cblas_scnrm2(const int N, const void *X, const int incX);\r\n float cblas_scasum(const int N, const void *X, const int incX);\r\n\r\n double cblas_dznrm2(const int N, const void *X, const int incX);\r\n double cblas_dzasum(const int N, const void *X, const int incX);\r\n\r\n\r\n /*\r\n * Functions having standard 4 prefixes (S D C Z)\r\n */\r\n CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX);\r\n CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);\r\n CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX);\r\n CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 1 BLAS routines\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (s, d, c, z)\r\n */\r\n void cblas_sswap(const int N, float *X, const int incX, \r\n float *Y, const int incY);\r\n void cblas_scopy(const int N, const float *X, const int incX, \r\n float *Y, const int incY);\r\n void cblas_saxpy(const int N, const float alpha, const float *X,\r\n const int incX, float *Y, const int incY);\r\n\r\n void cblas_dswap(const int N, double *X, const int incX, \r\n double *Y, const int incY);\r\n void cblas_dcopy(const int N, const double *X, const int incX, \r\n double *Y, const 
int incY);\r\n void cblas_daxpy(const int N, const double alpha, const double *X,\r\n const int incX, double *Y, const int incY);\r\n\r\n void cblas_cswap(const int N, void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_ccopy(const int N, const void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_caxpy(const int N, const void *alpha, const void *X,\r\n const int incX, void *Y, const int incY);\r\n\r\n void cblas_zswap(const int N, void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_zcopy(const int N, const void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_zaxpy(const int N, const void *alpha, const void *X,\r\n const int incX, void *Y, const int incY);\r\n\r\n\r\n /* \r\n * Routines with S and D prefix only\r\n */\r\n void cblas_srotg(float *a, float *b, float *c, float *s);\r\n void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);\r\n void cblas_srot(const int N, float *X, const int incX,\r\n float *Y, const int incY, const float c, const float s);\r\n void cblas_srotm(const int N, float *X, const int incX,\r\n float *Y, const int incY, const float *P);\r\n\r\n void cblas_drotg(double *a, double *b, double *c, double *s);\r\n void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);\r\n void cblas_drot(const int N, double *X, const int incX,\r\n double *Y, const int incY, const double c, const double s);\r\n void cblas_drotm(const int N, double *X, const int incX,\r\n double *Y, const int incY, const double *P);\r\n\r\n\r\n /* \r\n * Routines with S D C Z CS and ZD prefixes\r\n */\r\n void cblas_sscal(const int N, const float alpha, float *X, const int incX);\r\n void cblas_dscal(const int N, const double alpha, double *X, const int incX);\r\n void cblas_cscal(const int N, const void *alpha, void *X, const int incX);\r\n void cblas_zscal(const int N, const void *alpha, void *X, const int incX);\r\n void cblas_csscal(const int N, const float alpha, void *X, const int incX);\r\n void cblas_zdscal(const int N, const double alpha, void *X, const int incX);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 2 BLAS\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (S, D, C, Z)\r\n */\r\n void cblas_sgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n const float *X, const int incX, const float beta,\r\n float *Y, const int incY);\r\n void cblas_sgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const float alpha,\r\n const float *A, const int lda, const float *X,\r\n const int incX, const float beta, float *Y, const int incY);\r\n void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *A, const int lda, \r\n float *X, const int incX);\r\n void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const float *A, const int lda, \r\n float *X, const int incX);\r\n void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n 
const int N, const float *Ap, float *X, const int incX);\r\n void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *A, const int lda, float *X,\r\n const int incX);\r\n void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const float *A, const int lda,\r\n float *X, const int incX);\r\n void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *Ap, float *X, const int incX);\r\n\r\n void cblas_dgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n const double *X, const int incX, const double beta,\r\n double *Y, const int incY);\r\n void cblas_dgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const double alpha,\r\n const double *A, const int lda, const double *X,\r\n const int incX, const double beta, double *Y, const int incY);\r\n void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *A, const int lda, \r\n double *X, const int incX);\r\n void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const double *A, const int lda, \r\n double *X, const int incX);\r\n void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *Ap, double *X, const int incX);\r\n void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *A, const int lda, double *X,\r\n const int incX);\r\n void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const double *A, const int lda,\r\n double *X, const int incX);\r\n void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *Ap, double *X, const int incX);\r\n\r\n void cblas_cgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *X, const int incX, const void *beta,\r\n void *Y, const int incY);\r\n void cblas_cgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const void *alpha,\r\n const void *A, const int lda, const void *X,\r\n const int incX, const void *beta, void *Y, const int incY);\r\n void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int 
N, const int K, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, void *X,\r\n const int incX);\r\n void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda,\r\n void *X, const int incX);\r\n void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n\r\n void cblas_zgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *X, const int incX, const void *beta,\r\n void *Y, const int incY);\r\n void cblas_zgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const void *alpha,\r\n const void *A, const int lda, const void *X,\r\n const int incX, const void *beta, void *Y, const int incY);\r\n void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, void *X,\r\n const int incX);\r\n void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda,\r\n void *X, const int incX);\r\n void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n\r\n\r\n /* \r\n * Routines with S and D prefixes only\r\n */\r\n void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *A,\r\n const int lda, const float *X, const int incX,\r\n const float beta, float *Y, const int incY);\r\n void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const float alpha, const float *A,\r\n const int lda, const float *X, const int incX,\r\n const float beta, float *Y, const int incY);\r\n void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *Ap,\r\n const float *X, const int incX,\r\n const float beta, float *Y, 
const int incY);\r\n void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,\r\n const float alpha, const float *X, const int incX,\r\n const float *Y, const int incY, float *A, const int lda);\r\n void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, float *A, const int lda);\r\n void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, float *Ap);\r\n void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY, float *A,\r\n const int lda);\r\n void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY, float *A);\r\n\r\n void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *A,\r\n const int lda, const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const double alpha, const double *A,\r\n const int lda, const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *Ap,\r\n const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,\r\n const double alpha, const double *X, const int incX,\r\n const double *Y, const int incY, double *A, const int lda);\r\n void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, double *A, const int lda);\r\n void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, double *Ap);\r\n void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, const double *Y, const int incY, double *A,\r\n const int lda);\r\n void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, const double *Y, const int incY, double *A);\r\n\r\n\r\n /* \r\n * Routines with C and Z prefixes only\r\n */\r\n void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *Ap,\r\n const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int 
lda);\r\n void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const void *X, const int incX,\r\n void *A, const int lda);\r\n void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const void *X,\r\n const int incX, void *A);\r\n void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *Ap);\r\n\r\n void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *Ap,\r\n const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const void *X, const int incX,\r\n void *A, const int lda);\r\n void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const void *X,\r\n const int incX, void *A);\r\n void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *Ap);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 3 BLAS\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (S, D, C, Z)\r\n */\r\n void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const float alpha, const float *A,\r\n const int lda, const float *B, const int ldb,\r\n const float beta, float *C, const int ldc);\r\n void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n const float *B, const int ldb, const 
float beta,\r\n float *C, const int ldc);\r\n void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const float alpha, const float *A, const int lda,\r\n const float beta, float *C, const int ldc);\r\n void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const float alpha, const float *A, const int lda,\r\n const float *B, const int ldb, const float beta,\r\n float *C, const int ldc);\r\n void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n float *B, const int ldb);\r\n void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n float *B, const int ldb);\r\n\r\n void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const double alpha, const double *A,\r\n const int lda, const double *B, const int ldb,\r\n const double beta, double *C, const int ldc);\r\n void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n const double *B, const int ldb, const double beta,\r\n double *C, const int ldc);\r\n void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const double *A, const int lda,\r\n const double beta, double *C, const int ldc);\r\n void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const double *A, const int lda,\r\n const double *B, const int ldb, const double beta,\r\n double *C, const int ldc);\r\n void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n double *B, const int ldb);\r\n void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n double *B, const int ldb);\r\n\r\n void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const void *alpha, const void *A,\r\n const int lda, const void *B, const int ldb,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, 
const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n\r\n void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const void *alpha, const void *A,\r\n const int lda, const void *B, const int ldb,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n\r\n\r\n /* \r\n * Routines with prefixes C and Z only\r\n */\r\n void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const float alpha, const void *A, const int lda,\r\n const float beta, void *C, const int ldc);\r\n void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const float beta,\r\n void *C, const int ldc);\r\n\r\n void cblas_zhemm(const enum CBLAS_ORDER Order, const enum 
CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const void *A, const int lda,\r\n const double beta, void *C, const int ldc);\r\n void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const double beta,\r\n void *C, const int ldc);\r\n\r\n void cblas_xerbla(int p, const char *rout, const char *form, ...);\r\n\r\n __END_DECLS\r\n \"\"\"",
"def make_get_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_TYPE_* ___madz_TYPE_get_out_struct(){{\n return &___madz_OUTPUT;\n}}\n\n\"\"\"\n return res",
"def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature",
"def convert_fus(ast):\n\n parent_fn_name = ast.name_short\n prefix_list = {\"p\": \"p.\", \"r\": \"r.\", \"g\": \"c.\"}\n prefix = prefix_list[parent_fn_name]\n\n fus1_ns = ast.args[0].namespace\n fus1_val = ast.args[0].value\n\n arg_fus = ast.args[1]\n fus_args = [None, \"?\", \"?\"]\n for idx, arg in enumerate(arg_fus.args):\n fus_args[idx] = arg\n\n fus2_ns = fus_args[0].namespace\n fus2_val = fus_args[0].value\n\n if fus_args[1] == \"?\":\n fus1_range = fus_args[1]\n else:\n fus1_range = f'\"{prefix}1_{fus_args[1].value}\"'\n\n if fus_args[2] == \"?\":\n fus2_range = fus_args[2]\n else:\n fus2_range = f'\"{prefix}{fus_args[2].value}_?\"'\n\n fus = Function(\"fus\", version=version, parent=ast)\n fus.args = [\n NSArg(fus1_ns, fus1_val, fus),\n StrArg(fus1_range, fus),\n NSArg(fus2_ns, fus2_val, fus),\n StrArg(fus2_range, fus),\n ]\n\n # Remove BEL\n ast_args = ast.args\n ast_args.pop(0)\n ast_args.pop(0)\n\n if ast_args == [None]:\n ast_args = []\n\n ast.args = []\n ast.add_argument(fus)\n\n if len(ast_args) > 0:\n ast.args.extend(ast_args)\n\n return ast",
"def _parse_cc_h(self, fname_cc):\n def _type_translate(p_type, default_v=None):\n \"\"\" Translates a type from C++ to GRC \"\"\"\n translate_dict = {'float': 'float',\n 'double': 'real',\n 'int': 'int',\n 'gr_complex': 'complex',\n 'char': 'byte',\n 'unsigned char': 'byte',\n 'std::string': 'string',\n 'std::vector<int>': 'int_vector',\n 'std::vector<float>': 'real_vector',\n 'std::vector<gr_complex>': 'complex_vector',\n }\n if p_type in ('int',) and default_v[:2].lower() == '0x':\n return 'hex'\n try:\n return translate_dict[p_type]\n except KeyError:\n return 'raw'\n def _get_blockdata(fname_cc):\n \"\"\" Return the block name and the header file name from the .cc file name \"\"\"\n blockname = os.path.splitext(os.path.basename(fname_cc.replace('_impl.', '.')))[0]\n fname_h = (blockname + '.h').replace('_impl.', '.')\n blockname = blockname.replace(self._info['modname']+'_', '', 1)\n return (blockname, fname_h)\n # Go, go, go\n print \"Making GRC bindings for %s...\" % fname_cc\n (blockname, fname_h) = _get_blockdata(fname_cc)\n try:\n parser = ParserCCBlock(fname_cc,\n os.path.join(self._info['includedir'], fname_h),\n blockname,\n self._info['version'],\n _type_translate\n )\n except IOError:\n print \"Can't open some of the files necessary to parse %s.\" % fname_cc\n sys.exit(1)\n return (parser.read_params(), parser.read_io_signature(), blockname)",
"def buildCDeclaration(self):\n raise Exception(\"Unimplemented function in symbol: \"+self.name)",
"def _compile_C_code(header, body, return_unloaded=False, verbose=False):\n import importlib\n import tempfile\n import uuid\n\n import cffi\n\n module_name = \"module_\" + uuid.uuid4().hex\n\n if \"__uint128\" in header:\n raise ValueError(\"_compile_C_code does not support bit-vector widths \"\n \"larger than 64 bits (cffi does not support __uint128)\")\n\n ffibuilder = cffi.FFI()\n ffibuilder.cdef(header)\n ffibuilder.set_source(module_name, body)\n\n tmpdir = tempfile.TemporaryDirectory()\n lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)\n\n if return_unloaded:\n return lib_path, module_name, tmpdir\n\n # dynamic import\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, lib_path)\n pymod_parent = importlib.util.module_from_spec(spec)\n # sys.modules[module_name] = module\n spec.loader.exec_module(pymod_parent)\n\n pymod = pymod_parent\n\n return pymod, tmpdir",
"def make_func_declarations(self):\n\n\t\tfor name in self.func_dict:\n\t\t\tbody = Lexer(self.func_dict[name]).get_tokens()\n\t\t\ti = body.index('\\\\') + 1 #Start of parameters\n\t\t\tj = body.match_paren(i)\n\t\t\tparam_tokens = body[i + 1: j] #Stuff inside parentheses\n\t\t\t#\t\t\tprint \"param list:\", param_tokens\n\n\t\t\tparams = self.split_params(param_tokens)\n\t\t\tparams = map(lambda n: n.split(':'), params)\n\t\t\t#params is now [[<name>,<type>],...]\n\t\t\tc_types = map(lambda n: self.convert_type(*n), params)\n\t\t\t#\t\t\tprint c_types\n\n\t\t\treturn_type = ''\n\t\t\t# +2 to skip over \")\" and \":\"\n\t\t\tif body[j+2] == '(': #Function returns another function\n\t\t\t\t# +3 for [\")\",\"->\",\"<type>\"]\n\t\t\t\tfor x in xrange(j+2, body.match_paren(j+2)+3):\n\t\t\t\t\treturn_type += body[x]\n\t\t\telse: #Function returns a concrete type\n\t\t\t\treturn_type = body[j+2] #+2 to skip over \")\" and \":\"\n\n\t\t\tfunc_type = self.convert_type(name, return_type)\n\t\t\t#\t\t\tprint \"params\", params\n\t\t\t#\t\t\tprint \"c_types\", c_types\n\t\t\t#while True:exec raw_input() in globals(), locals()\n\t\t\tself.cpp_declarations[name] = func_type + '(' + ', '.join(c_types) + ')'\n\n\t\tself.cpp_declarations['main'] = 'int main()' #actually this isn't used",
"def _load_hesaff_clib(rebuild=None):\n global REBUILD_ONCE\n # Get the root directory which should have the dynamic library in it\n #root_dir = realpath(dirname(__file__)) if '__file__' in vars() else realpath(os.getcwd())\n\n # os.path.dirname(sys.executable)\n #if getattr(sys, 'frozen', False):\n # # we are running in a |PyInstaller| bundle\n # root_dir = realpath(sys._MEIPASS)\n #else:\n # # we are running in a normal Python environment\n # root_dir = realpath(dirname(__file__))\n root_dir = realpath(dirname(__file__))\n if rebuild is not False and REBUILD_ONCE == 0 and __name__ != '__main__':\n REBUILD_ONCE += 1\n rebuild = ut.get_argflag('--rebuild-hesaff')\n if rebuild:\n print('REBUILDING HESAFF')\n repo_dir = realpath(dirname(root_dir))\n ut.std_build_command(repo_dir)\n\n libname = 'hesaff'\n (clib, def_cfunc, lib_fpath) = ctypes_interface.load_clib(libname, root_dir)\n # Expose extern C Functions to hesaff's clib\n #def_cfunc(C.c_char_p, 'cmake_build_type', [])\n #def_cfunc(None, 'free_char', [C.c_char_p])\n def_cfunc(int_t, 'get_cpp_version', [])\n def_cfunc(int_t, 'is_debug_mode', [])\n def_cfunc(int_t, 'detect', [obj_t])\n def_cfunc(int_t, 'get_kpts_dim', [])\n def_cfunc(int_t, 'get_desc_dim', [])\n def_cfunc(None, 'exportArrays', [obj_t, int_t, kpts_t, vecs_t])\n def_cfunc(None, 'extractDesc', [obj_t, int_t, kpts_t, vecs_t])\n def_cfunc(None, 'extractPatches', [obj_t, int_t, kpts_t, img32_t])\n def_cfunc(None, 'extractDescFromPatches', [int_t, int_t, int_t, img_t, vecs_t])\n def_cfunc(obj_t, 'new_hesaff_fpath', [str_t] + HESAFF_PARAM_TYPES)\n def_cfunc(obj_t, 'new_hesaff_image', [img_t, int_t, int_t, int_t] + HESAFF_PARAM_TYPES)\n def_cfunc(None, 'free_hesaff', [obj_t])\n def_cfunc(obj_t, 'detectFeaturesListStep1', [int_t, str_list_t] + HESAFF_PARAM_TYPES)\n def_cfunc(None, 'detectFeaturesListStep2', [int_t, obj_t, int_array_t])\n def_cfunc(None, 'detectFeaturesListStep3', [int_t, obj_t, int_array_t, int_array_t, kpts_t, vecs_t])\n return clib, lib_fpath",
"def fortran_function(self) -> str:\n if self.f_override is not None:\n return indent(\n self.f_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\n \"$F_PREFIX$\", self.f_prefix),\n 4*' ')\n\n result = ''\n\n # declaration\n func_name = '{}_{}_{}'.format(\n self.f_prefix, self.class_name, self.name)\n in_parameters = self._f_in_parameters()\n return_type, out_parameters = self._f_out_parameters()\n if self.may_throw:\n out_parameters.append(('integer, optional', 'err_code'))\n out_parameters.append(('character(:), allocatable, optional',\n 'err_msg'))\n\n all_parameters = in_parameters + out_parameters\n arg_list = ', &\\n'.join([par_name for _, par_name in all_parameters])\n arg_ilist = indent(arg_list, 8*' ')\n if return_type != '':\n result += 'function {}( &\\n{})\\n'.format(func_name, arg_ilist)\n else:\n result += 'subroutine {}( &\\n{})\\n'.format(func_name, arg_ilist)\n\n # parameter declarations\n result += ' implicit none\\n'\n for par_type, par_name in in_parameters:\n result += ' {}, intent(in) :: {}\\n'.format(\n par_type, par_name)\n for par_type, par_name in out_parameters:\n result += ' {}, intent(out) :: {}\\n'.format(par_type, par_name)\n if return_type != '':\n result += ' {} :: {}\\n'.format(return_type, func_name)\n result += '\\n'\n\n # variable declarations\n c_return_type, fi_out_parameters = self._fi_out_parameters()\n if c_return_type:\n result += ' {} :: ret_val\\n'.format(c_return_type)\n for par_type, par_name in fi_out_parameters:\n result += ' {} :: {}\\n'.format(par_type, par_name)\n for par_type, par_name in self.ret_type.f_aux_variables():\n result += ' {} :: {}\\n'.format(par_type, par_name)\n if self.may_throw:\n result += ' integer (c_int) :: err_code_v\\n'\n result += ' type (c_ptr) :: err_msg_v\\n'\n result += ' integer (c_size_t) :: err_msg_len_v\\n'\n result += ' character (c_char), dimension(:), pointer :: err_msg_f\\n'\n result += ' character(:), allocatable :: err_msg_p\\n'\n result += ' integer (c_size_t) :: err_msg_i\\n'\n if c_return_type or fi_out_parameters or self.may_throw:\n result += '\\n'\n\n # convert input\n args = [param.f_chain_arg() for param in self.params]\n args += [par_name for _, par_name in fi_out_parameters]\n if self.may_throw:\n args += ['err_code_v', 'err_msg_v', 'err_msg_len_v']\n arg_str = ', &\\n'.join([8*' ' + arg for arg in args])\n\n # call C function\n fc_func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n chain_call = self.fc_chain_call(\n ns_prefix=self.c_prefix, class_name=self.class_name,\n fc_func_name=fc_func_name, fc_args=arg_str)\n result_name = ''\n if return_type != '':\n result_name = func_name\n elif out_parameters:\n result_name = out_parameters[0][1]\n result += self.ret_type.f_call_c('ret_val', chain_call)\n\n # handle errors if necessary\n if self.may_throw:\n # Note: I tried to factor this out into a function, but Fortran\n # makes that near-impossible. Since we're generating anyway, it's\n # not really duplication, so leave it as is.\n result += indent(dedent(f\"\"\"\\\n if (err_code_v .ne. 
0) then\n if (present(err_code)) then\n err_code = err_code_v\n if (present(err_msg)) then\n call c_f_pointer(err_msg_v, err_msg_f, (/err_msg_len_v/))\n allocate (character(err_msg_len_v) :: err_msg)\n do err_msg_i = 1, err_msg_len_v\n err_msg(err_msg_i:err_msg_i) = err_msg_f(err_msg_i)\n end do\n end if\n {dedent(\n self.ret_type.f_return_dummy_result(result_name))}\n return\n else\n call c_f_pointer(err_msg_v, err_msg_f, (/err_msg_len_v/))\n allocate (character(err_msg_len_v) :: err_msg_p)\n do err_msg_i = 1, err_msg_len_v\n err_msg_p(err_msg_i:err_msg_i) = err_msg_f(err_msg_i)\n end do\n print *, err_msg_p\n stop\n end if\n else\n if (present(err_code)) then\n err_code = 0\n end if\n end if\n\n \"\"\"), 4*' ')\n\n # convert and return result\n result += self.ret_type.f_return_result(result_name, 'ret_val')\n\n # end\n if return_type != '':\n result += 'end function {}\\n\\n'.format(func_name)\n else:\n result += 'end subroutine {}\\n\\n'.format(func_name)\n return indent(result, 4*' ')",
"def gen_code(self, filename, func_name):\n\n assert self.bits is not None\n\n vd_list = []\n n_vars = 0\n for tree in self.trees:\n vd_list.append(tree.gen_code(n_vars))\n n_vars += len(vd_list[-1])\n\n # checks the type by the suffix\n\n is_v = filename.split(\".\")[-1] == \"v\"\n\n assert self.inputs\n\n f = open(filename, \"w\")\n\n i_bits = np.sum(self.bits[:-1])\n o_bits = self.bits[-1]\n o_sign = self.is_neg[-1]\n\n if is_v:\n f.write(\"module {}(input [{}:0] i, output [{}:0] o);\\n\".format(\n func_name, i_bits-1, o_bits-1))\n else:\n f.write(\"#include<ac_int.h>\\n\\n\")\n f.write(\"void {}(ac_int<{},false> i, ac_int<{},{}> &o)\\n\".format(\n func_name, i_bits, o_bits, o_sign))\n f.write(\"{\\n\")\n\n\n # write function headline\n s_in_line = []\n\n i_bits = self.bits[0]\n i_sign = self.is_neg[0]\n\n if is_v:\n i_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if i_sign else \"\", i_bits-1)\n else:\n i_datatype = \" ac_int<{},{}> \".format(i_bits, i_sign)\n\n len_s = len(i_datatype)\n\n for i in range(self.inputs):\n if is_v:\n s = (\n \"i_\" + str(i) + \" = \" + \"i[\" + str(i_bits*(i+1)-1) + \":\" +\n str(i_bits*i) + \"]\"\n )\n else:\n s = (\n \"i_\" + str(i) + \" = \" + \"i.slc<\" + str(i_bits) + \">(\" +\n str(i_bits*i) + \")\"\n )\n if (\n len_s + len(s) + 2 > 70 or i_bits != self.bits[i] or\n i_sign != self.is_neg[i]\n ):\n f.write(i_datatype + \", \".join(s_in_line) + \";\\n\")\n\n s_in_line = []\n if is_v:\n i_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if i_sign else \"\", i_bits-1)\n else:\n i_datatype = \" ac_int<{},{}> \".format(i_bits, i_sign)\n\n len_s = len(i_datatype)\n\n s_in_line.append(s)\n len_s += len(s) + 2\n\n if s_in_line:\n f.write(i_datatype + \", \".join(s_in_line) + \";\\n\")\n\n if is_v:\n o_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if o_sign else \"\", o_bits)\n else:\n o_datatype = \" ac_int<{},{}> \".format(o_bits, o_sign)\n\n o_list = []\n for i in range(len(vd_list)):\n for v in vd_list[i]:\n if is_v:\n f.write(o_datatype + v + \" = \" + vd_list[i][v] + \";\\n\")\n else:\n f.write(o_datatype + v + \" = \" + vd_list[i][v] + \";\\n\")\n f.write(\"\\n\")\n o_list.append(v)\n\n assert len(o_list) <= 3\n\n if is_v:\n f.write(\" assign \")\n else:\n f.write(\" \")\n\n if len(o_list) == 1:\n f.write(\"o = \" + o_list[0] + \";\")\n elif len(o_list) == 2:\n cond = \"( \" + o_list[0] + \" == \" + o_list[1] + \" ) \"\n n1 = o_list[0]\n n0 = \"( ( \" + \" + \".join(o_list) + \" ) >> 1 )\"\n f.write(\"o = \" + cond + \"? \" + n1 + \": \" + n0)\n elif len(o_list) == 3:\n cond = (\n \"( \" +\n \"( \" + \" == \".join(o_list[0:2]) + \" )?\" + o_list[0] + \":\" +\n \"( \" + \" == \".join(o_list[1:]) + \" )?\" + o_list[1] + \":\" +\n \"( \" + \" == \".join([o_list[0], o_list[2]]) + \" )?\" + o_list[0] +\n \":\" + \"( \" + \" < \".join(o_list[0:2]) + \" ) ?\" +\n \"( ( \" + \" < \".join(o_list[1:]) + \" ) ?\" + o_list[1] + \":\" +\n o_list[2] + \" ) : \" +\n \"( ( \" + \" < \".join([o_list[0], o_list[2]]) + \" ) ?\" + o_list[0] +\n \":\" + o_list[2] + \" )\"\n )\n f.write(\"o = \" + cond + \";\\n\")\n if is_v:\n f.write(\"endmodule\")\n else:\n f.write(\"}\")\n\n f.close()",
"def create_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n def func(list_, length, text, error):\n return overhead(list_, length, text, error)\n\n return overhead"
] | [
"0.6575036",
"0.6416403",
"0.62135506",
"0.61577857",
"0.6081804",
"0.6024244",
"0.58753806",
"0.5823587",
"0.573958",
"0.5726746",
"0.56846005",
"0.56105137",
"0.55705476",
"0.5551445",
"0.5547371",
"0.55256623",
"0.55013",
"0.54184884",
"0.539809",
"0.53861326",
"0.5378367",
"0.53707635",
"0.53621775",
"0.53464997",
"0.5334414",
"0.5327685",
"0.5298696",
"0.5278123",
"0.5239755",
"0.52002364"
] | 0.6747278 | 0 |
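Editorial note on the record that closes above: several of its negative candidates (for example `_compile_C_code` and `create_function`) revolve around binding C code from Python with `cffi`. As a point of reference only, here is a minimal sketch of the same API-mode pattern those snippets use — `cdef` a declaration, `set_source` with the C body, compile, then import the generated extension. The module name `_demo_add` and the trivial `add` function are illustrative stand-ins, not part of the dataset, and running the sketch assumes `cffi` plus a working C compiler.

```python
# Minimal cffi API-mode sketch (illustrative; `_demo_add` and `add` are made up).
import importlib.util
import tempfile

import cffi

ffibuilder = cffi.FFI()
ffibuilder.cdef("int add(int a, int b);")            # declaration exposed to Python
ffibuilder.set_source("_demo_add",                   # name of the generated extension
                      "int add(int a, int b) { return a + b; }")

with tempfile.TemporaryDirectory() as tmpdir:
    lib_path = ffibuilder.compile(tmpdir=tmpdir, verbose=False)

    # Load the freshly built extension module directly from its file path.
    spec = importlib.util.spec_from_file_location("_demo_add", lib_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)

    print(mod.lib.add(2, 3))  # -> 5
```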
Set up the Opple light platform. | def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
name = config[CONF_NAME]
host = config[CONF_HOST]
entity = OppleLight(name, host)
add_entities([entity])
_LOGGER.debug("Init light %s %s", host, entity.unique_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure(self):\n\n self.platform.configure()",
"def platform_start(self):\n self.platform.start()",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 256}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 64}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def setUp(self):\n self.hass = get_test_home_assistant()\n controller_mock = mock.MagicMock()\n dev_dict = {\"address\": \"a1\", \"name\": \"fake_light\", \"brightness_levels\": 32}\n self.light = mochad.MochadLight(self.hass, controller_mock, dev_dict)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None\n) -> None:\n # Assign configuration variables.\n # The configuration check takes care they are present.\n host = config[CONF_HOST]\n username = config[CONF_USERNAME]\n password = config.get(CONF_PASSWORD)\n\n # Setup connection with devices/cloud\n hub = awesomelights.Hub(host, username, password)\n\n # Verify that passed in configuration works\n if not hub.is_valid_login():\n _LOGGER.error(\"Could not connect to AwesomeLight hub\")\n return\n\n # Add devices\n add_entities(AwesomeLight(light) for light in hub.lights())",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n lights = []\n for channel, device_config in config[CONF_DEVICES].items():\n device = {}\n device[\"name\"] = device_config[CONF_NAME]\n device[\"dimmable\"] = device_config[\"dimmable\"]\n device[\"channel\"] = channel\n device[\"driver\"] = config[CONF_DRIVER]\n device[\"host\"] = config[CONF_HOST]\n device[\"port\"] = config[CONF_PORT]\n lights.append(FutureNowLight(device))\n\n add_entities(lights, True)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n # import awesomelights\n\n # Assign configuration variables. The configuration check takes care they are\n # present.\n host = config.get(CONF_HOST)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n # Setup connection with devices/cloud\n # hub = awesomelights.Hub(host, username, password)\n _LOGGER.info(\"hub = awesomelights.Hub(host, username, password)\")\n\n # Verify that passed in configuration works\n # if not hub.is_valid_login():\n # _LOGGER.error(\"Could not connect to AwesomeLight hub\")\n # return\n\n # Add devices\n # add_devices(AwesomeLight(light) for light in hub.lights())\n add_devices([AwesomeLight(Light)])",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n _LOGGER.debug(\"Setting up\")\n\n mon = Monitor(hass, mac, name)\n add_entities([SkybeaconTemp(name, mon)])\n add_entities([SkybeaconHumid(name, mon)])\n\n def monitor_stop(_service_or_event):\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping monitor for %s\", name)\n mon.terminate()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n mon.start()",
"def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True",
"def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, self.args[\"media_player\"], attribute = self.args.get(\"photo_attribute\", \"entity_picture\"))",
"def setupLights(self) :\n\t\tself.ambientLight = render.attachNewNode(AmbientLight( \\\n\t\t\t\t\t\"ambientLight\"))\n\t\tself.ambientLight.node().setColor(Vec4(.8,.8,.8,1))\n\t\trender.setLight(self.ambientLight)\n\n\t\tdLight1 = DirectionalLight(\"dLight1\")\n\t\tdLight1.setColor(Vec4(6,5,7,1))\n\t\tdLight1.setDirection(Vec3(1,1,1))\n\t\tdlnp1 = render.attachNewNode(dLight1)\n\t\tdlnp1.setHpr(30,-160,0)\n\t\trender.setLight(dlnp1)\n\n\t\tdLight2 = DirectionalLight(\"dLight2\")\n\t\tdLight2.setColor(Vec4(.6,.7,1,1))\n\t\tdLight2.setDirection(Vec3(-1,-1,-1))\n\t\tself.dlnp2 = render.attachNewNode(dLight2)\n\t\tself.dlnp2.node().setScene(render)\n\t\tself.dlnp2.setHpr(-70,-60,0)\n\t\trender.setLight(self.dlnp2)",
"def setup_pi():\n global pi\n pi = modOrangePi.OrangePiOne()",
"def main():\r\n LEDStrip = createNeoPixelObject()\r\n setup(LEDStrip)\r\n clock(LEDStrip)",
"def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()",
"def initialize_robot():\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()\n\n proxy_autonomous_life = naoqi.ALProxy(\"ALAutonomousLife\", IP_ROBOT, PORT_ROBOT)\n proxy_autonomous_life.setState(\"disabled\")\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()",
"def setup(self): \n # Navigate to POS screen\n pos.connect()",
"def setup(hass, base_config):\n from pyhusmow import API as HUSMOW_API\n\n config = base_config.get(DOMAIN)\n\n if hass.data.get(DOMAIN) is None:\n hass.data[DOMAIN] = { 'devices': [] }\n\n api = HUSMOW_API()\n api.login(config.get(CONF_USERNAME), config.get(CONF_PASSWORD))\n\n robots = api.list_robots()\n\n if not robots:\n return False\n\n for robot in robots:\n hass.data[DOMAIN]['devices'].append(AutomowerDevice(robot, api))\n\n for component in AUTOMOWER_COMPONENTS:\n discovery.load_platform(hass, component, DOMAIN, {}, base_config)\n\n return True",
"def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access",
"def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_light')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: True,\n FEATURE_PERCENT: True,\n FEATURE_NUMBER_OF_STEPS: 100\n })",
"def setup(self):\n build_world.start_level(self)",
"def setup(self):\n self.pi.set_pull_up_down(self.gpio, pigpio.PUD_OFF)\n self.pi.set_watchdog(self.gpio, 0)\n self.register_callbacks()",
"def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n for light in self.all:\n GPIO.setup(light, GPIO.OUT)",
"def main():\n # Parse arguments for configuration and light type\n parser = argparse.ArgumentParser()\n parser.add_argument(\"light_type\", help=\"lifx or hue\", choices=['lifx', 'hue'], type = str.lower)\n parser.add_argument(\"-c\", \"--config_mode\", action='store_true', help=\"runs the client in config mode which prints out the light data\")\n \n args = parser.parse_args()\n \n config_mode = args.config_mode\n light_type = args.light_type\n \n # Get light information \n # *Note*\n # Only LIFX is supported at this point in time\n light_service = None\n if light_type == 'lifx':\n light_service = lightservice.LIFXLightService(\"https://api.lifx.com/v1/\")\n \n data = light_service.refresh_light_data(config_mode)\n \n button_handler = None\n if config_mode:\n button_handler = buttonhandler.ConfigButtonHandler()\n button_handler.start()\n else:\n button_handler = buttonhandler.ButtonHandler(data)\n button_handler.start(light_service)",
"def test_setup_adds_proper_devices(self, mock_light):\n good_config = {\n \"mochad\": {},\n \"light\": {\n \"platform\": \"mochad\",\n \"devices\": [{\"name\": \"Light1\", \"address\": \"a1\"}],\n },\n }\n assert setup_component(self.hass, light.DOMAIN, good_config)",
"def setup(self):\n\t\tself.interface = self.getDriver('light_interface')\n\n\t\tself.pin = self.config['interface_position']\n\t\tself.blink_rate = self.config['blink_rate'] / 2 or 0.5\n\t\tself.is_on = False\n\n\t\tself.intensity = 255\n\t\tself.blink = False\n\t\tself.count = None\n\t\tself.current_count = False\n\t\tself.current_count = None\n\n\t\tself.saved_intensity = None\n\t\tself.saved_blink = False\n\t\tself.saved_count = None\n\n\t\treturn True",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n from pybotvac import Account\n\n try:\n auth = Account(config[CONF_USERNAME], config[CONF_PASSWORD])\n except HTTPError:\n _LOGGER.error(\"Unable to connect to Neato API\")\n return False\n\n dev = []\n for robot in auth.robots:\n for type_name in SWITCH_TYPES:\n dev.append(NeatoConnectedSwitch(robot, type_name))\n add_devices(dev)",
"def setup_application(self):\n pass",
"def pibooth_startup(cfg, app):",
"def __init__(self, name, host):\n\n self._device = OppleLightDevice(host)\n\n self._name = name\n self._is_on = None\n self._brightness = None\n self._color_temp = None"
] | [
"0.670556",
"0.669467",
"0.64281476",
"0.6402788",
"0.6392791",
"0.6329436",
"0.6317174",
"0.6247289",
"0.61651385",
"0.6147113",
"0.6105949",
"0.60822815",
"0.6056726",
"0.60057616",
"0.5982987",
"0.5977405",
"0.59728897",
"0.5957383",
"0.59512126",
"0.59488773",
"0.59368527",
"0.5917735",
"0.5917156",
"0.59153193",
"0.591321",
"0.5909372",
"0.59063745",
"0.5903958",
"0.59006363",
"0.5900078"
] | 0.73331904 | 0 |
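For orientation, the positive document in the record above follows the usual Home Assistant platform-setup shape: read `CONF_NAME` and `CONF_HOST` from the already-validated config, build a single entity, and hand it to the `add_entities` callback. The stripped-down sketch below imitates that flow without any Home Assistant imports — `FakeLight`, the plain-dict config, and the list-based callback are hypothetical stand-ins, not the real `OppleLight` integration.

```python
# Hypothetical stand-ins mirroring the setup_platform pattern in the record above.
import logging

_LOGGER = logging.getLogger(__name__)

CONF_NAME = "name"
CONF_HOST = "host"


class FakeLight:
    """Placeholder for an entity such as OppleLight (illustrative only)."""

    def __init__(self, name: str, host: str) -> None:
        self.name = name
        self.unique_id = f"fake-{host}"


def setup_platform(config: dict, add_entities) -> None:
    """Build one light from the config and register it via the callback."""
    name = config[CONF_NAME]
    host = config[CONF_HOST]
    entity = FakeLight(name, host)
    add_entities([entity])
    _LOGGER.debug("Init light %s %s", host, entity.unique_id)


if __name__ == "__main__":
    created = []
    setup_platform({CONF_NAME: "desk lamp", CONF_HOST: "192.168.1.50"}, created.extend)
    print([e.unique_id for e in created])  # ['fake-192.168.1.50']
```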
Return the color temperature of this light. | def color_temp(self):
return kelvin_to_mired(self._color_temp) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def color_temp(self):\n return self._color_temp",
"def getTemperature(self):\n return self.temperature",
"def temperature(self):\n return self._temperature",
"def temperature(self):\n return self._temperature",
"def temperature(self):\n return float(self._current_observation['temp_c'])",
"def temperature(self):\n temp = ct.c_float()\n self.lib.GetTemperatureF(ct.pointer(temp))\n return temp.value",
"def temperature(self):\n return self.read_short(65) / 340.0 + 36.53",
"def get_temperature(self):\n pass",
"def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)",
"def color_temp(self) -> int:\n new_range = self._tuya_temp_range()\n tuya_color_temp = self.tuya_device.status.get(self.dp_code_temp, 0)\n return (\n self.max_mireds\n - self.remap(\n tuya_color_temp,\n new_range[0],\n new_range[1],\n self.min_mireds,\n self.max_mireds,\n )\n + self.min_mireds\n )",
"def get_temperature(self):\n self.temperature = self.temperature_sensors.get_temperature(\n self.channel)\n return self.temperature",
"def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0",
"def getTemperature(self):\n return self.json_state.get(\"temperature\")",
"def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp",
"def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)",
"def get_temperature_color_preview(lamp_props):\n temperature = lamp_props.color_temperature\n\n mm = 1000.0 / temperature\n mm2 = mm ** 2\n mm3 = mm2 * mm\n x, y = 0, 0\n\n if temperature < 4000:\n x = -0.2661239 * mm3 - 0.2343580 * mm2 + 0.8776956 * mm + 0.179910\n else:\n x = -3.0258469 * mm3 + 2.1070379 * mm2 + 0.2226347 * mm + 0.240390\n\n x2 = x**2\n x3 = x2 * x\n if temperature < 2222:\n y = -1.1063814 * x3 - 1.34811020 * x2 + 2.18555832 * x - 0.20219683\n elif temperature < 4000:\n y = -0.9549476 * x3 - 1.37418593 * x2 + 2.09137015 * x - 0.16748867\n else:\n y = 3.0817580 * x3 - 5.87338670 * x2 + 3.75112997 * x - 0.37001483\n\n # xyY to XYZ, assuming Y=1.\n xyz = mathutils.Vector((x / y, 1, (1 - x - y) / y))\n return xyz_to_rgb * xyz",
"def getTemperature(self):\n with self.lock:\n temp = self.temp\n return temp",
"def current_temperature(self) -> float:\n return self._thermostat.current_temperatue",
"def temp(self):\n if self.temp_sensor is None:\n return None\n else:\n if self.temp_scale.lower() in ['f', 'fahrenheit']:\n return self.temp_sensor.temp_f\n elif self.temp_scale.lower() in ['c', 'celsius']:\n return self.temp_sensor.temp_c",
"def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)",
"def target_temperature(self):\n if self._client.mode == self._client.MODE_HEAT:\n return self._client.heattemp\n if self._client.mode == self._client.MODE_COOL:\n return self._client.cooltemp\n return None",
"def get_color(self):\n return self.color",
"def current_temperature(self):\n return self.atag.dhw_temperature",
"def temperature(self) -> SmartSsdTemperature:\n return self._temperature",
"def get_temperature(self):\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_GET_TEMPERATURE, (), '', 'i')",
"def get_color(self):\n\n return self.color",
"def internal_temp_c(self) -> int:\n return int(self._device_info[\"Temperature\"])",
"def current_temperature(self):\n return self._cur_temp",
"def ambient_temperature(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"F\":\r\n return self.ambient_temperature_f\r\n elif self.temperature_scale == \"C\":\r\n return self.ambient_temperature_c\r\n else:\r\n return self._ambient_temperature",
"def get_color(self):\r\n return self.__color"
] | [
"0.8550451",
"0.75972027",
"0.75929296",
"0.75929296",
"0.7584848",
"0.7554157",
"0.74379694",
"0.7430193",
"0.73825926",
"0.7336957",
"0.73212904",
"0.72820795",
"0.72075",
"0.72032225",
"0.71964854",
"0.71878976",
"0.7180456",
"0.7172465",
"0.7114398",
"0.7094845",
"0.70741916",
"0.70650184",
"0.70343816",
"0.7015165",
"0.70147234",
"0.7010132",
"0.7008548",
"0.7004446",
"0.69951445",
"0.6989003"
] | 0.8436134 | 1 |
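The positive document in the record above converts a stored Kelvin value to mireds before returning it. The conversion is simply one million divided by the temperature in Kelvin, which is what `kelvin_to_mired`-style helpers conventionally compute; the sketch below spells that out with a hypothetical `DemoLight` that caches `_color_temp` in Kelvin.

```python
# Illustrative only: the reciprocal conversion behind kelvin_to_mired-style helpers.
def kelvin_to_mired(kelvin: float) -> int:
    """Mireds are 1,000,000 divided by the color temperature in Kelvin."""
    return round(1_000_000 / kelvin)


class DemoLight:
    """Hypothetical light that stores its color temperature in Kelvin."""

    def __init__(self, color_temp_kelvin: float) -> None:
        self._color_temp = color_temp_kelvin

    @property
    def color_temp(self) -> int:
        return kelvin_to_mired(self._color_temp)


print(DemoLight(2700).color_temp)  # warm white -> 370 mireds
print(DemoLight(6500).color_temp)  # daylight   -> 154 mireds
```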
This function returns the number of elements in the numbers list that are divisible by divide. | def listDivide(numbers, divide = 2):
divisible_count = 0
for i in numbers:
if i % divide == 0:
divisible_count += 1
return divisible_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listDivide(numbers, divide=2): \n counter = 0\n for num in numbers:\n if num % divide == 0:\n counter+=1\n return counter",
"def listDivide(numbers, divide=2):\n newList = []\n for i in numbers:\n if i % divide == 0:\n newList.append(i)\n return len(newList)",
"def divide(numbers):\n counter = 0\n for num in numbers:\n counter /= num\n return counter",
"def answer(l):\n num_divisors = [0] * len(l)\n triple_count = 0\n for large in range(1, len(l)):\n for small in range (0, large):\n if l[large] % l[small] == 0:\n num_divisors[large] += 1\n triple_count += num_divisors[small]\n return triple_count",
"def count_multiples( start, stop, divisor ):\n count = 0\n num = start\n while num < stop + 1:\n if num % divisor == 0:\n count += 1\n num += 1\n return count",
"def num_divisors_iii(n):\n set_pf = set(n)\n n_div = 1\n for pf in set_pf:\n x = n.count(pf)\n n_div *= (1 + x)\n return n_div",
"def count_divisor(num):\n ans = 2 # considering 1 and number itself by default\n for i in range(2, int(math.sqrt(num)) + 1):\n if num % i == 0:\n # for equal divisor (incase of perfect square)\n if (num / i) == i:\n ans += 1\n else:\n ans += 2\n return ans",
"def get_divisores(num):\n divisores = [] #uso una lista para guardar los divisores\n for i in range(1, num):\n if num%i == 0:\n divisores.append(i)\n return divisores",
"def count_divisions(num, n):\n count = 0\n while pe_005.is_divisible(num, n):\n num = num // n\n count += 1\n return count, num",
"def question_23(list_num: float) -> float:\n return sum(list_num) / len(list_num)",
"def find_count_divisor(this_list):\n max_found = this_list[0][1]\n count = 0\n\n while max_found/50 > 0:\n max_found -= 50\n count += 1\n\n return count",
"def num_divisors_ii(n):\n set_pf = set(n)\n n_og = 2**(len(set_pf))\n n_div = n_og\n for pf in set_pf:\n x = n.count(pf)\n n_div += n_div//2 * (x - 1)\n return n_div",
"def div_numbers(a: int, b: int) -> int:\n return a / b",
"def divide(numbers):\n \n result = numbers[0]\n for n in numbers[1:]:\n result = result / n\n return result",
"def div_by(n, list_of_num):\n for num in list_of_num:\n if not n % num:\n return True\n return False",
"def sumDivisor(inputList):\n result = 0\n for i in inputList:\n result += i\n return result",
"def verifica_element_divide_lista(numar, lista_divizori):\n for i in lista_divizori:\n if i == 0:\n return False\n if numar % i != 0:\n return False\n return True",
"def findDivisor(num):\n divisors = [1]\n for i in range(2, int(sqrt(num)) + 1):\n if num % i == 0:\n divisors.append(i)\n temp = num / i\n if temp != i:\n divisors.append(temp)\n return divisors",
"def div_sum(data: list) -> int:\n\n def even_quotient(nums: list) -> int:\n \"\"\"Finds the quotient of the only two numbers in the list that evennly divide.\"\"\"\n for i in range(len(nums[:-1])):\n for j in range(i + 1, len(nums)):\n if nums[i] % nums[j] == 0:\n return nums[i] // nums[j]\n elif nums[j] % nums[i] == 0:\n return nums[j] // nums[i]\n\n total = 0\n for row in data:\n total += even_quotient(row)\n return total",
"def test_count_divisible_digits():\n print('Testing count_divisible_digits')\n\n # Cases given to test this problem\n assert_equals(4, hw1.count_divisible_digits(650899, 3))\n assert_equals(1, hw1.count_divisible_digits(-204, 5))\n assert_equals(0, hw1.count_divisible_digits(24, 5))\n assert_equals(0, hw1.count_divisible_digits(1, 0))\n\n # Additional cases to check the 0 check\n assert_equals(0, hw1.count_divisible_digits(0, 0))\n assert_equals(2, hw1.count_divisible_digits(-579300, 2))",
"def divisions(self,domain,divisions):\n size = domain.height/divisions\n counter = []\n for i in range(divisions):\n count = ((self.z >= i*size) & (self.z < (i+1)*size)).sum()\n counter.append(count)\n return counter",
"def get_divisors_sum(number):\n if number == 0:\n return 0\n\n divisors_list = []\n for i in range(number+1):\n j = i + 1\n if number % j == 0:\n divisors_list.append(j)\n\n return sum(divisors_list)",
"def testListDivide():\n assert listDivide([1,2,3,4,5]) == 2\n assert listDivide([2,4,6,8,10]) == 5\n assert listDivide([30, 54, 63,98, 100], divide = 10) == 2\n assert listDivide([]) == 0\n assert listDivide([1,2,3,4,5], 1) == 5",
"def divisible_by(array, divisor):\n return_list = list()\n for i in array:\n if i % divisor == 0:\n return_list.append(i)\n return return_list",
"def sum_of_proper_divisors(number: int):\n divisors = []\n\n for n in range(1, number):\n if number % n == 0:\n divisors.append(n)\n\n return sum(divisors)",
"def n_photon_counting_div(self):\n inti = ct.c_ulong()\n self.lib.GetNumberPhotonCountingDivisions(ct.pointer(inti))\n return inti.value",
"def getDivisors(n):",
"def divisor_counter(num):\n if num <= 0:\n raise ValueError('num must be a positive, non-zero number')\n\n divisors = 0\n num_sq_rt = num ** .5\n for possible_divisor in range(1, int(num_sq_rt)):\n if num % possible_divisor == 0:\n divisors += 1\n\n divisors *= 2\n # If num is a perfect square, we have to subtract one so we only count\n # the square root once. i.e. if num is 16, we only want to count 4 once\n if num_sq_rt.is_integer():\n divisors -= 1\n return divisors*2",
"def _find_dividers(num: int) -> List[int]:\r\n\r\n dividers: List[int] = list()\r\n while num != 1:\r\n primes = PrimeHandler.find_all_primes(num)\r\n for prime in reversed(primes):\r\n if num % prime == 0:\r\n dividers.append(prime)\r\n num = num // prime\r\n break\r\n return list(reversed(dividers))",
"def main():\n numbers = int(input())\n count = 0\n for num in range(1, numbers+1):\n if num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n break\n else:\n count += 1\n print(count)"
] | [
"0.82095575",
"0.8028148",
"0.7676638",
"0.724413",
"0.7203808",
"0.6955737",
"0.6939475",
"0.6889885",
"0.6795731",
"0.6774087",
"0.6745991",
"0.6704762",
"0.66760755",
"0.66736186",
"0.6646036",
"0.65987384",
"0.6588523",
"0.6577694",
"0.6576061",
"0.6554679",
"0.64939314",
"0.6487798",
"0.648749",
"0.64867496",
"0.6440039",
"0.64290667",
"0.6417297",
"0.6391014",
"0.63629925",
"0.6293372"
] | 0.8605654 | 0 |
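As an aside to the listDivide row above: the same count can be written as a generator expression. The sketch below is illustrative only (not part of the dataset); it inlines a loop version mirroring the row and checks that both formulations agree.

```python
# Illustrative sketch: counting elements divisible by a given divisor,
# mirroring the listDivide(numbers, divide=2) helper in the preceding row.

def list_divide(numbers, divide=2):
    # Loop-based version, as in the dataset row above.
    divisible_count = 0
    for i in numbers:
        if i % divide == 0:
            divisible_count += 1
    return divisible_count

def list_divide_compact(numbers, divide=2):
    # Equivalent one-liner using a generator expression.
    return sum(1 for i in numbers if i % divide == 0)

if __name__ == "__main__":
    samples = ([1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [], [30, 54, 63, 98, 100])
    for nums in samples:
        assert list_divide(nums) == list_divide_compact(nums)
    assert list_divide_compact([30, 54, 63, 98, 100], divide=10) == 2
    print("both versions agree")
```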
This function tests the listDivide function. | def testListDivide():
assert listDivide([1,2,3,4,5]) == 2
assert listDivide([2,4,6,8,10]) == 5
assert listDivide([30, 54, 63,98, 100], divide = 10) == 2
assert listDivide([]) == 0
assert listDivide([1,2,3,4,5], 1) == 5 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testListDivide():\n listDivide([1, 2, 3, 4, 5])\n listDivide([2, 4, 6, 8, 10])\n listDivide([30, 54, 63, 98, 100], divide=10)\n listDivide([])\n listDivide([1, 2, 3, 4, 5], 1)",
"def testListDivide():\n #a\n numbers = [1,2,3,4,5]\n expected = 2\n \n try:\n assert listDivide(numbers) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\")\n \n \n #b\n numbers = [2,4,6,8,10]\n expected = 5\n \n try:\n assert listDivide(numbers) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\") \n \n #c\n numbers = [30, 54, 63, 98, 100]\n divide = 10\n expected = 2\n \n try:\n assert listDivide(numbers, divide) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\") \n \n #d\n numbers = []\n expected = 0\n \n try:\n assert listDivide(numbers) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\") \n \n #e\n numbers = [1, 2, 3, 4, 5]\n divide = 1\n expected = 5\n \n try:\n assert listDivide(numbers, divide) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\")",
"def test_list_int(self):\n result = div(2, 4)\n self.assertEqual(result, 0.5)",
"def test_divide(self):\n self.assertEqual(2, divide(6, 3))\n self.assertEqual(2.5, divide(5, 2))",
"def listDivide(numbers, divide=2):\n newList = []\n for i in numbers:\n if i % divide == 0:\n newList.append(i)\n return len(newList)",
"def divide_list(ld, division):\n buckets = []\n current = []\n for obj in ld:\n if len(current) < division:\n current.append(obj)\n else:\n buckets.append(current)\n current = [obj]\n if len(current) > 0:\n buckets.append(current)\n return buckets",
"def test_div():\n l = [1, 2, 3, 4]\n assert s7.div(*l) == 1 / 2 / 3 / 4\n assert s7.div(100, 20) == 5\n assert s7.div(100.0, 20) == 5.0\n assert s7.div(100, 20.0) == 5.0",
"def test_divide(self):\n self.assertEqual(2, foo.divide(6, 3))\n self.assertEqual(2.5, foo.divide(5, 2))",
"def test_divide(self):\n print \"divide\"\n self.assertEqual(2, divide(6, 3))\n self.assertEqual(2.5, divide(5, 2))",
"def divideList(L):\n for x in range(len(L)):\n L[x] = L[x]/100.0\n return L",
"def test_dividing(self):\n divider = Divider()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n if j != 0:\n self.assertEqual(i/j, divider.calc(j, i))",
"def seperate_list(list, division_part):\n avg = len(list) / float(division_part)\n out = []\n last = 0.0\n\n while last < len(list):\n out.append(list[int(last):int(last + avg)])\n last += avg\n return out",
"def listDivide(numbers, divide=2): \n counter = 0\n for num in numbers:\n if num % divide == 0:\n counter+=1\n return counter",
"def test_splitlist():\n lst = [4, 2, 3, 1, 6, 7]\n lt, pi, gt = splitlist(lst)\n if lt == [2, 3, 1] and pi == 4 and gt == [6, 7]:\n print(\"test splitlist OK!\")\n else:\n print(\"test splitlist Failed!\")",
"def test_divide_success(self):\n with self.assertNoLogs():\n divide_by(10, 2)",
"def listDivide(numbers, divide = 2):\n divisible_count = 0\n\n for i in numbers:\n if i % divide == 0:\n divisible_count += 1\n return divisible_count",
"def test_split(range_size, partition_size):\n dump = Mock()\n\n iterable = list(range(range_size))\n\n list(_split(partition_size=partition_size, dump=dump, iterable=iterable))\n expected_call_count = (range_size // partition_size) + int(bool(range_size % partition_size))\n\n assert dump.call_count == expected_call_count",
"def test_list_int(self):\n\n result = get_avg([0,0,0,0])\n self.assertEqual(result, ZeroDivisionError)",
"def list_division(my_list_1, my_list_2, list_length):\n\n res_1 = []\n res = 0\n for n in range(0, list_length):\n try:\n res = my_list_1[n] / my_list_2[n]\n res_1.append(res)\n except ZeroDivisionError:\n print(\"division by 0\")\n res_1.append(0)\n except TypeError:\n print(\"wrong type\")\n res_1.append(0)\n except IndexError:\n print(\"out of range\")\n res_1.append(0)\n finally:\n pass\n return res_1",
"def test_divide(self):\n\n result = random.randint(2, 10)\n\n b = random.randint(100, 1000)\n a = result * b\n\n path = \"/divide/{}/{}\".format(a, b)\n\n response = self.get_response(path)\n self.assertEqual(200, response.getcode())\n\n self.assertIn(str(result).encode(), response.read())",
"def divide_list(input_list, n):\n\n avg = len(input_list) / float(n)\n last = 0.0\n divided = []\n\n while last < len(input_list):\n divided.append(input_list[int(last):int(last + avg)])\n last += avg\n\n return divided",
"def divide(numbers):\n \n result = numbers[0]\n for n in numbers[1:]:\n result = result / n\n return result",
"def verifica_element_divide_lista(numar, lista_divizori):\n for i in lista_divizori:\n if i == 0:\n return False\n if numar % i != 0:\n return False\n return True",
"def div(a, x):\n return [a[i]/x for i in range(2)]",
"def test_four_divided_by_two():\n assert divide(4, 2) == 2",
"def test_list_group(self):\n pass",
"def test_scalar_division(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n\n a2 = a1 / 2\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))",
"def divide(a, b):\n return a / b",
"def _divide_pred(pred):\n if type(pred) == list:\n fake = []\n real = []\n for p in pred:\n fake.append([tensor[:tensor.size(0) // 2] for tensor in p])\n real.append([tensor[tensor.size(0) // 2:] for tensor in p])\n else:\n fake = pred[:pred.size(0) // 2]\n real = pred[pred.size(0) // 2:]\n\n return fake, real",
"def test_list(self):\n pass"
] | [
"0.8978412",
"0.8315685",
"0.72600734",
"0.7163162",
"0.6961623",
"0.69122976",
"0.6898121",
"0.68613684",
"0.68511623",
"0.6756895",
"0.6750342",
"0.66283035",
"0.66096985",
"0.65842646",
"0.6559477",
"0.64508224",
"0.6335196",
"0.6329726",
"0.6322206",
"0.6280527",
"0.6270623",
"0.6246634",
"0.62314254",
"0.61926675",
"0.6124619",
"0.6065383",
"0.60648453",
"0.60194075",
"0.60072446",
"0.5985423"
] | 0.88921094 | 1 |
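A hedged sketch of how the assertion-style test in the preceding row could be expressed as a parametrized pytest case. The function is inlined here so the sketch is self-contained; in practice it would be imported from wherever listDivide actually lives (that import path is an assumption).

```python
# Sketch only: pytest-style rewrite of the testListDivide assertions above.
import pytest

def listDivide(numbers, divide=2):
    # Inlined for self-containment; normally imported from its module.
    return sum(1 for n in numbers if n % divide == 0)

@pytest.mark.parametrize(
    "numbers, divide, expected",
    [
        ([1, 2, 3, 4, 5], 2, 2),
        ([2, 4, 6, 8, 10], 2, 5),
        ([30, 54, 63, 98, 100], 10, 2),
        ([], 2, 0),
        ([1, 2, 3, 4, 5], 1, 5),
    ],
)
def test_list_divide(numbers, divide, expected):
    assert listDivide(numbers, divide) == expected
```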
Drops an Operation, identified by its Operation Id, and its children recursively. Drop deletes the Operations from the database | def drop_operation(cls,operation_id):
db = cls._core.get_db()
stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;"
cur = db.query(cls._core,stmnt,(operation_id,))
for row in cur.fetchallmap():
cls.drop_operation(row["OPE_ID"])
stmnt = "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;"
db.query(cls._core,stmnt,(operation_id,),commit=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cancel_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.cancel_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)",
"def remove_operation(self, name):\n\n del self.operations[name]",
"def _simple_deletion(self, operation, labels):\r\n label_strings = []\r\n for label in labels:\r\n if inspect.isclass(label) and issubclass(label, Edge):\r\n label_string = label.get_label()\r\n elif isinstance(label, Edge):\r\n label_string = label.get_label()\r\n label_strings.append(label_string)\r\n\r\n return self._delete_related(operation, label_strings)",
"def drop(self):\n j.sal.fs.removeDirTree(self._root, True)\n j.sal.fs.createDir(self._root)",
"def _simple_deletion(self, operation, labels):\n from mogwai.models.edge import Edge\n\n label_strings = []\n for label in labels:\n if inspect.isclass(label) and issubclass(label, Edge):\n label_string = label.get_label()\n elif isinstance(label, Edge):\n label_string = label.get_label()\n elif isinstance(label, string_types):\n label_string = label\n else:\n raise MogwaiException('traversal labels must be edge classes, instances, or strings')\n label_strings.append(label_string)\n\n future = connection.future_class()\n future_result = self._delete_related(operation, label_strings)\n\n def on_read(f2):\n try:\n result = f2.result()\n except Exception as e:\n future.set_exception(e)\n else:\n future.set_result(result)\n\n def on_save(f):\n try:\n stream = f.result()\n except Exception as e:\n future.set_exception(e)\n else:\n future_read = stream.read()\n future_read.add_done_callback(on_read)\n\n future_result.add_done_callback(on_save)\n\n return future",
"def delete_branch_from_db(element_id):\n from core_parser_app.components.data_structure.models import (\n DataStructureElement,\n )\n\n element = DataStructureElement.get_by_id(element_id)\n\n for child in element.children.all():\n delete_branch_from_db(str(child.pk))\n\n element.delete()",
"def test_delete_complex_tree_06(comp):\n comp.delete(9)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 4, 7, 15)",
"def test_delete_complex_tree_03(comp):\n comp.delete(15)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 12, 13, 14)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 4, 7, 9)",
"def drop(self):\n self.id = None",
"def test_delete_complex_tree_02(comp):\n comp.delete(4)\n assert tuple(comp.in_order()) == (6, 7, 8, 9, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 13, 6, 10, 12, 14, 7, 9, 15)",
"def test_delete_complex_tree_04(comp):\n comp.delete(13)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 12, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 14, 6, 10, 12, 15, 4, 7, 9)",
"def test_delete_complex_tree_07(comp):\n comp.delete(12)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 11, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 8, 14, 6, 10, 13, 15, 4, 7, 9)",
"def test_delete_complex_tree_05(comp):\n comp.delete(8)\n assert tuple(comp.in_order()) == (4, 6, 7, 9, 10, 11, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (11, 9, 13, 6, 10, 12, 14, 4, 7, 15)",
"def drop(self):\n\t\tdrop_model(self.name, self.cursor, print_info = False)",
"def test_delete_complex_tree_08(comp):\n comp.delete(11)\n assert tuple(comp.in_order()) == (4, 6, 7, 8, 9, 10, 12, 13, 14, 15)\n assert tuple(comp.breadth_first()) == (12, 8, 14, 6, 10, 13, 15, 4, 7, 9)",
"def op_delete(self, args):\n stack_level = 0\n if args != None:\n stack_level = int(args[0])\n self.require_stack(stack_level+1)\n if stack_level == None:\n self.stack.pop()\n else:\n self.stack.pop(-stack_level-1)",
"def test_handle_root_deletion(right_left_most_has_right_child):\n right_left_most_has_right_child.delete(1)\n assert tuple(right_left_most_has_right_child.in_order()) == (\n 3, 5, 6, 7, 8, 10, 20\n )",
"def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_operation\" not in self._stubs:\n self._stubs[\"delete_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/DeleteOperation\",\n request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"delete_operation\"]",
"def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_operation\" not in self._stubs:\n self._stubs[\"delete_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/DeleteOperation\",\n request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"delete_operation\"]",
"def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_operation\" not in self._stubs:\n self._stubs[\"delete_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/DeleteOperation\",\n request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"delete_operation\"]",
"def DeleteOperation(\n self,\n request: google.longrunning.operations_pb2.DeleteOperationRequest,\n context: grpc.ServicerContext,\n ) -> google.protobuf.empty_pb2.Empty:",
"def delete_objects(data,\n path = None,\n submode_adjustor = None):\n if debug.description(): # description debugging\n print 'delete_objects', data, path\n\n if not path:\n raise error.CommandDescriptionError(\"Need path to delete an object\")\n\n data = dict(data)\n bigdb = bigsh.bigdb\n bigdb.canonicalize_values_of_path(path, data)\n\n # if the node type under configuration is a LIST\n # (or LEAF_LIST), this likely wants to add a new\n # item to the list.\n (schema, items_matched) = bigdb.schema_of_path(path, {} )\n if schema == None:\n print 'Missing Schema for', path\n return\n node_type = schema['nodeType']\n if debug.description(): # description debugging\n print 'delete_objects:', path, node_type\n\n if node_type == 'LIST':\n list_nodes = schema['listElementSchemaNode']\n selection = {}\n for key in list_nodes.get('keyNodeNames', []):\n if key in data:\n full_path = '%s/%s' % (path, key)\n selection[full_path] = data[key]\n del data[key]\n # populate for fields which are key's\n for key in list_nodes.get('keyNodeNames', []):\n if not key in selection:\n for row in command.bigsh.mode_stack:\n if 'name' in row and row['name'] == key:\n if 'obj' in row:\n selection[key] = row['obj']\n bigdb.add_mode_stack_paths(selection)\n if submode_adjustor:\n command.submode_adjustor_invoke(submode_adjustor,\n path,\n selection,\n data,\n 'delete')\n\n oper = bigdb.canonicalize_values_for_delete(path,\n data,\n selection,\n list_nodes['childNodes'])\n if oper == 'POST':\n bigdb.post(path, data, selection)\n else:\n # bigdb.delete(path, data, selection) perhaps date <- {}\n bigdb.delete(path, data, selection)\n return\n if node_type == 'LEAF_LIST':\n if debug.description(): # description debugging\n print 'delete_object: leaf-list needs implementation:LEAF_LISTN'\n selection = {}\n bigdb.add_mode_stack_paths(selection)\n leaf_node = schema['leafSchemaNode']\n type_node = leaf_node['typeSchemaNode']\n split_path = path.split('/')\n item_name = split_path[-1]\n item = None\n if item_name in data:\n item = data[item_name]\n elif type_node['name'] in data:\n item = data[type_node['name']]\n del data[type_node['name']]\n if debug.description(): # description debugging\n print 'DATUM', data, 'SELECTUM', selection, 'ITEM', item\n # Currently, 'add/delete' for specific elements isn't\n # directly support in the BigDB REST API's. \n split_path = path.split('/')\n base_path = '/'.join(split_path[:-1])\n (schema, result) = bigdb.schema_and_result(base_path, selection)\n collection = result.expect_single_result(failed_result = [])\n item_name = split_path[-1]\n if item_name in collection:\n collection = collection[item_name]\n if debug.description(): # description debugging\n print 'COLLECTION', collection, ' REMOVE ', item\n if item in collection:\n collection = [x for x in collection if x != item]\n bigdb.put(path, collection, selection, 'query')\n return\n raise error.CommandSemanticError('%s \"%s\" '\n 'not currently configured' %\n (item_name, item))\n return\n if node_type == 'CONTAINER':\n container_nodes = schema.get('childNodes')\n\n selection = {}\n bigdb.add_mode_stack_paths(selection)\n\n for (n,v) in data.items():\n oper = bigdb.canonicalize_values_for_delete(path,\n data,\n selection,\n container_nodes)\n if oper == 'PATCH':\n bigdb.patch(path, data, selection)\n else:\n item_path = '%s/%s' % (path, n)\n bigdb.delete(item_path, {}, selection)\n return\n\n bigsh.bigdb.add_mode_stack_paths(data)\n bigsh.bigdb.delete(path, data)",
"def delete(self, tree_path):\n\t\traise NotImplementedError",
"def test_deletion(basic_tree):\n tree = red_black_tree.RBTree()\n\n # 23, 4, 30, 11, 7, 34, 20, 24, 22, 15, 1\n for key, data in basic_tree:\n tree.insert(key=key, data=data)\n\n # No child\n tree.delete(15)\n assert [item for item in tree.inorder_traverse()] == [\n (1, \"1\"),\n (4, \"4\"),\n (7, \"7\"),\n (11, \"11\"),\n (20, \"20\"),\n (22, \"22\"),\n (23, \"23\"),\n (24, \"24\"),\n (30, \"30\"),\n (34, \"34\"),\n ]\n\n # One right child\n tree.delete(7)\n assert [item for item in tree.inorder_traverse()] == [\n (1, \"1\"),\n (4, \"4\"),\n (11, \"11\"),\n (20, \"20\"),\n (22, \"22\"),\n (23, \"23\"),\n (24, \"24\"),\n (30, \"30\"),\n (34, \"34\"),\n ]\n\n # One left child\n tree.insert(key=9, data=\"9\")\n tree.delete(11)\n assert [item for item in tree.inorder_traverse()] == [\n (1, \"1\"),\n (4, \"4\"),\n (9, \"9\"),\n (20, \"20\"),\n (22, \"22\"),\n (23, \"23\"),\n (24, \"24\"),\n (30, \"30\"),\n (34, \"34\"),\n ]\n\n # Two children\n tree.delete(23)\n assert [item for item in tree.inorder_traverse()] == [\n (1, \"1\"),\n (4, \"4\"),\n (9, \"9\"),\n (20, \"20\"),\n (22, \"22\"),\n (24, \"24\"),\n (30, \"30\"),\n (34, \"34\"),\n ]",
"def _operation_tree(self):\n\n # initial state\n i = 0\n level = 0\n stack = []\n current = None\n\n def _create_operation(args):\n profile_stats = None\n name = args[0].strip()\n args.pop(0)\n if len(args) > 0 and \"Records produced\" in args[-1]:\n records_produced = int(\n re.search(\"Records produced: (\\\\d+)\", args[-1]).group(1)\n )\n execution_time = float(\n re.search(\"Execution time: (\\\\d+.\\\\d+) ms\", args[-1]).group(1)\n )\n profile_stats = ProfileStats(records_produced, execution_time)\n args.pop(-1)\n return Operation(\n name, None if len(args) == 0 else args[0].strip(), profile_stats\n )\n\n # iterate plan operations\n while i < len(self.plan):\n current_op = self.plan[i]\n op_level = current_op.count(\" \")\n if op_level == level:\n # if the operation level equal to the current level\n # set the current operation and move next\n child = _create_operation(current_op.split(\"|\"))\n if current:\n current = stack.pop()\n current.append_child(child)\n current = child\n i += 1\n elif op_level == level + 1:\n # if the operation is child of the current operation\n # add it as child and set as current operation\n child = _create_operation(current_op.split(\"|\"))\n current.append_child(child)\n stack.append(current)\n current = child\n level += 1\n i += 1\n elif op_level < level:\n # if the operation is not child of current operation\n # go back to it's parent operation\n levels_back = level - op_level + 1\n for _ in range(levels_back):\n current = stack.pop()\n level -= levels_back\n else:\n raise Exception(\"corrupted plan\")\n return stack[0]",
"def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])",
"def drop(self):\n for step in self.steps:\n step[1].drop()",
"def drop(self):\n pass",
"def drop(self):\n pass"
] | [
"0.68534726",
"0.59218234",
"0.5495888",
"0.5355895",
"0.5291793",
"0.5289697",
"0.52653944",
"0.52528286",
"0.5250576",
"0.52433366",
"0.5235218",
"0.5228821",
"0.5226976",
"0.522246",
"0.5191076",
"0.5159619",
"0.5126623",
"0.50397605",
"0.50218856",
"0.50218856",
"0.50218856",
"0.5013367",
"0.5005075",
"0.4987348",
"0.49844423",
"0.49713787",
"0.49689227",
"0.4968176",
"0.49651787",
"0.49651787"
] | 0.79740757 | 0 |
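The drop_operation row above deletes an operation and its children by recursing over parent links. The sketch below reproduces that pattern against an in-memory sqlite3 table; the OPERATIONS schema here is a stripped-down assumption (only the columns the recursion touches), not the project's real schema.

```python
# Minimal sketch of the recursive "drop subtree" pattern, assuming a
# simplified OPERATIONS table with OPE_ID / OPE_OPE_PARENT / OPE_STATUS.
import sqlite3

def drop_operation(conn, operation_id):
    # Recurse into children that are PENDING (0) or FAILED (2), then delete.
    cur = conn.execute(
        "SELECT OPE_ID FROM OPERATIONS "
        "WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2)",
        (operation_id,),
    )
    for (child_id,) in cur.fetchall():
        drop_operation(conn, child_id)
    conn.execute(
        "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2)",
        (operation_id,),
    )

if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE OPERATIONS (OPE_ID INTEGER PRIMARY KEY, "
        "OPE_OPE_PARENT INTEGER, OPE_STATUS INTEGER)"
    )
    conn.executemany(
        "INSERT INTO OPERATIONS VALUES (?, ?, ?)",
        [(1, None, 0), (2, 1, 0), (3, 2, 2), (4, None, 0)],
    )
    drop_operation(conn, 1)
    remaining = [r[0] for r in conn.execute("SELECT OPE_ID FROM OPERATIONS")]
    print(remaining)  # expected: [4]
```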
Resets the state of an operation and its children recursively to 0 (PENDING). The operation is identified by a given operationId | def retry_operation(cls,operation_id):
db = cls._core.get_db()
stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;"
cur = db.query(cls._core,stmnt,(operation_id,))
for row in cur.fetchallmap():
cls.retry_operation(row["OPE_ID"])
stmnt = "UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;"
db.query(cls._core,stmnt,(operation_id,),commit=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cancel_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.cancel_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def resetOperationCount():\n global _operationCount\n _countLock.acquire()\n try:\n _operationCount = 0\n finally:\n _countLock.release()",
"def drop_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.drop_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def reset_tree() -> None:\n global task_tree\n task_tree = TaskTreeNode(NoOperation())\n task_tree.start_time = datetime.datetime.now()\n task_tree.status = TaskStatus.RUNNING",
"def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None",
"def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)",
"def setOperationId(self, opid) :\n self.operation_id = opid",
"def _reset_traversal_state(self):\n for n in self.nodes.values():\n n.reset_traversal_state()",
"def reset(self):\n # The apply(f) method recursively calls f on itself and all children\n self.apply(self._reset_module)",
"def reset(self) -> None:\r\n self.tree.delete(*self.tree.get_children())",
"def operation_state(self, operation_state):\n\n self._operation_state = operation_state",
"def reset(self):\r\n self.key = None\r\n self.value = None\r\n self.parent = None\r\n self.left_child = None\r\n self.right_child = None\r\n self.color = BLACK\r\n self.size_tree = 0",
"def reset(self):\n self.state = EvaluationState.ready\n\n for child in self.children:\n if hasattr(child, \"reset\"):\n child.reset()",
"def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r",
"def reset(self):\n self.undo_stack = Stack(self.undo_stack_size)\n self.redo_stack[:] = []\n self.not_undoable_action = False\n self.undo_in_progress = False",
"def _reset_tree_ids(self, start_id: int):\n\n add_id = start_id - self.min_tree_id()\n self.tree_ids = [tree_id + add_id for tree_id in self.tree_ids]",
"def clear(self):\n self.beginResetModel()\n self.root_item = RootItem()\n self.requests_items = {}\n self.endResetModel()",
"def update_root(self, action: Action) -> \"MonteCarloSearchTree\":\n if action in self._root.children:\n new_root = self._root.children[action]\n else:\n new_root = self._root.add_child(action)\n self._root.remove_child(new_root)\n self._root = new_root\n return self",
"def reset(self, state: nx.Graph = None):\n if state is None:\n self._state = self.init_mol\n else:\n self._state = state\n\n self.action_space.update_actions(self._state, self.observation_space)\n if self.record_path:\n self._path = [self._state]\n self._counter = 0",
"def reset_state(self):\n self.intersection_per_class.assign(\n tf.zeros_like(self.intersection_per_class)\n )\n self.union_per_class.assign(tf.zeros_like(self.union_per_class))",
"def on_ResetNode_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def clear(self):\n self.root = None",
"def reset() -> None:\n Invocation.active = {}\n Invocation.current = None # type: ignore\n Invocation.top = Invocation(None, None)\n Invocation.top._become_current() # pylint: disable=protected-access\n Invocation.up_to_date = {}\n Invocation.phony = set()\n Invocation.poisoned = set()\n Invocation.actions_count = 0\n Invocation.skipped_count = 0",
"def reset(self):\n self._open_activity_count = 0\n self._decisions = []\n self._tasks = TaskRegistry()",
"def reset_workflow(self, new_id):\n with self._driver.session() as session:\n session.write_transaction(tx.reset_tasks_metadata)\n session.write_transaction(tx.reset_workflow_id, new_id=new_id)",
"def clear_state(self):\n super().clear_state()\n self.pid = 0",
"def reset(self):\n for parent in self.GetParents():\n parent.reset()"
] | [
"0.6283896",
"0.55792785",
"0.5552252",
"0.55377895",
"0.55284655",
"0.53993994",
"0.5203521",
"0.5042159",
"0.50319934",
"0.50242394",
"0.5023247",
"0.5015806",
"0.50039274",
"0.4977529",
"0.4977529",
"0.4977529",
"0.4977529",
"0.48844925",
"0.4862212",
"0.48437867",
"0.48415264",
"0.48373774",
"0.4833321",
"0.4814799",
"0.48086548",
"0.47904965",
"0.47648314",
"0.47600612",
"0.47410592",
"0.47050318"
] | 0.6371056 | 0 |
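The retry_operation row resets a failed subtree back to PENDING by recursion. As a design note, the same reset can be done iteratively with an explicit stack, which sidesteps Python's recursion limit on very deep operation trees. The sketch below assumes the same simplified OPERATIONS columns as the earlier sketch.

```python
# Sketch: iterative version of "reset failed subtree to PENDING (0)",
# using an explicit stack instead of recursion. Schema is assumed.
import sqlite3

def retry_operation_iterative(conn, operation_id):
    stack = [operation_id]
    while stack:
        current = stack.pop()
        # Collect FAILED (2) children before touching the current row.
        cur = conn.execute(
            "SELECT OPE_ID FROM OPERATIONS "
            "WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2",
            (current,),
        )
        stack.extend(row[0] for row in cur.fetchall())
        conn.execute(
            "UPDATE OPERATIONS SET OPE_STATUS = 0 "
            "WHERE OPE_ID = ? AND OPE_STATUS = 2",
            (current,),
        )
```

Functionally this visits the same rows as the recursive version; only the traversal bookkeeping changes.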
Cancels an Operation, identified by its Operation Id, and its children recursively. Cancel deletes the Operation from the database | def cancel_operation(cls,operation_id):
db = cls._core.get_db()
stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;"
cur = db.query(cls._core,stmnt,(operation_id,))
for row in cur.fetchallmap():
cls.cancel_operation(row["OPE_ID"])
stmnt = "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;"
db.query(cls._core,stmnt,(operation_id,),commit=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drop_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.drop_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]",
"def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]",
"def CancelOperation(\n self,\n request: google.longrunning.operations_pb2.CancelOperationRequest,\n context: grpc.ServicerContext,\n ) -> google.protobuf.empty_pb2.Empty:",
"def do_cancel(order):\r\n self.gox.cancel(order.oid)",
"def _do_cancel(self):\r\n\r\n def do_cancel(order):\r\n \"\"\"cancel a single order\"\"\"\r\n self.gox.cancel(order.oid)\r\n\r\n if not len(self.items):\r\n return\r\n if not len(self.selected):\r\n order = self.items[self.item_sel]\r\n do_cancel(order)\r\n else:\r\n for order in self.selected:\r\n do_cancel(order)",
"def cancel(self):\n self.session.rollback()",
"def retry_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.retry_operation(row[\"OPE_ID\"])\n\n stmnt = \"UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def cancelarOperacion(self):\n\n ok=QtGui.QMessageBox.warning(self,\"Aviso\",\"¿Desea cancelar la operación?\",\\\n QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok)\n if ok == QtGui.QMessageBox.Ok:\n if self.factura != None:\n self.factura.anular()\n for detalle in self.lotesVentas:\n for loteVenta in self.lotesVentas[detalle]:\n loteVenta[0].aumentarCantidad(loteVenta[1])\n loteVenta[0].modificar(self.sesion)\n detalle.eliminarLotesAsociados(self.sesion)\n detalle.borrar(self.sesion)\n self.objectModified.emit()\n self.limpiarVentana()",
"def DeleteOperation(\n self,\n request: google.longrunning.operations_pb2.DeleteOperationRequest,\n context: grpc.ServicerContext,\n ) -> google.protobuf.empty_pb2.Empty:",
"def test_transaction_explitic_canceling(self):\n\n proxy = self.node.get_proxy('/')\n\n # look under the hood to verify that branches are added\n # recursively\n _latest_root_rev = self.node._branches[None].latest\n adapter_node = _latest_root_rev._children['adapters'][2].node\n self.assertEqual(len(self.node._branches.keys()), 1)\n self.assertEqual(len(adapter_node._branches.keys()), 1)\n\n tx = proxy.open_transaction()\n self.assertEqual(len(self.node._branches.keys()), 2)\n self.assertEqual(len(adapter_node._branches.keys()), 1)\n\n self.make_change(tx, '/adapters/2', 'config.log_level', 4)\n\n self.assertEqual(len(self.node._branches.keys()), 2)\n self.assertEqual(len(adapter_node._branches.keys()), 2)\n\n tx.cancel()\n\n self.assertEqual(len(self.node._branches.keys()), 1)\n self.assertEqual(len(adapter_node._branches.keys()), 1)",
"def CancelOperation(self, operation_ref):\n request = self.messages.NetappProjectsLocationsOperationsCancelRequest(\n name=operation_ref.RelativeName())\n return self.client.projects_locations_operations.Cancel(request)",
"def cancel(self, comment=None):\n payload = {\n \"Comment\": comment\n }\n qry = ServiceOperationQuery(self, \"cancel\", None, payload)\n self.context.add_query(qry)\n return self",
"def cancel_operation(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n self.proceed = False\n self.entry_view.destroy()",
"def cancel(bot, update):\n bot.sendMessage(chat_id=update.message.chat_id, text=\"As you wish, the operation has been cancelled! 😊\")\n return ConversationHandler.END",
"def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_operation\" not in self._stubs:\n self._stubs[\"delete_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/DeleteOperation\",\n request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"delete_operation\"]",
"def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_operation\" not in self._stubs:\n self._stubs[\"delete_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/DeleteOperation\",\n request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"delete_operation\"]",
"def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_operation\" not in self._stubs:\n self._stubs[\"delete_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/DeleteOperation\",\n request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"delete_operation\"]",
"def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)",
"def cancel(self):\n if not self.parent_node.is_job:\n return\n\n # First perform clean operation\n self.clean()\n\n self.winstance.send_event('Cancelling job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.cancel',\n kwargs={\"name\": self.name})\n self.winstance.send_event('.. job canceled')\n result.task.wait_for_terminated()\n\n self._status = 'CANCELLED'",
"def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument('orderId', type=int, required=True,\n help='Order ID to cancel')\n args = parser.parse_args()\n return sync.cancel_order(args['orderId'])",
"def _execute_cancel(self) -> None:\n # validation\n if self.position.is_open:\n raise Exception('cannot cancel orders when position is still open. there must be a bug somewhere.')\n\n logger.info('cancel all remaining orders to prepare for a fresh start...')\n\n self.broker.cancel_all_orders()\n\n self._reset()\n\n self._broadcast('route-canceled')\n\n self.on_cancel()\n\n if not jh.is_unit_testing() and not jh.is_live():\n store.orders.storage[f'{self.exchange}-{self.symbol}'].clear()",
"def cancel(self):\n if not self.is_cancelled:\n self.will_change_value_for('is_cancelled')\n self.cancelled = True\n # remove our dependencies so that we're ready, properly behaved operations\n # will honor the cancel flag\n self.dependencies.clear()\n self.did_change_value_for('is_cancelled')\n \n if not self.is_executing and not self.is_finished:\n with self.changing('is_finished'):\n self.finished = True",
"def _order_cancel(self, bo):\n log.info(\"bo_blotter: order_cancel bracket order bo#%s\" % bo.ticket) \n cancelled = bo.cancel()\n return(cancelled)",
"def cancel_order(self, walletId, orderId):\n return",
"def _simple_deletion(self, operation, labels):\n from mogwai.models.edge import Edge\n\n label_strings = []\n for label in labels:\n if inspect.isclass(label) and issubclass(label, Edge):\n label_string = label.get_label()\n elif isinstance(label, Edge):\n label_string = label.get_label()\n elif isinstance(label, string_types):\n label_string = label\n else:\n raise MogwaiException('traversal labels must be edge classes, instances, or strings')\n label_strings.append(label_string)\n\n future = connection.future_class()\n future_result = self._delete_related(operation, label_strings)\n\n def on_read(f2):\n try:\n result = f2.result()\n except Exception as e:\n future.set_exception(e)\n else:\n future.set_result(result)\n\n def on_save(f):\n try:\n stream = f.result()\n except Exception as e:\n future.set_exception(e)\n else:\n future_read = stream.read()\n future_read.add_done_callback(on_read)\n\n future_result.add_done_callback(on_save)\n\n return future",
"def cancel(self):\n import googleapiclient\n\n # projects.locations.operations/cancel\n operations = self._api.projects().locations().operations()\n\n for job in self.active_jobs:\n request = operations.cancel(name=job.jobname)\n logger.debug(\"Cancelling operation {}\".format(job.jobid))\n try:\n self._retry_request(request)\n except (Exception, BaseException, googleapiclient.errors.HttpError):\n continue\n\n self.shutdown()",
"def cancel(self, cr, uid, ids, notes='', context=None):\n return self.write(cr, uid, ids, {'state':'cancel'}, context=context)",
"def cancel(self, id):\n self.__init_client()\n order = self.get_open_order(id)\n\n if order is None:\n return False\n\n try:\n retry(lambda: self.client.futures_cancel_order(symbol=self.pair, origClientOrderId=order['clientOrderId']))\n except HTTPNotFound:\n return False\n logger.info(f\"Cancel Order : (clientOrderId, type, side, quantity, price, stop) = \"\n f\"({order['clientOrderId']}, {order['type']}, {order['side']}, {order['origQty']}, \"\n f\"{order['price']}, {order['stopPrice']})\")\n return True"
] | [
"0.66223377",
"0.63557035",
"0.63557035",
"0.63557035",
"0.62062633",
"0.5849474",
"0.5820822",
"0.57708067",
"0.5647814",
"0.56469494",
"0.55416864",
"0.55382013",
"0.54923594",
"0.54598767",
"0.5451226",
"0.5448454",
"0.54425424",
"0.54425424",
"0.54425424",
"0.5434237",
"0.5426428",
"0.5382236",
"0.5362991",
"0.5299589",
"0.5277859",
"0.5275019",
"0.5255635",
"0.5233946",
"0.522269",
"0.5209626"
] | 0.8243501 | 0 |
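The cancel_operation row above walks the pending subtree in Python. On databases that support recursive CTEs (SQLite 3.8.3+, PostgreSQL), the whole pending subtree can be removed in a single statement. The sketch below is an alternative formulation under the same simplified schema assumption, not the project's actual code.

```python
# Sketch: deleting a pending (status 0) subtree with one recursive CTE,
# instead of row-by-row recursion in Python. Schema columns are assumed.
import sqlite3

CANCEL_SUBTREE_SQL = """
WITH RECURSIVE subtree(id) AS (
    SELECT ?
    UNION ALL
    SELECT o.OPE_ID
      FROM OPERATIONS AS o
      JOIN subtree AS s ON o.OPE_OPE_PARENT = s.id
     WHERE o.OPE_STATUS = 0
)
DELETE FROM OPERATIONS
 WHERE OPE_ID IN (SELECT id FROM subtree)
   AND OPE_STATUS = 0;
"""

def cancel_operation_cte(conn, operation_id):
    conn.execute(CANCEL_SUBTREE_SQL, (operation_id,))

if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE OPERATIONS (OPE_ID INTEGER PRIMARY KEY, "
        "OPE_OPE_PARENT INTEGER, OPE_STATUS INTEGER)"
    )
    conn.executemany(
        "INSERT INTO OPERATIONS VALUES (?, ?, ?)",
        [(1, None, 0), (2, 1, 0), (3, 1, 1)],
    )
    cancel_operation_cte(conn, 1)
    print([r[0] for r in conn.execute("SELECT OPE_ID FROM OPERATIONS")])  # [3]
```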
Restore an Operation object stored in the database from a dataset consisting of an operation record (database row) | def restore_operation(cls, operation_record):
classname = operation_record["OPE_TYPE"]
module = "" #TODO Implement modulename from database if Operation belongs to Module
is_operation_of_module = False
exec """
try:
type(%(class)s)
except NameError,e:
is_operation_of_module = True"""%{'class':classname}
if is_operation_of_module:
exec """
from %(module)s import %(class)s
operation = %(class)s(cls._core)"""%{'class':classname,'module':module}
else:
exec """
operation = %(class)s(cls._core)"""%{'class':classname}
operation.set_id(operation_record['OPE_ID'])
db = cls._core.get_db()
stmnt = "SELECT OPD_KEY, OPD_VALUE, OPD_TYPE FROM OPERATIONDATA WHERE OPD_OPE_ID = ? ;"
cur = db.query(cls._core,stmnt,(operation_record["OPE_ID"],))
for row in cur.fetchallmap():
val = row["OPD_VALUE"]
exec """val = %s(val)"""%row["OPD_TYPE"]
operation.set_value(row["OPD_KEY"], val)
return operation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restore(self, oid, serial, data, version, prev_txn, transaction):\n assert not version\n self._check_trans(transaction, 'restore')\n self._async('restorea', oid, serial, data, prev_txn, id(transaction))",
"def restore(self, checkpoint):\n raise NotImplementedError",
"def mos_object(self):\n return self._restore_fn(*self._restore_args)",
"def restore(self, obj):\n return obj",
"def _restore(self, graph):\n raise NotImplementedError()",
"def restore(self):\n self.abstract_obj.restore()",
"def restore_data(self):\n self.R = self._Ro\n del self._Ro",
"def restore(self, restore):\n self._restore = restore",
"def restore(self):\n raise NotImplementedError",
"def restore(self, ids):\n with self._db_connection(transaction=True) as transaction:\n for id_ in ids:\n transaction.restore_dataset(id_)",
"def load(cls,data, recovery_mode = False):\n opid = _read_delimited_field(data)\n operation_type = _read_delimited_field(data)\n modlogger.debug( \"loading: %s,%s\"%(opid,operation_type))\n return _operation_type_map[operation_type].load(opid,data, recovery_mode = recovery_mode)",
"def persist(cls, dataset):\n return dataset",
"def restore(self):\n\n self.brain.restore_checkpoint()",
"def _restore(self, a_path):\n super(RDPAnalyzer, self)._restore(a_path)\n self._model._restore()",
"def test_restore(self):\n s = Source([[10, 10], [10, 20]], values=[1.0, 2.0])\n assert(array_equal(s.center, [10, 15]))\n\n assert(\"center\" in s.__dict__.keys())\n s.restore()\n assert(\"center\" not in s.__dict__.keys())\n\n assert(array_equal(s.center, [10, 15]))\n assert(\"center\" in s.__dict__.keys())\n s.restore(skip=\"center\")\n assert(\"center\" in s.__dict__.keys())",
"def __restoreBackup(self):\n pass #FIXME!!!",
"def restore(self, session, **attrs):\n body = {\"instance\": {\"restorePoint\": {\"backupRef\": self.id}}}\n body.update(attrs)\n resp = session.post('instances', service=self.service, json=body).body\n return resp['instance']",
"def test_backup_restore_with_ops(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n initial_gen = copy.deepcopy(gen)\n initial_keys = []\n for x in initial_gen:\n initial_keys.append(x[0])\n self.log.info(\"Start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.log.info(\"Create backup repo \")\n self.backup_create()\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n\n if self.compact_backup and self.ops_type == \"delete\":\n self.log.info(\"Start to compact backup \")\n self.backup_compact_validate()\n self.log.info(\"Validate deleted keys\")\n self.backup_compact_deleted_keys_validation(initial_keys)\n\n self.log.info(\"start restore cluster \")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n self.backupset.start = start\n self.backupset.end = end\n self._backup_restore_with_ops(backup=False, compare_function=\">=\")\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"",
"def restore_to_database(\r\n self,\r\n objects_to_restore=None,\r\n destination_client=None,\r\n sf_options=None):\r\n file_restore_option = {}\r\n\r\n if sf_options is None:\r\n sf_options = {}\r\n\r\n # check if client name is correct\r\n if destination_client is None:\r\n destination_client = self._backupset_object._instance_object.proxy_client\r\n\r\n if isinstance(destination_client, Client):\r\n dest_client = destination_client\r\n elif isinstance(destination_client, basestring):\r\n dest_client = Client(self._commcell_object, destination_client)\r\n else:\r\n raise SDKException('Subclient', '105')\r\n\r\n if not ('db_host_name' in sf_options and\r\n 'db_instance' in sf_options and\r\n 'db_name' in sf_options and\r\n 'db_user_name' in sf_options and\r\n 'db_user_password' in sf_options):\r\n raise SDKException('Salesforce', '101')\r\n\r\n # set the destination client\r\n file_restore_option[\"client_name\"] = dest_client.client_name\r\n file_restore_option[\"destination_path\"] = sf_options.get(\r\n \"destination_path\", self._backupset_object.download_cache_path\r\n )\r\n\r\n self._restore_destination_json(file_restore_option)\r\n\r\n # process the objects to restore\r\n if isinstance(objects_to_restore, list):\r\n objects_to_restore_list = objects_to_restore\r\n\r\n else:\r\n objects_to_restore_list = [objects_to_restore]\r\n\r\n file_restore_option[\"paths\"] = []\r\n browse_files, _ = self.browse(\r\n path='/Objects',\r\n from_time=sf_options.get(\"from_time\", 0),\r\n to_time=sf_options.get(\"to_time\", 0)\r\n )\r\n\r\n for each_object in objects_to_restore_list:\r\n if each_object.find('/Files') < 0:\r\n file_restore_option[\"paths\"].append(\r\n self.check_object_in_browse(\"%s\" % each_object, browse_files)\r\n )\r\n\r\n # set the salesforce options\r\n file_restore_option[\"staging_path\"] = sf_options.get(\r\n \"destination_path\", self._backupset_object.download_cache_path\r\n )\r\n file_restore_option[\"dependent_level\"] = sf_options.get(\"dependent_level\", 0)\r\n file_restore_option[\"streams\"] = sf_options.get(\"streams\", 2)\r\n file_restore_option[\"to_fs\"] = False\r\n file_restore_option[\"db_enabled\"] = True\r\n file_restore_option[\"db_type\"] = sf_options.get(\"db_type\", 'SQLSERVER')\r\n file_restore_option[\"db_host_name\"] = sf_options.get(\"db_host_name\", \"\")\r\n file_restore_option[\"db_instance\"] = sf_options.get(\"db_instance\", \"\")\r\n file_restore_option[\"db_name\"] = sf_options.get(\"db_name\", \"autorestoredb\")\r\n file_restore_option[\"db_port\"] = sf_options.get(\"db_port\", '1433')\r\n file_restore_option[\"db_user_name\"] = sf_options.get(\"db_user_name\", 'sa')\r\n db_base64_password = b64encode(sf_options['db_user_password'].encode()).decode()\r\n file_restore_option[\"db_user_password\"] = db_base64_password\r\n file_restore_option[\"override_table\"] = sf_options.get(\"override_table\", True)\r\n\r\n # set the browse option\r\n file_restore_option[\"copy_precedence_applicable\"] = True\r\n file_restore_option[\"copy_precedence\"] = sf_options.get(\"copy_precedence\", 0)\r\n file_restore_option[\"from_time\"] = sf_options.get(\"from_time\", 0)\r\n file_restore_option[\"to_time\"] = sf_options.get(\"to_time\", 0)\r\n\r\n # prepare and execute the Json\r\n request_json = self._prepare_salesforce_restore_json(file_restore_option)\r\n\r\n return self._process_restore_response(request_json)",
"def revert(self):\n if not 'savepoint' in self._cache:\n w = \"No saved session DataSet file found!\"\n warnings.warn(w)\n return None\n self._meta, self._data = self._cache['savepoint']\n print('Reverted to last savepoint of {}'.format(self.name))\n return None",
"def restore(self):\n return self._restore",
"def _load_restored(self, dataset_path):\n for group in ['knowledge', 'source', 'target']:\n if getattr(self, group + '_format') != 'none':\n text_data = load_restored(dataset_path, group + '.', ignore_file='vocab')[0]\n setattr(self, group + '_text_data', text_data)\n idx2token, token2idx = load_restored(dataset_path, ignore_file='data')\n setattr(self, 'idx2token', idx2token)\n setattr(self, 'token2idx', token2idx)\n self.max_vocab_size = len(self.idx2token)\n self.logger.info(\"Restore finished!\")",
"def restore_object(self):\n self.co_worker_list = self.original_co_worker_list",
"def _restore_sub_obj(\n self,\n attr_name: pathlib.Path\n ):\n return pickle.load(attr_name.open(mode=\"rb\"))",
"def _Restore(self) -> None:\n self._SetNodes(self._nodes)",
"def _restore(self, restore_folder):\n tf.reset_default_graph()\n self.init_session()\n ckpt = tf.train.get_checkpoint_state(restore_folder)\n self.saver = tf.train.import_meta_graph('{}.meta'.format(ckpt.model_checkpoint_path))\n self.saver.restore(self.sess, ckpt.model_checkpoint_path)\n print(\"Model restored from {}\".format(restore_folder))",
"def backup_dataset(outfile=None):\n return backup_es(Dataset, outfile=outfile)",
"def restore(self, snapshot):\n self.unit_name = snapshot[\"unit_name\"]",
"def archive_mds_ops_data(self, lmtdb):\n\n # mapping between OPERATION_INFO.OPERATION_NAME to HDF5 dataset names\n opname_to_dataset_name = {\n 'open': 'mdtargets/opens',\n 'close': 'mdtargets/closes',\n 'mknod': 'mdtargets/mknods',\n 'link': 'mdtargets/links',\n 'unlink': 'mdtargets/unlinks',\n 'mkdir': 'mdtargets/mkdirs',\n 'rmdir': 'mdtargets/rmdirs',\n 'rename': 'mdtargets/renames',\n 'getxattr': 'mdtargets/getxattrs',\n 'statfs': 'mdtargets/statfss',\n 'setattr': 'mdtargets/setattrs',\n 'getattr': 'mdtargets/getattrs',\n }\n dataset_names = list(opname_to_dataset_name.values())\n\n self.init_datasets(dataset_names, lmtdb.mds_names)\n\n results, columns = lmtdb.get_mds_ops_data(self.query_start, self.query_end_plusplus)\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'MDS_ID', 'OPERATION_ID', 'SAMPLES']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n\n # figure out the dataset this row's data will go into (this\n # implicitly filters out operations that aren't defined in\n # opname_to_dataset_name)\n op_name = lmtdb.mds_op_id_map[row[col_map['OPERATION_ID']]]\n dataset_name = opname_to_dataset_name.get(op_name)\n if dataset_name is None:\n continue\n\n # figure out which column (MDS name) this row's data will go into\n mds_name = lmtdb.mds_id_map.get(row[col_map['MDS_ID']])\n if not mds_name:\n errmsg = \"unknown MDS_ID %s\" % row[col_map['MDS_ID']]\n warnings.warn(errmsg)\n continue\n\n self[dataset_name].insert_element(\n timestamp,\n mds_name,\n row[col_map['SAMPLES']])",
"def restore(self):\n\t\treturn Job(SDK.PrlVm_Restore(self.handle)[0])"
] | [
"0.6201652",
"0.6147818",
"0.5943375",
"0.5855127",
"0.5704961",
"0.56648844",
"0.56335723",
"0.55953205",
"0.55918145",
"0.55536777",
"0.55432135",
"0.5536391",
"0.5470216",
"0.54262424",
"0.539617",
"0.5361314",
"0.5206639",
"0.51915765",
"0.5184326",
"0.5164573",
"0.51495296",
"0.51482344",
"0.5126082",
"0.51207787",
"0.51192796",
"0.5117878",
"0.51160187",
"0.51050395",
"0.51037204",
"0.5085244"
] | 0.6955749 | 0 |
Recursively executes the workloads of an Operation's child operations. It hereby catches exceptions in the workloads, sets the OPE_STATUS to 2 (FAILED) if an exception is caught, then passes the exception on to the higher layer. If an Operation succeeds, its entry in the DB gets deleted | def process_children(cls, operation):
db = cls._core.get_db()
stmnt = "SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;"
stmnt_lock = "UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;"
cur = db.query(cls._core,stmnt,(operation.get_id(),))
for row in cur.fetchallmap():
child_operation = cls.restore_operation(row)
db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)
try:
cls.process_children(child_operation)
child_operation.do_workload()
except Exception,e:
stmnt_err = "UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;"
db.query(cls._core,stmnt_err,(int(row["OPE_ID"]),),commit=True)
#TODO GENERATE ERROR IN LOG
raise e
stmnt_delete = "DELETE FROM OPERATIONS WHERE OPE_ID = ?;"
db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def retry_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.retry_operation(row[\"OPE_ID\"])\n\n stmnt = \"UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def cancel_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.cancel_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def process_next(cls):\n db = cls._core.get_db()\n configuration = cls._core.get_configuration()\n if os.path.exists(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\"):\n return False\n lockfile = open(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\",\"w\")\n lockfile.close()\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 \\\n WHERE OPE_ID IN ( \\\n SELECT OPE_ID FROM OPERATIONS \\\n WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0 \\\n AND OPE_INVOKED = ( \\\n SELECT MIN(OPE_INVOKED) FROM OPERATIONS \\\n WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0) \\\n ) ;\"\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 1 ;\"\n db.query(cls._core,stmnt_lock,commit=True)\n cur = db.query(cls._core,stmnt)\n res = cur.fetchallmap()\n if len(res) > 0:\n operation = cls.restore_operation(res[0])\n try:\n cls.process_children(operation)\n operation.do_workload()\n except Exception, e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(operation.get_id(),),commit=True)\n error = StringIO()\n print_exc(None,error)\n cls._core.log(error.getvalue())\n ret = True\n else:\n ret = False\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_STATUS = 1 ;\"\n db.query(cls._core,stmnt_delete,commit=True)\n db.commit()\n try:\n os.unlink(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\")\n except OSError,e :\n raise OperationException(OperationException.get_msg(0))\n return ret",
"def drop_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.drop_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def execute(self,data):\n\n try:\n\n start = time.time()\n\n self.cursor.executemany(self.operation, data)\n\n end = time.time()\n\n logger.info(\"Operation [{}] took {:.3f} seconds; {} operations processed\".format(self.operation, end-start, len(data)))\n\n except Exception, exc:\n\n # Not so typical: handle integrity constraints (generate warnings)\n if exc.__class__.__name__ != \"IntegrityError\":\n raise\n\n self.conn.rollback()\n\n for record in data:\n\n try:\n self.cursor.execute(self.operation, record)\n self.conn.commit()\n\n except Exception, exc:\n\n # This record is the culprit\n if exc.__class__.__name__ != \"IntegrityError\":\n logger.error(\"Exception [{}] occurred inserting record {}\".format(exc.message, record))\n logger.error(\"Operation was: {}\".format(self.operation))\n raise\n\n error_msg = str(exc.message).rstrip()\n logger.warn( \"Integrity error (\\\"{}\\\"); data={}\".format(error_msg, record) )\n\n else:\n # If all goes well, we just need a single commit\n self.conn.commit()",
"def _recover(self,):\n modlogger.debug( \"starting recovery\")\n with self.id_lock: #Prevent new ops being created.\n logs = [ LogFile(x,readonly=True) for x in self._findlogs() ]\n logiter = [ iter(x) for x in logs ]\n ops = [ _getop(x) for x in logiter ]\n opids = [ _getid(x) for x in ops ]\n #order the log files by operation Id.\n data = sorted(zip(logs,logiter,ops,opids),key =lambda x:x[3])\n modlogger.debug( \"SR:%s\"%data)\n #And now got through all log files in Id order\n state = 'init'\n unrecoverable = []\n for log,it,op,opid in data:\n for cur_op in chain([op],it):\n #cur_op None indicated end of that logfile.\n if cur_op is None: break\n\n #We ignore any ops until we see a 'startTxn' marker, but we\n # keep a record of there ids to ensure we see a later checkpoint.\n # if we don't we can't replay partial Txn.\n modlogger.debug( \"R:%s,%s\",cur_op,state)\n if state=='init':\n #Record all operations we see before we see the first\n #start tx marker.\n if cur_op.optype == b'start_txn':\n state='txcomplete'\n elif cur_op.optype == b'abort_txn':\n #If the partial transaction we found was aborted\n # we don't need to worry about its operations. \n unrcoverable = [ ]\n elif cur_op.optype == b'Checkpoint':\n unrecoverable = _remove_commited(unrecoverable,cur_op.opid)\n else:\n unrecoverable += [ op.opid]\n \n\n #We are looking for a starttxn, marker to mark the operation\n #as valid. The only other meaningful transaction in the\n #journal in the state is a checkpoint making which ops have been\n #detected as committed to the main store by the FS.\n if state=='txcomplete':\n if cur_op.optype == b'start_txn':\n tx = cur_op.txn_id\n txops = [ ]\n state = 'txstarted'\n continue\n elif cur_op.optype == b'Checkpoint':\n unrecoverable = _remove_commited(unrecoverable,cur_op.opid)\n else: raise RecoveryError(\"Operation outside tx\")\n\n #In this state all operations are meaningful.\n # we store all operations (except checkpoint) until we see\n # a EndTxn op. At the end TxnOp we synchronously complete\n # all operations.\n if state =='txstarted':\n if cur_op.optype == b'end_txn': \n #The test below finds 'overlapped' tx, (or ones missing a commit record\n #for some reason. This forces us not to accept this log file.\n if cur_op.txn_id != tx: raise RecoveryError(\"Non matching Tx commit found\")\n else:\n for top in txops:\n top.do(sync = True)\n state = 'txcomplete'\n elif cur_op.optype == b'abort_txn':\n state = 'txcomplete'\n elif cur_op.optype == b'Checkpoint':\n unrecoverable = _remove_commited(unrecoverable,cur_op.opid)\n else:\n txops += [ cur_op ] \n #Log file has been processed successfully - remove it from the Fs.\n #we could call close() here and reused the allocated space on the\n #FS - but the logfile is readonly - and close() adds a terminator\n #to mark the file as empty.\n try:\n log.unlink()\n except OSError: pass\n\n #If there are any partial txn's left we have failed to recover.\n if unrecoverable: raise RecoveryError(\"Partial uncommitted txn found\")",
"def _simple_deletion(self, operation, labels):\n from mogwai.models.edge import Edge\n\n label_strings = []\n for label in labels:\n if inspect.isclass(label) and issubclass(label, Edge):\n label_string = label.get_label()\n elif isinstance(label, Edge):\n label_string = label.get_label()\n elif isinstance(label, string_types):\n label_string = label\n else:\n raise MogwaiException('traversal labels must be edge classes, instances, or strings')\n label_strings.append(label_string)\n\n future = connection.future_class()\n future_result = self._delete_related(operation, label_strings)\n\n def on_read(f2):\n try:\n result = f2.result()\n except Exception as e:\n future.set_exception(e)\n else:\n future.set_result(result)\n\n def on_save(f):\n try:\n stream = f.result()\n except Exception as e:\n future.set_exception(e)\n else:\n future_read = stream.read()\n future_read.add_done_callback(on_read)\n\n future_result.add_done_callback(on_save)\n\n return future",
"def execute(self, ops, exceptions=[], delay=5, maxretries=3):\n retry_errors = [NFS4ERR_DELAY, NFS4ERR_GRACE]\n state_errors = [NFS4ERR_STALE_CLIENTID, NFS4ERR_BADSESSION,\n NFS4ERR_BADSLOT, NFS4ERR_DEADSESSION]\n while True:\n res = self.sess.compound(ops)\n if res.status == NFS4_OK or res.status in exceptions:\n return res\n elif res.status in retry_errors:\n if maxretries > 0:\n maxretries -= 1\n time.sleep(delay)\n else:\n log.error(\"Too many retries with DS %s\" % self.server)\n raise Exception(\"Dataserver communication retry error\")\n elif res.status in state_errors:\n self.disconnect()\n self.connect()\n else:\n log.error(\"Unhandled status %s from DS %s\" %\n (nfsstat4[res.status], self.server))\n raise Exception(\"Dataserver communication error\")",
"def _executeOperation(self, request:CSERequest, reqRi:str) -> Result:\n\t\t# Execute the actual operation\n\t\trequest.args.operation == Operation.RETRIEVE and (operationResult := CSE.dispatcher.processRetrieveRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.CREATE and (operationResult := CSE.dispatcher.processCreateRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.UPDATE and (operationResult := CSE.dispatcher.processUpdateRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.DELETE and (operationResult := CSE.dispatcher.processDeleteRequest(request, request.headers.originator)) is not None\n\n\t\t# Retrieve the <request> resource\n\t\tif (res := CSE.dispatcher.retrieveResource(reqRi)).resource is None:\t\n\t\t\treturn Result(status=False) \t\t\t\t\t\t\t\t\t\t\t\t\t\t# No idea what we should do if this fails\n\t\treqres = res.resource\n\n\t\t# Fill the <request>\n\t\treqres['ors'] = {\t# operationResult\n\t\t\t'rsc'\t: operationResult.rsc,\n\t\t\t'rqi'\t: reqres.rid,\n\t\t\t'to'\t: request.id,\n\t\t\t'fr'\t: reqres.org,\n\t\t\t'ot'\t: reqres['mi/ot'],\n\t\t\t'rset'\t: reqres.et\n\t\t}\n\t\tif operationResult.rsc in [ RC.OK, RC.created, RC.updated, RC.deleted ] :\t\t\t# OK, created, updated, deleted -> resource\n\t\t\treqres['rs'] = RequestStatus.COMPLETED\n\t\t\tif operationResult.resource is not None:\n\t\t\t\treqres['ors/pc'] = operationResult.resource.asDict()\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Error\n\t\t\treqres['rs'] = RequestStatus.FAILED\n\t\t\tif operationResult.dbg is not None:\n\t\t\t\treqres['ors/pc'] = { 'm2m:dbg' : operationResult.dbg }\n\n\t\t# Update in DB\n\t\treqres.dbUpdate()\n\n\t\treturn Result(resource=reqres, status=True)",
"def rollback(self, stage, enodes, exception):",
"def main():\n session = create_session()\n delete_orphans(session)\n check_children(session)\n session.commit()",
"def _run_operations(self):\n # get job informations\n job = self.active_queue[0]\n job_id = job[\"id\"]\n job_logdir = self._create_logdir(job_id)\n\n for operation in self.config[\"operations\"]:\n self._acquire_lock(job_id + \",\" + operation)\n self._run_operation(operation, job_logdir)\n self._release_lock(job_id + \",\" + operation)\n\n files_to_archives = [job[\"objects_filename\"], job[\"config_filename\"]]\n self._archive_logs(job_logdir, files_to_archives)\n self._update_state(job_id)",
"def __call__(self, node, operations, last_operation):\n if last_operation == NO_OPERATION:\n return 0\n return 1",
"def _operation_tree(self):\n\n # initial state\n i = 0\n level = 0\n stack = []\n current = None\n\n def _create_operation(args):\n profile_stats = None\n name = args[0].strip()\n args.pop(0)\n if len(args) > 0 and \"Records produced\" in args[-1]:\n records_produced = int(\n re.search(\"Records produced: (\\\\d+)\", args[-1]).group(1)\n )\n execution_time = float(\n re.search(\"Execution time: (\\\\d+.\\\\d+) ms\", args[-1]).group(1)\n )\n profile_stats = ProfileStats(records_produced, execution_time)\n args.pop(-1)\n return Operation(\n name, None if len(args) == 0 else args[0].strip(), profile_stats\n )\n\n # iterate plan operations\n while i < len(self.plan):\n current_op = self.plan[i]\n op_level = current_op.count(\" \")\n if op_level == level:\n # if the operation level equal to the current level\n # set the current operation and move next\n child = _create_operation(current_op.split(\"|\"))\n if current:\n current = stack.pop()\n current.append_child(child)\n current = child\n i += 1\n elif op_level == level + 1:\n # if the operation is child of the current operation\n # add it as child and set as current operation\n child = _create_operation(current_op.split(\"|\"))\n current.append_child(child)\n stack.append(current)\n current = child\n level += 1\n i += 1\n elif op_level < level:\n # if the operation is not child of current operation\n # go back to it's parent operation\n levels_back = level - op_level + 1\n for _ in range(levels_back):\n current = stack.pop()\n level -= levels_back\n else:\n raise Exception(\"corrupted plan\")\n return stack[0]",
"def test_sub_doc_with_process_crash(self):\n if self.num_replicas < 2:\n self.assertTrue(False, msg=\"Required: num_replicas > 1\")\n\n # Override num_of_nodes affected to 1\n self.num_nodes_affected = 1\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n def_bucket = self.cluster.buckets[0]\n\n self.load_data_for_sub_doc_ops()\n\n self.log.info(\"Selecting nodes to simulate error condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n\n self.log.info(\"Will simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs = cbstat_obj[node.ip] .vbucket_list(def_bucket.name,\n \"active\")\n active_vbs_in_target_nodes += active_vbs\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n def_bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(def_bucket.name)\n\n # Remove active vbuckets from doc_loading to avoid errors\n\n load_spec = dict()\n # load_spec[\"target_vbuckets\"] = list(set(target_vbuckets)\n # ^ set(active_vbs_in_target_nodes))\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"subdoc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION] = 10\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION] = 50\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.UPSERT_PER_COLLECTION] = 25\n load_spec[\"subdoc_crud\"][\n MetaCrudParams.SubDocCrud.REMOVE_PER_COLLECTION] = 25\n\n self.log.info(\"Perform 'create', 'update', 'delete' mutations\")\n\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=1,\n async_load=True)\n\n self.sleep(5, \"Wait for doc loaders to start loading data\")\n\n for node in target_nodes:\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=def_bucket.name)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud(mutation_num=2)\n\n # Wait for document_loader tasks to complete\n self.task_manager.get_task_result(doc_loading_task)\n self.bucket_util.validate_doc_loading_results(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Sub_doc CRUDs failed with process crash\")\n\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=def_bucket.name)\n\n # Fetch latest failover stats and validate the values are updated\n self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(def_bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(def_bucket.name)\n\n # Failover validation\n val = \\\n failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n error_msg = \"Failover stats not updated after error condition\"\n self.assertTrue(val, msg=error_msg)\n\n # 
Seq_no validation (High level)\n val = \\\n vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n self.validate_test_failure()\n # Doc count validation\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)",
"def process_operation(self, resources, resource, api, operation, context):\n pass",
"def __exit__(self, exc_type, exc_instance, exc_traceback):\n # if there were no errors detected\n if exc_type is None:\n # commit the transaction to the datastore\n self.execute(*self.sql.commit())\n # otherwise\n else:\n # roll back\n self.execute(*self.sql.rollback())\n\n # indicate that we want to re-raise any exceptions that occurred while executing the\n # body of the {with} statement\n return False",
"async def run(\n self,\n ctx: BaseInputSetContext,\n octx: BaseOrchestratorContext,\n operation: Operation,\n inputs: Dict[str, Any],\n ) -> Union[bool, Dict[str, Any]]:\n if not operation.retry:\n return await self.run_no_retry(ctx, octx, operation, inputs)\n for retry in range(0, operation.retry):\n try:\n return await self.run_no_retry(ctx, octx, operation, inputs)\n except Exception:\n # Raise if no more tries left\n if (retry + 1) == operation.retry:\n raise\n # Otherwise if there was an exception log it\n self.logger.error(\n \"%r: try %d: %s\",\n operation.instance_name,\n retry + 1,\n traceback.format_exc().rstrip(),\n )",
"def fail_local_operation(operation, node, environment):\n run_operation(operation, node, environment, succeed=False)",
"def run_transaction(self, op, max_retries=3):\n\n if Database.conn is None:\n raise TypeError(\"Connection should not be None. Did you run connect_to_db()?\")\n\n # leaving this block the transaction will commit or rollback\n # (if leaving with an exception)\n with Database.conn:\n for retry in range(1, max_retries + 1):\n try:\n result = op(Database.conn)\n\n # If we reach this point, we were able to commit, so we break\n # from the retry loop.\n return result\n\n except SerializationFailure as e:\n # This is a retry error, so we roll back the current\n # transaction and sleep for a bit before retrying. The\n # sleep time increases for each failed transaction.\n logging.debug(\"got error: %s\", e)\n Database.conn.rollback()\n logging.debug(\"EXECUTE SERIALIZATION_FAILURE BRANCH\")\n sleep_ms = (2 ** retry) * 0.1 * (random.random() + 0.5)\n logging.debug(\"Sleeping %s seconds\", sleep_ms)\n time.sleep(sleep_ms)\n\n except psycopg2.Error as e:\n logging.debug(\"got error: %s\", e)\n logging.debug(\"EXECUTE NON-SERIALIZATION_FAILURE BRANCH\")\n raise e\n\n raise ValueError(f\"Transaction did not succeed after {max_retries} retries\")",
"async def resume_operations(self):\n await asyncio.sleep(10)\n for op in await self.get_service('data_svc').locate('operations', match=dict(finish=None)):\n self.loop.create_task(self.run_operation(op))",
"def _operation_traverse(self, op, op_f, aggregate_f, combine_f): # noqa\n # apply op_f for each operation\n op_res = op_f(op)\n if len(op.children) == 0:\n return op_res # no children return\n else:\n # apply _operation_traverse recursively\n children = [\n self._operation_traverse(child, op_f, aggregate_f, combine_f)\n for child in op.children\n ]\n # combine the operation result with the children aggregated result\n return combine_f(op_res, aggregate_f(children))",
"def run_all(operations=ops):\n for operation in operations:\n run(operation)",
"def do_operation(self):\n operation = self.inputs['operation']\n res = self.entity.do_operation(self.context, **self.inputs)\n if res:\n return self.RES_OK, \"Node operation '%s' succeeded.\" % operation\n else:\n return self.RES_ERROR, \"Node operation '%s' failed.\" % operation",
"async def run(self) -> Optional[BaseException]: # pylint: disable=too-many-branches,too-many-statements\n active = Invocation.active.get(self.name)\n if active is not None:\n return await self.done(self.wait_for(active))\n\n self._become_current()\n Logger.trace(\"Call\")\n\n global rebuild_changed_actions # pylint: disable=invalid-name\n if rebuild_changed_actions.value:\n self.new_persistent_actions.append(PersistentAction())\n self.read_old_persistent_actions()\n\n assert self.name not in Invocation.active\n Invocation.active[self.name] = self\n self.collect_initial_outputs()\n\n try:\n assert self.step is not None\n try:\n await self.done(self.step.function(**self.kwargs))\n except RestartException:\n self._restart()\n await self.done(self.step.function(**self.kwargs))\n await self.done(self.sync())\n await self.done(self.collect_final_outputs())\n\n except StepException as exception: # pylint: disable=broad-except\n self.exception = exception\n\n finally:\n self._become_current()\n\n if self.exception is None:\n assert not self.async_actions\n if self.new_persistent_actions:\n if len(self.new_persistent_actions) > 1 and self.new_persistent_actions[-1].is_empty():\n self.new_persistent_actions.pop()\n\n if not self.did_skip_actions:\n self.write_new_persistent_actions()\n elif len(self.new_persistent_actions) < len(self.old_persistent_actions):\n Logger.warning(\"Skipped some action(s) \" \"even though changed to remove some final action(s)\")\n\n if self.did_run_actions:\n Logger.trace(\"Done\")\n elif self.did_skip_actions:\n Logger.trace(\"Skipped\")\n else:\n Logger.trace(\"Complete\")\n\n else:\n while self.async_actions:\n try:\n await self.done(self.async_actions.pop())\n except StepException:\n pass\n if self.did_run_actions:\n self.poison_all_outputs()\n self.remove_old_persistent_data()\n if not isinstance(self.exception, DryRunException):\n Logger.trace(\"Fail\")\n\n del Invocation.active[self.name]\n if self.condition is not None:\n await self.done(self.condition.acquire())\n self.condition.notify_all()\n self.condition.release()\n\n global failure_aborts_build # pylint: disable=invalid-name\n if self.exception is not None and failure_aborts_build.value:\n no_additional_complaints()\n raise self.exception\n\n return self.exception",
"def execute(self, trans):\n \n # a \"circular\" task is a circularly-sorted collection of UOWTask/UOWTaskElements\n # derived from the components of this UOWTask, which accounts for inter-row dependencies. \n # if one was created for this UOWTask, it replaces the execution for this UOWTask.\n if self.circular is not None:\n self.circular.execute(trans)\n return\n\n # TODO: add a visitation system to the UOW classes and have this execution called\n # from a separate executor object ? (would also handle dumping)\n \n self._save_objects(trans)\n self._execute_cyclical_dependencies(trans, False)\n self._execute_per_element_childtasks(trans, False)\n self._execute_dependencies(trans)\n self._execute_cyclical_dependencies(trans, True)\n self._execute_childtasks(trans)\n self._execute_per_element_childtasks(trans, True)\n self._delete_objects(trans)",
"def transaction_failed_before_processing(self):",
"def clean_up():\n for action in reversed(undo_actions):\n try:\n action()\n except Exception, exc:\n sys.stderr.write(\"BAD CLEANUP: Call to %s failed\\n\"\n % action.func_name)\n sys.stderr.write(\" %s\\n\" % exc)",
"def test_flush_wrapper_operational_error(self):\n\n _session = self.sessionmaker()\n\n with _session.begin():\n foo = self.Foo(counter=1)\n _session.add(foo)\n\n _session.begin()\n self.addCleanup(_session.rollback)\n foo = self.Foo(counter=sqla.func.imfake(123))\n _session.add(foo)\n matched = self.assertRaises(sqla.exc.OperationalError, _session.flush)\n self.assertIn(\"no such function\", str(matched))",
"def workflow_complete():\n\n if request.method == \"POST\":\n \"\"\"\n request looks like:\n {\n \"workflow_name\": \"test-workflow\",\n \"dataset_id\": \"HRI107\",\n \"operation\": \"std-dev\",\n \"PID\": 1\n \"other_cardinals\": [(2, \"23.45.67.89\"), (3, \"34.56.78.90\")],\n \"jiff_server\": \"45.67.89.01\"\n }\n \"\"\"\n\n req = request.get_json(force=True)\n\n pods = get_pod_by_workflow_and_pid(req[\"workflow_name\"], req[\"PID\"])\n if pods is not None:\n for pod in pods:\n delete_entry(pod)\n\n jiff_server = get_jiff_server_by_workflow(req[\"workflow_name\"])\n if jiff_server is not None:\n delete_entry(jiff_server)\n\n orch = Orchestrator(req, app, len(get_running_workflows()))\n\n orch.stop_workflow()\n\n app.logger.info(f\"Workflow {req['workflow_name']} complete, removed from running jobs.\")\n\n event_timestamps = get_pod_event_timestamp_by_workflow_and_pid(req['workflow_name'],req['PID'])\n if event_timestamps is not None:\n delete_entry(event_timestamps)\n\n event_timestamps_dict = {x.name: str(getattr(event_timestamps, x.name)) for x in event_timestamps.__table__.columns}\n\n pod_resource_usage = get_pod_resource_consumption_by_workflow_and_pid(req['workflow_name'],req['PID'])\n usage = {'cpu': {'avg': None, 'max': None}, 'memory': {'avg': None, 'max': None}}\n if pod_resource_usage is not None:\n cpu_consumptions = [obj.cpu_usage for obj in pod_resource_usage]\n memory_consumptions = [obj.memory_usage for obj in pod_resource_usage]\n\n if len(cpu_consumptions) > 0:\n usage['cpu'] = {\n 'avg': sum(cpu_consumptions) / len(cpu_consumptions),\n 'max': max(cpu_consumptions)\n }\n\n if len(memory_consumptions) > 0:\n usage['memory'] = {\n 'avg': sum(memory_consumptions) / len(memory_consumptions),\n 'max': max(memory_consumptions)\n }\n\n for obj in pod_resource_usage:\n delete_entry(obj)\n\n app.logger.info(\"ABOUT TO send pod stats\")\n orch.send_pod_stats(usage, event_timestamps_dict)\n response = {\n \"MSG\": \"OK\",\n \"timestamps\": event_timestamps_dict,\n \"resource_consumption\": usage\n }\n else:\n\n app.logger.error(\n f\"Received request indicating the workflow {req['workflow_name']} \"\n f\"completed, but this workflow is not present in running jobs\"\n f\"record. Nothing to do.\")\n response = {\n \"MSG\": f\"ERR: {req['workflow_name']} not in running jobs record.\"\n }\n\n return jsonify(response)"
] | [
"0.6481785",
"0.6258558",
"0.6212218",
"0.5927201",
"0.5507661",
"0.5277229",
"0.5242107",
"0.5207474",
"0.51513904",
"0.5112994",
"0.5106702",
"0.5086462",
"0.50150937",
"0.4995987",
"0.49878561",
"0.497371",
"0.49726665",
"0.4905259",
"0.49032328",
"0.48811585",
"0.48808104",
"0.48575678",
"0.484289",
"0.48146537",
"0.48051938",
"0.48050416",
"0.4778383",
"0.47707385",
"0.4760732",
"0.47510603"
] | 0.766613 | 0 |
Sets the status of the next top-level operation to 1 (ACTIVE). Fetches the next top-level operation from the database and applies a FILESYSTEM LOCK, which is /tmp/scv_operating.lck! | def process_next(cls):
db = cls._core.get_db()
configuration = cls._core.get_configuration()
if os.path.exists(configuration.get_entry("core.webpath")+"/scv_operating.lck"):
return False
lockfile = open(configuration.get_entry("core.webpath")+"/scv_operating.lck","w")
lockfile.close()
stmnt_lock = "UPDATE OPERATIONS SET OPE_STATUS = 1 \
WHERE OPE_ID IN ( \
SELECT OPE_ID FROM OPERATIONS \
WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0 \
AND OPE_INVOKED = ( \
SELECT MIN(OPE_INVOKED) FROM OPERATIONS \
WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0) \
) ;"
stmnt = "SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 1 ;"
db.query(cls._core,stmnt_lock,commit=True)
cur = db.query(cls._core,stmnt)
res = cur.fetchallmap()
if len(res) > 0:
operation = cls.restore_operation(res[0])
try:
cls.process_children(operation)
operation.do_workload()
except Exception, e:
stmnt_err = "UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;"
db.query(cls._core,stmnt_err,(operation.get_id(),),commit=True)
error = StringIO()
print_exc(None,error)
cls._core.log(error.getvalue())
ret = True
else:
ret = False
stmnt_delete = "DELETE FROM OPERATIONS WHERE OPE_STATUS = 1 ;"
db.query(cls._core,stmnt_delete,commit=True)
db.commit()
try:
os.unlink(configuration.get_entry("core.webpath")+"/scv_operating.lck")
except OSError,e :
raise OperationException(OperationException.get_msg(0))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def arm_oplock_future(self):\n self.oplock_future = self.tree.session.client.oplock_break_future(self.file_id)",
"def processLock(self):\r\n self.controller.executionLock()",
"def active(value):\r\n self.context.active = threading.BoundedSemaphore(value=value)",
"def state_wait_enter(cfg, app, win):",
"def attempt_to_acquire_leader(self, permanent=False):",
"def Operational(self):\r\n self.LogDebug(\"Port Operational - {}\".format(datetime.now().isoformat(\" \")))\r\n\r\n if \"SFTPConf\" in self.ConfigDict:\r\n self.SFTPConf = self.ConfigDict[\"SFTPConf\"]\r\n self.LogDebug(str(self.SFTPConf))\r\n\r\n # initialise the status of the binaries to be monitored\r\n for index in self.SFTPConf[\"IndexList\"]:\r\n self.binary[index] = {'status': False, 'start_time': None}\r\n\r\n try:\r\n with open('list_backup', 'r') as f:\r\n file_data = json.loads(f.read())\r\n self.file_list = [datetime.strptime(x, \"%Y-%m-%dT%H:%M:%S\") for x in file_data]\r\n self.LogDebug(\"Port Operational - File list loaded from backup file: {}\".format(file_data))\r\n except:\r\n self.LogDebug(\"Port Operational - no backup list to load\")\r\n\r\n # start the file retrieval timer\r\n timer_duration_ms = self.TimerDuration()\r\n odc.SetTimer(self.guid, 1, timer_duration_ms) \r\n next_timer = datetime.now()+timedelta(seconds=timer_duration_ms/1000)\r\n self.LogDebug(\"Next file retrieval: {}\".format(next_timer.isoformat(\" \")))\r\n\r\n return",
"def retry_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.retry_operation(row[\"OPE_ID\"])\n\n stmnt = \"UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)",
"def state_processing_enter(cfg, app, win):",
"def lock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')",
"def gate(self):\n locked = self.is_locked()\n if locked:\n self.PAUSED() # pause at locked gate\n self.fsm_gate.wait() # wait for gate to unlock\n self.CONTINUE() # continue through open gate",
"def _lock(self):\n import os\n from time import sleep\n # Waits until another process completes it's process\n while os.path.isfile(self.db_path+\".lock\"):\n print(\"Another process is using\",\n self.db_path, \". Waiting for release.\")\n sleep(1)\n with open(self.db_path+\".lock\", 'w') as fp:\n pass",
"def open(self, wait=True):\n if self.SE == 6:\n self.evr.polarity.put('VAL', 1, use_complete=True)\n else:\n self.RESET_PG = 0\n if self._follower_mode:\n self.follower_mode()\n self.evr.polarity.put('VAL', 1, use_complete=True)\n else:\n self.records.S_OPEN.put('VAL', 1, use_complete=True, wait=wait)",
"def wm(self):\n return self.get_par(\"readback\")",
"def _doLid1State(self, state = True):\n if state:\n self._executeServerTask(self._cmdOpenLid1)\n else:\n self._executeServerTask(self._cmdCloseLid1)",
"def status(self) -> NoReturn:\n\n curr_status= self.percent_done()\n while(curr_status < 100):\n\n update_status(name=self.name, status=curr_status)\n time.sleep(0.5)\n\n curr_status = self.percent_done()\n\n update_status(name=self.name, status=curr_status)",
"def set_lock_status(use_lock):\r\n get_lock.lock_is_enabled = use_lock",
"def check_current_lock(con, host, warning, critical,perf_data):\n warning = warning or 10\n critical = critical or 30\n data=get_server_status(con)\n\n lockTime=float(data['globalLock']['lockTime']) \n totalTime=float(data['globalLock']['totalTime']) \n\n err,delta=maintain_delta([totalTime,lockTime],host,\"locktime\") \n if err==0: \n lock_percentage = delta[2]/delta[1]*100 #lockTime/totalTime*100\n message = \"Current Lock Percentage: %.2f%%\" % lock_percentage\n message+=performance_data(perf_data,[(\"%.2f\" % lock_percentage,\"current_lock_percentage\",warning,critical)])\n return check_levels(lock_percentage,warning,critical,message)\n else :\n return exit_with_general_warning(\"problem reading data from temp file\")",
"def local(self):\n logging.info(__name__ + ' : Set control to local & locked')\n self.set_remote_status(0)",
"def reqNodeStatus(self):\n while self.status != Modem.Status.IDLE :\n sleep(0.1)\n if self.status != Modem.Status.IDLE:\n raise ValueError(\"Modem getNodeStatus unexpected status: \\\n \" + str(self.status))\n self.status = Modem.Status.BUSY2REQ\n self.send(self.interpreter.buildGetStatus())\n while self.status != Modem.Status.IDLE and self.status != Modem.Status.KILL:\n sleep(self.m_to)\n #self.recvCommand()\n if self.status == Modem.Status.KILL:\n return self.close()\n return self.errorCheck()",
"def os_start_db( self, ):\r\n pass",
"def i_am_locking(self):\r\n pass",
"def next_status(self):\n if self.status == self.DRAFT:\n self._advance_to_registration()\n elif self.status == self.REGISTRATION:\n self._advance_to_pending()\n elif self.status == self.PENDING:\n self._advance_to_running()",
"def lock_table(self):\n\n self.status = 'Locked'",
"def _handler_acquiring_status_enter(self):\n self._async_raise_fsm_event(ProtocolEvent.ACQUIRE_STATUS_ASYNC)",
"async def resume_operations(self):\n await asyncio.sleep(10)\n for op in await self.get_service('data_svc').locate('operations', match=dict(finish=None)):\n self.loop.create_task(self.run_operation(op))",
"def exec_dopq_lock_state(self, is_enqueue_update):\n if is_enqueue_update:\n self.dopq_wrp_obj.lock_update(True)\n else:\n self.dopq_wrp_obj.lock_update(False)\n\n print(\"(Data Platform)--> lock state: \", self.dopq_wrp_obj.lock_state)",
"def main(self):\n debug(\"Using %s\" % (self.PROC_DISKSTATS))\n\n initial = self.get_status()\n time.sleep(self.interval)\n final = self.get_status()\n\n # Get bytes/sec\n for d in self.partitions:\n r_diff = ((final[d].r_sectors - initial[d].r_sectors) * self.sector_size) / self.interval\n w_diff = ((final[d].w_sectors - initial[d].w_sectors) * self.sector_size) / self.interval\n final[d].r_rate = r_diff\n final[d].w_rate = w_diff\n \n # Status string\n msg = \" \".join([ \"%s (r: %d KB/s, w: %d KB/s)\" % (i.dev, i.r_rate / 1024, i.w_rate / 1024) for i in sorted(final.values(), key=lambda x:x.dev) ])\n performance = \" \".join([ \"'%s read'=%d '%s write'=%d\" % (i.dev, i.r_rate, i.dev, i.w_rate) for i in sorted(final.values(), key=lambda x:x.dev) ])\n\n return (EX_OK, msg, performance)",
"def acquire(self, access_mode=None):",
"def test_locked_file_03(self):\n \n f = open(\"tests/locked.db3\", \"a+\")\n fcntl.lockf(f.fileno(), fcntl.LOCK_EX) \n \n x = subprocess.Popen([\"sqlbak\", \"tests\", \"--ms-towait=4000\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n time.sleep(3)\n fcntl.lockf(f.fileno(), fcntl.LOCK_UN)\n f.close()\n\n result = x.communicate()[0]\n\n self.assertTrue(\"cannot obtain lock\" not in result)",
"def update_sys_resource():\n\n cpu_cores = get_cpu_cores()\n logger.debug(\"starting top module\")\n cpu_usage = get_cpu_usage()\n mem_usage = get_mem_usage()\n df_usage = get_df_usage()\n logger.debug(\"round instrument data ready, next is top 5data\")\n fields = [\n 'check_time', 'cpu_usage', 'cpu_all', 'cpu_using', 'mem_usage',\n 'mem_all', 'mem_using', 'disk_usage', 'disk_all', 'disk_using',\n 'cpu_topN', 'mem_topN', 'disk_topN', 'net_in_topN', 'net_out_topN'\n ]\n # result = {}\n # result.fromkeys(field, None)\n result = {i: None for i in fields}\n result['check_time'] = int(time.time())\n result['cpu_all'] = cpu_cores\n result['cpu_usage'] = cpu_usage\n result['mem_all'], result['mem_using'] = mem_usage\n result['disk_all'], result['disk_using'] = df_usage\n try:\n result['mem_usage'] = result['mem_using'] / result['mem_all']\n except ZeroDivisionError:\n result['mem_usage'] = 0.0\n try:\n result['disk_usage'] = result['disk_using'] / result['disk_all']\n except ZeroDivisionError:\n result['disk_usage'] = 0.0\n result['cpu_topN'] = get_topN_cpu()\n net_topn_data = get_topN_netIO()\n mnd_topn_data = get_topN_mnd()\n result[\"mem_topN\"] = mnd_topn_data[\"mem.bytes.memavailable\"]\n result[\"disk_topN\"] = mnd_topn_data[\"df.bytes.used\"]\n result[\"net_in_topN\"] = net_topn_data[\"cluster.net.dev.receive\"]\n result[\"net_out_topN\"] = net_topn_data[\"cluster.net.dev.transmit\"]\n # print(result)\n send_to_db('argus-statistics', 'sys_resource', result)\n logger.debug(\"update is already success\")"
] | [
"0.54746443",
"0.5449821",
"0.54388434",
"0.5305737",
"0.52546555",
"0.52230906",
"0.5174595",
"0.5144526",
"0.5143462",
"0.5060711",
"0.5041392",
"0.50204164",
"0.5015011",
"0.5006248",
"0.50048566",
"0.5001075",
"0.49896038",
"0.4973004",
"0.49551147",
"0.49547556",
"0.49520612",
"0.49466154",
"0.49461028",
"0.49428773",
"0.49293905",
"0.49125865",
"0.49088362",
"0.48780382",
"0.48766387",
"0.48683947"
] | 0.7192307 | 0 |
Sets this operation's values from module metadata | def set_values(self,module):
if type(module) == dict:
self.set_value("name",module["name"])
self.set_value("hrname",module["hrname"])
self.set_value("version_major",module["version_major"])
self.set_value("version_minor",module["version_minor"])
self.set_value("revision",module["revision"])
if module.has_key("signature"):
self.set_value("signature",module["signature"])
elif module.__class__.__name__ == "Module":
pass #TODO IMPLEMENT / DISCUSS AFTER IMPLEMENTING MODULE-SUBSYSTEM | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_metadata(self, data):\r\n pass",
"def __init__(self):\n\n self.operations = {}",
"def PopulateModuleMetadata(self, mod, mojom_file):\n mod.name = os.path.basename(mojom_file.file_name)\n mod.path = mojom_file.file_name\n mod.namespace = mojom_file.module_namespace\n if mojom_file.attributes:\n mod.attributes = {attr.key: attr.value for attr in mojom_file.attributes}",
"def set_metadata(self, metadata):\n return self.client._perform_json(\n \"PUT\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name),\n body=metadata)",
"def __init__(self, operations = []):\n self.operations = operations",
"def __init__(self, op, op_param_list, op_reg_list):\n self. operation = {\n 'op': op,\n 'op_param_list': op_param_list,\n 'op_reg_list': op_reg_list\n }",
"def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license",
"def exec_module(self, **kwargs):\n\n for key in list(self.module_arg_spec.keys()) + ['tags']:\n if hasattr(self, key):\n setattr(self, key, kwargs[key])\n elif kwargs[key] is not None:\n if key == \"location\":\n self.parameters[\"location\"] = kwargs[key]\n elif key == \"plan\":\n self.parameters[\"plan\"] = kwargs[key]\n elif key == \"open_shift_version\":\n self.parameters[\"open_shift_version\"] = kwargs[key]\n elif key == \"public_hostname\":\n self.parameters[\"public_hostname\"] = kwargs[key]\n elif key == \"fqdn\":\n self.parameters[\"fqdn\"] = kwargs[key]\n elif key == \"network_profile\":\n self.parameters[\"network_profile\"] = kwargs[key]\n elif key == \"router_profiles\":\n self.parameters[\"router_profiles\"] = kwargs[key]\n elif key == \"master_pool_profile\":\n ev = kwargs[key]\n if 'vm_size' in ev:\n if ev['vm_size'] == 'standard_d2s_v3':\n ev['vm_size'] = 'Standard_D2s_v3'\n elif ev['vm_size'] == 'standard_d4s_v3':\n ev['vm_size'] = 'Standard_D4s_v3'\n if 'os_type' in ev:\n if ev['os_type'] == 'linux':\n ev['os_type'] = 'Linux'\n elif ev['os_type'] == 'windows':\n ev['os_type'] = 'Windows'\n self.parameters[\"master_pool_profile\"] = ev\n elif key == \"agent_pool_profiles\":\n ev = kwargs[key]\n if 'vm_size' in ev:\n if ev['vm_size'] == 'standard_d2s_v3':\n ev['vm_size'] = 'Standard_D2s_v3'\n elif ev['vm_size'] == 'standard_d4s_v3':\n ev['vm_size'] = 'Standard_D4s_v3'\n if 'os_type' in ev:\n if ev['os_type'] == 'linux':\n ev['os_type'] = 'Linux'\n elif ev['os_type'] == 'windows':\n ev['os_type'] = 'Windows'\n self.parameters[\"agent_pool_profiles\"] = ev\n elif key == \"auth_profile\":\n self.parameters[\"auth_profile\"] = kwargs[key]\n\n old_response = None\n response = None\n\n self.mgmt_client = self.get_mgmt_svc_client(ContainerServiceClient,\n base_url=self._cloud_environment.endpoints.resource_manager)\n\n resource_group = self.get_resource_group(self.resource_group)\n\n if \"location\" not in self.parameters:\n self.parameters[\"location\"] = resource_group.location\n\n old_response = self.get_openshiftmanagedcluster()\n\n if not old_response:\n self.log(\"Open Shift Managed Cluster instance doesn't exist\")\n if self.state == 'absent':\n self.log(\"Old instance didn't exist\")\n else:\n self.to_do = Actions.Create\n else:\n self.log(\"Open Shift Managed Cluster instance already exists\")\n if self.state == 'absent':\n self.to_do = Actions.Delete\n elif self.state == 'present':\n self.log(\"Need to check if Open Shift Managed Cluster instance has to be deleted or may be updated\")\n self.to_do = Actions.Update\n\n if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):\n self.log(\"Need to Create / Update the Open Shift Managed Cluster instance\")\n\n if self.check_mode:\n self.results['changed'] = True\n return self.results\n\n response = self.create_update_openshiftmanagedcluster()\n\n if not old_response:\n self.results['changed'] = True\n else:\n self.results['changed'] = old_response.__ne__(response)\n self.log(\"Creation / Update done\")\n elif self.to_do == Actions.Delete:\n self.log(\"Open Shift Managed Cluster instance deleted\")\n self.results['changed'] = True\n\n if self.check_mode:\n return self.results\n\n self.delete_openshiftmanagedcluster()\n # make sure instance is actually deleted, for some Azure resources, instance is hanging around\n # for some time after deletion -- this should be really fixed in Azure.\n while self.get_openshiftmanagedcluster():\n time.sleep(20)\n else:\n self.log(\"Open Shift Managed Cluster instance 
unchanged\")\n self.results['changed'] = False\n response = old_response\n\n if self.state == 'present':\n self.results.update(self.format_item(response))\n return self.results",
"def adjust_custom_op_info(compute_op_info):\n py_module_path = compute_op_info[\"py_module_path\"]\n if os.path.isfile(py_module_path):\n py_module_path, file_name = os.path.split(py_module_path)\n module_name, _ = os.path.splitext(file_name)\n compute_op_info[\"py_module_path\"] = py_module_path\n compute_op_info[\"module_name\"] = module_name",
"def setValues(self):\n pass",
"def setValues(self):\n pass",
"def setValues(self):\n pass",
"def setValues(self):\n pass",
"def setValues(self):\n pass",
"def setValues(self):\n pass",
"def set_invocation_metadata(self, items: Tuple[Tuple[str, str]]):\n self._invocation_metadata = items",
"def _set_attributes(self):",
"def _setModule(self, module):\n self._module = module\n # copy the original module for exploration\n self.n_values = deepcopy(module)\n self.n_values._params[:] = 0",
"def __setattr__(self, name, value):\n if isinstance(value, torch.jit.ScriptModule):\n object.__setattr__(self, name, value)\n elif isinstance(value, FrameworkTensor):\n self.role.register_state_tensor(value)\n self.state_attributes[name] = value\n elif isinstance(value, FrameworkLayerModule):\n for param in value.parameters():\n self.role.register_state_tensor(param)\n self.state_attributes[name] = value\n else:\n object.__setattr__(self, name, value)",
"def update_metadata(self):\n parser = GenericParser(\n fn_re='{}/(e\\d+s\\d+)_.*/Production.nc'.format(self.data_folder),\n group_names=['sim'],\n group_transforms=[lambda x: x],\n top_fn='',\n step_ps=self.timestep\n )\n meta = gather_metadata('{}/e*/*nc'.format(self.data_folder), parser)\n meta['top_fn'] = sorted(glob('{}/e*/structure.prmtop'.format(self.input_folder)))\n self.meta = meta",
"def __init__(self):\r\n self.operation_map = {}",
"def __init__(self, rpc, mtype, mname):\n\n self.moduletype = mtype\n self.modulename = mname\n self.rpc = rpc\n self._info = rpc.call(MsfRpcMethod.ModuleInfo, [mtype, mname])\n property_attributes = [\"advanced\", \"evasion\", \"options\", \"required\", \"runoptions\"]\n for k in self._info:\n if k not in property_attributes:\n # don't try to set property attributes\n setattr(self, k, self._info.get(k))\n self._moptions = rpc.call(MsfRpcMethod.ModuleOptions, [mtype, mname])\n self._roptions = []\n self._aoptions = []\n self._eoptions = []\n self._runopts = {}\n for o in self._moptions:\n if self._moptions[o]['required']:\n self._roptions.append(o)\n if self._moptions[o]['advanced']:\n self._aoptions.append(o)\n if self._moptions[o]['evasion']:\n self._eoptions.append(o)\n if 'default' in self._moptions[o]:\n self._runopts[o] = self._moptions[o]['default']\n\n if mtype in [\"auxiliary\", \"post\"]:\n d_act = self._info.get('default_action')\n if d_act is not None:\n act = 'ACTION'\n self._moptions[act] = {\"default\": d_act}\n self._runopts[act] = self._moptions[act]['default']",
"def set_metadata(self, key, val):\n \n self.metadata[key] = val",
"def setMetadata(self, metadata):\n document_properties = self.document_loaded.getDocumentProperties()\n user_defined_properties = document_properties.getUserDefinedProperties()\n new_properties = []\n for prop, value in metadata.items():\n for container in [document_properties, user_defined_properties]:\n current_value = getattr(container, prop, None)\n if current_value is not None:\n if isinstance(current_value, tuple):\n if isinstance(value, list):\n value = tuple(value)\n elif isinstance(value, basestring):\n # BBB: old ERP5 code sends Keywords as a string\n # separated by a whitespace.\n value = tuple(value.split(' '))\n if isinstance(value, type(current_value)):\n setattr(container, prop, value)\n break\n else:\n new_properties.append([prop, value])\n for prop, value in new_properties:\n if isinstance(value, basestring):\n user_defined_properties.addProperty(prop, 0, '')\n user_defined_properties.setPropertyValue(prop, value)\n self.document_loaded.store()\n self.document_loaded.dispose()",
"def __setstate__(self,values):\n self.initDefault()\n setter = object.__setattr__\n for value,attr in zip(values,self.persistent):\n setter(self,attr,value)\n if self.dirty_sizeCrc == None:\n self.dirty_sizeCrc = {} #--Use empty dict instead.\n self.refreshDataSizeCrc()",
"def set_metadata(self, metadata):\n self.metadata = metadata\n return self",
"def __metadata__(self):\n raise NotImplementedError",
"def operation(self, operation: str):\n\n self._operation = operation",
"def __init__(self, operation_inputs):\n\n full_operation_name = ctx.operation.name\n self.operation_name = full_operation_name.split('.').pop()\n\n # These should not make their way into the Operation inputs.\n os.environ['_PAGINATION_OFFSET'] = \\\n text_type(operation_inputs.pop('pagination_offset', 0))\n os.environ['_PAGINATION_SIZE'] = \\\n text_type(operation_inputs.pop('pagination_size', 1000))\n\n # cloudify client\n self.client_config = get_desired_value(\n 'client', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n if self.client_config:\n self.client = CloudifyClient(**self.client_config)\n else:\n self.client = manager.get_rest_client()\n\n # plugins\n self.plugins = get_desired_value(\n 'plugins', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n # secrets\n self.secrets = get_desired_value(\n 'secrets', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties\n )\n\n # resource_config\n self.config = get_desired_value(\n 'resource_config', operation_inputs,\n ctx.instance.runtime_properties,\n ctx.node.properties)\n\n # Blueprint-related properties\n self.blueprint = self.config.get('blueprint', {})\n self.blueprint_id = self.blueprint.get('id') or ctx.instance.id\n self.blueprint_file_name = self.blueprint.get('main_file_name')\n self.blueprint_archive = self.blueprint.get('blueprint_archive')\n\n # Deployment-related properties\n self.deployment = self.config.get('deployment', {})\n self.deployment_id = self.deployment.get('id') or ctx.instance.id\n self.deployment_inputs = self.deployment.get('inputs', {})\n self.deployment_outputs = self.deployment.get('outputs')\n self.deployment_all_outputs = self.deployment.get('all_outputs', True)\n self.deployment_logs = self.deployment.get('logs', {})\n\n # Node-instance-related properties\n self.node_instance_proxy = self.config.get('node_instance')\n\n # Execution-related properties\n self.workflow_id = \\\n operation_inputs.get('workflow_id',\n 'create_deployment_environment')\n self.workflow_state = \\\n operation_inputs.get(\n 'workflow_state',\n 'terminated')\n self.reexecute = \\\n self.config.get('reexecute') \\\n or ctx.instance.runtime_properties.get('reexecute') \\\n or False\n\n # Polling-related properties\n self.interval = operation_inputs.get('interval', POLLING_INTERVAL)\n self.state = operation_inputs.get('state', 'terminated')\n self.timeout = operation_inputs.get('timeout', EXECUTIONS_TIMEOUT)\n\n # This ``execution_id`` will be set once execute workflow done\n # successfully\n self.execution_id = None",
"def set_params():\n global module \n global ora_inst\n global response_loc\n\n module_args=dict(\n ora_inst=dict(type='str', required=True),\n response_loc=dict(type='str', required=True)\n )\n\n module=AnsibleModule(\n argument_spec=module_args\n )\n\n ora_inst = module.params['ora_inst']\n response_loc = module.params['response_loc']"
] | [
"0.59990793",
"0.58392733",
"0.5662214",
"0.5603085",
"0.55330473",
"0.55237305",
"0.5490515",
"0.5485604",
"0.5479347",
"0.54730034",
"0.54730034",
"0.54730034",
"0.54730034",
"0.54730034",
"0.54730034",
"0.5461847",
"0.54556865",
"0.54516035",
"0.54180896",
"0.54075307",
"0.5385916",
"0.53844965",
"0.535654",
"0.534018",
"0.530905",
"0.5296158",
"0.5293843",
"0.5267278",
"0.5265926",
"0.5261922"
] | 0.6323762 | 0 |
Returns an Array of ModuleOperationObjects that are currently listed in the queue | def get_currently_processed_modules(cls):
db = cls._core.get_db()
stmnt = "SELECT OPE_ID, OPE_OPE_PARENT, OPE_TYPE FROM OPERATIONS \
WHERE OPE_TYPE = 'ModuleInstallOperation' \
or OPE_TYPE = 'ModuleUninstallOperation' ;"
cur = db.query(cls._core,stmnt);
ret = []
for row in cur.fetchallmap():
ret.append(Operation.restore_operation(row).get_meta())
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def operation_list(self):\n return self._operation_list",
"def operation_list(self):\n return self._operation_list",
"def operation_list(self):\n return self._operation_list",
"def operation_list(self):\n return self._operation_list",
"def get_queue_list(self):\n return self.manager.get_queue_list()",
"def get_operations(self):\n return self.operations[:] # Returns a copy instead of actual attribute",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def list_operations():",
"def listModules(self):\n modules = [(module.name,\n module.queue,\n module.Active) for module in self.db.getModules()]\n return modules",
"def objects(self):\n\t\treturn self._objects",
"def get_registered_jobs(self):\n with self.__lock:\n return list(self.__registered_jobs)",
"def hbObjects(self):\r\n return self.__hbObjs",
"def oplocks(self):\n return self._oplocks",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def generate_queue(self,pool):\n\t\tqueue = []\n\t\tfor ele in self.elements:\n\t\t\tif ele.pool == pool and ele.status == 'pending':\n\t\t\t\tele.abs_path = \"/%s/%s/%s/%s\" % (\n\t\t\t\t\tself.base_dir,\n\t\t\t\t\tself.parent_dir,\n\t\t\t\t\tself.project,\n\t\t\t\t\tele.filename\n\t\t\t\t\t)\n\t\t\t\tqueue.append(ele)\n\t\treturn queue",
"def _pull_batch_from_queue(self):\n rollout = self.explorer.queue.get( timeout = 600.0 )\n while not rollout.terminal:\n try: \n rollout.extend( self.explorer.queue.get_nowait() )\n except queue.Empty:\n break\n print(rollout.size())\n return rollout",
"def get(self):\n with self.lock:\n return list(self.jobShapes)"
] | [
"0.63590986",
"0.63590986",
"0.63590986",
"0.63590986",
"0.6177426",
"0.5978019",
"0.5920926",
"0.5920926",
"0.5920926",
"0.5920926",
"0.5920926",
"0.5920926",
"0.5920926",
"0.5920926",
"0.5920926",
"0.5798638",
"0.57853013",
"0.5771161",
"0.5731278",
"0.5724055",
"0.56982976",
"0.565988",
"0.565988",
"0.565988",
"0.565988",
"0.565988",
"0.565988",
"0.5647341",
"0.5635224",
"0.5632879"
] | 0.7111473 | 0 |
Compute squarefree decomposition of the monic ``f`` in ``GF(q)[X]``. Notes ===== Uses a modified version of Musser's algorithm for squarefree decomposition of univariate polynomials over finite fields. References ========== | def _gf_sqf_list(self, f):
domain = self.domain
n, factors, p = 1, [], int(domain.characteristic)
m = int(domain.order // p)
while not f.is_ground:
df = [f.diff(x) for x in self.gens]
if any(_ for _ in df):
g = f
for q in df:
g = self.gcd(g, q)
h, f, i = f // g, g, 1
while h != 1:
g = self.gcd(f, h)
h //= g
if not h.is_ground:
factors.append((h, i*n))
f //= g
h = g
i += 1
n *= p
g = self.zero
for monom, coeff in f.items():
g[tuple(_ // p for _ in monom)] = coeff**m
f = g
return factors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_squarefree(self, f):\n if f.is_ground:\n return True\n g = f\n for x in self.gens:\n g = self.gcd(g, f.diff(x))\n if g.is_ground:\n return True\n return False",
"def sqf_part(self, f):\n domain = self.domain\n\n if domain.is_FiniteField:\n g = self.one\n for f, _ in self.sqf_list(f)[1]:\n g *= f\n\n return g\n\n if not f:\n return f\n\n gcd = f\n for x in self.gens:\n gcd = self.gcd(gcd, f.diff(x))\n sqf = f // gcd\n\n if domain.is_Field:\n return sqf.monic()\n return sqf.primitive()[1]",
"def zzX_sqf_p(f):\n return zzX_one_p(zzX_gcd(zzX_primitive(f)[1], zzX_diff(f)))",
"def zzX_sqf_part(f):\n quo = zzX_quo(f, zzX_gcd(f, zzX_diff(f)))\n return zzX_primitive(quo)[1]",
"def sqf_norm(self, f):\n domain = self.domain\n\n if not domain.is_AlgebraicField:\n raise DomainError(f'ground domain must be algebraic, got {domain}')\n\n new_ring = self.to_ground().inject(*domain.symbols, front=True)\n g = domain.mod.set_ring(new_ring)\n s = 0\n\n while True:\n h = f.inject(front=True)\n r = g.resultant(h)\n\n if r.is_squarefree:\n return s, f, r\n f = f.compose({x: x - domain.unit for x in self.gens})\n s += 1",
"def zzx_sqf_part(f):\n quo = zzx_quo(f, zzx_gcd(f, zzx_diff(f)))\n return zzx_primitive(quo)[1]",
"def roots_quintic(f):\n result = []\n\n coeff_5, coeff_4, p_, q_, r_, s_ = f.all_coeffs()\n\n if not all(coeff.is_Rational for coeff in (coeff_5, coeff_4, p_, q_, r_, s_)):\n return result\n\n if coeff_5 != 1:\n f = Poly(f / coeff_5)\n _, coeff_4, p_, q_, r_, s_ = f.all_coeffs()\n\n # Cancel coeff_4 to form x^5 + px^3 + qx^2 + rx + s\n if coeff_4:\n p = p_ - 2*coeff_4*coeff_4/5\n q = q_ - 3*coeff_4*p_/5 + 4*coeff_4**3/25\n r = r_ - 2*coeff_4*q_/5 + 3*coeff_4**2*p_/25 - 3*coeff_4**4/125\n s = s_ - coeff_4*r_/5 + coeff_4**2*q_/25 - coeff_4**3*p_/125 + 4*coeff_4**5/3125\n x = f.gen\n f = Poly(x**5 + p*x**3 + q*x**2 + r*x + s)\n else:\n p, q, r, s = p_, q_, r_, s_\n\n quintic = PolyQuintic(f)\n\n # Eqn standardized. Algo for solving starts here\n if not f.is_irreducible:\n return result\n f20 = quintic.f20\n # Check if f20 has linear factors over domain Z\n if f20.is_irreducible:\n return result\n # Now, we know that f is solvable\n for _factor in f20.factor_list()[1]:\n if _factor[0].is_linear:\n theta = _factor[0].root(0)\n break\n d = discriminant(f)\n delta = sqrt(d)\n # zeta = a fifth root of unity\n zeta1, zeta2, zeta3, zeta4 = quintic.zeta\n T = quintic.T(theta, d)\n tol = S(1e-10)\n alpha = T[1] + T[2]*delta\n alpha_bar = T[1] - T[2]*delta\n beta = T[3] + T[4]*delta\n beta_bar = T[3] - T[4]*delta\n\n disc = alpha**2 - 4*beta\n disc_bar = alpha_bar**2 - 4*beta_bar\n\n l0 = quintic.l0(theta)\n Stwo = S(2)\n l1 = _quintic_simplify((-alpha + sqrt(disc)) / Stwo)\n l4 = _quintic_simplify((-alpha - sqrt(disc)) / Stwo)\n\n l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / Stwo)\n l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / Stwo)\n\n order = quintic.order(theta, d)\n test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )\n # Comparing floats\n if not comp(test, 0, tol):\n l2, l3 = l3, l2\n\n # Now we have correct order of l's\n R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4\n R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4\n R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4\n R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4\n\n Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]\n Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]\n\n # Simplifying improves performance a lot for exact expressions\n R1 = _quintic_simplify(R1)\n R2 = _quintic_simplify(R2)\n R3 = _quintic_simplify(R3)\n R4 = _quintic_simplify(R4)\n\n # hard-coded results for [factor(i) for i in _vsolve(x**5 - a - I*b, x)]\n x0 = z**(S(1)/5)\n x1 = sqrt(2)\n x2 = sqrt(5)\n x3 = sqrt(5 - x2)\n x4 = I*x2\n x5 = x4 + I\n x6 = I*x0/4\n x7 = x1*sqrt(x2 + 5)\n sol = [x0, -x6*(x1*x3 - x5), x6*(x1*x3 + x5), -x6*(x4 + x7 - I), x6*(-x4 + x7 + I)]\n\n R1 = R1.as_real_imag()\n R2 = R2.as_real_imag()\n R3 = R3.as_real_imag()\n R4 = R4.as_real_imag()\n\n for i, s in enumerate(sol):\n Res[1][i] = _quintic_simplify(s.xreplace({z: R1[0] + I*R1[1]}))\n Res[2][i] = _quintic_simplify(s.xreplace({z: R2[0] + I*R2[1]}))\n Res[3][i] = _quintic_simplify(s.xreplace({z: R3[0] + I*R3[1]}))\n Res[4][i] = _quintic_simplify(s.xreplace({z: R4[0] + I*R4[1]}))\n\n for i in range(1, 5):\n for j in range(5):\n Res_n[i][j] = Res[i][j].n()\n Res[i][j] = _quintic_simplify(Res[i][j])\n r1 = Res[1][0]\n r1_n = Res_n[1][0]\n\n for i in range(5):\n if comp(im(r1_n*Res_n[4][i]), 0, tol):\n r4 = Res[4][i]\n break\n\n # Now we have various Res values. Each will be a list of five\n # values. 
We have to pick one r value from those five for each Res\n u, v = quintic.uv(theta, d)\n testplus = (u + v*delta*sqrt(5)).n()\n testminus = (u - v*delta*sqrt(5)).n()\n\n # Evaluated numbers suffixed with _n\n # We will use evaluated numbers for calculation. Much faster.\n r4_n = r4.n()\n r2 = r3 = None\n\n for i in range(5):\n r2temp_n = Res_n[2][i]\n for j in range(5):\n # Again storing away the exact number and using\n # evaluated numbers in computations\n r3temp_n = Res_n[3][j]\n if (comp((r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus).n(), 0, tol) and\n comp((r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus).n(), 0, tol)):\n r2 = Res[2][i]\n r3 = Res[3][j]\n break\n if r2 is not None:\n break\n else:\n return [] # fall back to normal solve\n\n # Now, we have r's so we can get roots\n x1 = (r1 + r2 + r3 + r4)/5\n x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5\n x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5\n x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5\n x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5\n result = [x1, x2, x3, x4, x5]\n\n # Now check if solutions are distinct\n\n saw = set()\n for r in result:\n r = r.n(2)\n if r in saw:\n # Roots were identical. Abort, return []\n # and fall back to usual solve\n return []\n saw.add(r)\n\n # Restore to original equation where coeff_4 is nonzero\n if coeff_4:\n result = [x - coeff_4 / 5 for x in result]\n return result",
"def sqf_part(f):\n return f.per(dmp_sqf_part(f.rep, f.lev, f.dom))",
"def zzx_sqf_p(f):\n return zzx_one_p(zzx_gcd(zzx_primitive(f)[1], zzx_diff(f)))",
"def vsfun(Q_slm, theta, phi,f=[]):\n vsf_th=numpy.zeros(theta.shape, dtype='complex')\n vsf_ph=numpy.zeros(theta.shape, dtype='complex')\n for (s,l,m) in Q_slm:\n vsh_th,vsh_ph=K(s, l, m, theta, phi)\n c_slm=Q_slm.getBysnm(s, l, m) if not(f) else Q_slm.getBysnm(s, l, m)(f)\n vsf_th=vsf_th+c_slm*vsh_th\n vsf_ph=vsf_ph+c_slm*vsh_ph\n return vsf_th, vsf_ph",
"def quo(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_quo(F, G, lev, dom))",
"def zzx_factor_sqf(f, **flags):\n cont, g = zzx_primitive(f)\n\n n = zzx_degree(g)\n\n if n <= 0:\n return cont, []\n\n if poly_LC(g) < 0:\n cont, g = -cont, zzx_neg(g)\n\n if n == 1 or zzx_eisenstein(g):\n return cont, [(g, 1)]\n\n factors = []\n\n if flags.get('cyclotomic', True):\n factors = zzx_cyclotomic_factor(g)\n\n if factors is None:\n factors = zzx_zassenhaus(g)\n\n def compare(f_a, f_b):\n i = len(f_a) - len(f_b)\n\n if not i:\n return cmp(f_a, f_b)\n else:\n return i\n\n return cont, sorted(factors, compare)",
"def _rr_yun0_sqf_list(self, f):\n if f.is_ground:\n return []\n\n result, count = [], 1\n qs = [f.diff(x) for x in self.gens]\n\n g = f\n for q in qs:\n g = self.gcd(g, q)\n\n while f != 1:\n qs = [q // g for q in qs]\n f //= g\n qs = [q - f.diff(x) for x, q in zip(self.gens, qs)]\n\n g = f\n for q in qs:\n g = self.gcd(g, q)\n if g != 1:\n result.append((g, count))\n\n count += 1\n\n return result",
"def form_factor( # pylint: disable=arguments-differ\n self, q: float, s: RealOrRealArray, t: RealOrRealArray, couplings: Couplings\n ) -> ComplexOrComplexArray:\n q2 = q**2 * 1e-6\n ss = s * 1e-6\n tt = t * 1e-6\n uu = q2 + MPI0_GEV**2 + 2 * MPI_GEV**2 - ss - tt\n\n ff = self.__form_factor(q2=q2, s=ss, t=tt, u=uu, couplings=couplings)\n return ff * 1e-9",
"def sqr(f):\n return f.per(dmp_sqr(f.rep, f.lev, f.dom))",
"def zzX_sqr(f):\n if poly_univariate_p(f):\n return zzx_sqr(f)\n\n if zzX_zero_p(f):\n return f\n\n df = zzX_degree(f)\n l = poly_level(f)-1\n\n h = []\n\n for i in xrange(0, 2*df+1):\n coeff = zzX_zero(l)\n\n jmin = max(0, i-df)\n jmax = min(i, df)\n\n n = jmax - jmin + 1\n\n jmax = jmin + n // 2 - 1\n\n for j in xrange(jmin, jmax+1):\n coeff = zzX_add(coeff, zzX_mul(f[j], f[i-j]))\n\n coeff = zzX_mul_const(coeff, 2)\n\n if n & 1:\n elem = zzX_sqr(f[jmax+1])\n coeff = zzX_add(coeff, elem)\n\n h.append(coeff)\n\n return h",
"def compute_clique_potentials(self,F):\r\n\r\n for i in self.nodes():\r\n self.node[i]['fac'] = factor([],[],[])\r\n \r\n for f in F.factors: # assign each factor to a clique\r\n for j,data in self.nodes_iter(data=True):\r\n if len(scipy.setdiff1d(f.var,data['clique']) ) ==0:\r\n self.node[j]['fac'] *= f\r\n self.nop += scipy.prod(self.node[j]['fac'].card)\r\n break",
"def roots_quartic(f):\n _, a, b, c, d = f.monic().all_coeffs()\n\n if not d:\n return [S.Zero] + roots([1, a, b, c], multiple=True)\n elif (c/a)**2 == d:\n x, m = f.gen, c/a\n\n g = Poly(x**2 + a*x + b - 2*m, x)\n\n z1, z2 = roots_quadratic(g)\n\n h1 = Poly(x**2 - z1*x + m, x)\n h2 = Poly(x**2 - z2*x + m, x)\n\n r1 = roots_quadratic(h1)\n r2 = roots_quadratic(h2)\n\n return r1 + r2\n else:\n a2 = a**2\n e = b - 3*a2/8\n f = _mexpand(c + a*(a2/8 - b/2))\n aon4 = a/4\n g = _mexpand(d - aon4*(a*(3*a2/64 - b/4) + c))\n\n if f.is_zero:\n y1, y2 = [sqrt(tmp) for tmp in\n roots([1, e, g], multiple=True)]\n return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]\n if g.is_zero:\n y = [S.Zero] + roots([1, 0, e, f], multiple=True)\n return [tmp - aon4 for tmp in y]\n else:\n # Descartes-Euler method, see [7]\n sols = _roots_quartic_euler(e, f, g, aon4)\n if sols:\n return sols\n # Ferrari method, see [1, 2]\n p = -e**2/12 - g\n q = -e**3/108 + e*g/3 - f**2/8\n TH = Rational(1, 3)\n\n def _ans(y):\n w = sqrt(e + 2*y)\n arg1 = 3*e + 2*y\n arg2 = 2*f/w\n ans = []\n for s in [-1, 1]:\n root = sqrt(-(arg1 + s*arg2))\n for t in [-1, 1]:\n ans.append((s*w - t*root)/2 - aon4)\n return ans\n\n # whether a Piecewise is returned or not\n # depends on knowing p, so try to put\n # in a simple form\n p = _mexpand(p)\n\n\n # p == 0 case\n y1 = e*Rational(-5, 6) - q**TH\n if p.is_zero:\n return _ans(y1)\n\n # if p != 0 then u below is not 0\n root = sqrt(q**2/4 + p**3/27)\n r = -q/2 + root # or -q/2 - root\n u = r**TH # primary root of solve(x**3 - r, x)\n y2 = e*Rational(-5, 6) + u - p/u/3\n if fuzzy_not(p.is_zero):\n return _ans(y2)\n\n # sort it out once they know the values of the coefficients\n return [Piecewise((a1, Eq(p, 0)), (a2, True))\n for a1, a2 in zip(_ans(y1), _ans(y2))]",
"def is_sqf(f):\n return dmp_sqf_p(f.rep, f.lev, f.dom)",
"def exquo(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_exquo(F, G, lev, dom))",
"def qft_recursive(qubits):\n qftcirc = Circuit()\n\n # First add the QFT subroutine above\n qftcirc.add(qft_no_swap(qubits))\n\n # Then add SWAP gates to reverse the order of the qubits:\n for i in range(math.floor(len(qubits) / 2)):\n qftcirc.swap(qubits[i], qubits[-i - 1])\n\n return qftcirc",
"def pquo(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_pquo(F, G, lev, dom))",
"def pexquo(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_pexquo(F, G, lev, dom))",
"def sub(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_sub(F, G, lev, dom))",
"def zzx_zassenhaus(f):\n n = zzx_degree(f)\n\n if n == 1:\n return [f]\n\n A = zzx_max_norm(f)\n b = poly_LC(f)\n B = abs(int(sqrt(n+1))*2**n*A*b)\n C = (n+1)**(2*n)*A**(2*n-1)\n gamma = int(ceil(2*log(C, 2)))\n prime_max = int(2*gamma*log(gamma))\n\n for p in xrange(3, prime_max+1):\n if not isprime(p) or b % p == 0:\n continue\n\n F = gf_from_int_poly(f, p)\n\n if gf_sqf_p(F, p):\n break\n\n l = int(ceil(log(2*B + 1, p)))\n\n modular = []\n\n for ff in gf_factor_sqf(F, p)[1]:\n modular.append(gf_to_int_poly(ff, p))\n\n g = zzx_hensel_lift(p, f, modular, l)\n\n T = set(range(len(g)))\n factors, s = [], 1\n\n while 2*s <= len(T):\n for S in subsets(T, s):\n G, H = [b], [b]\n\n S = set(S)\n\n for i in S:\n G = zzx_mul(G, g[i])\n for i in T-S:\n H = zzx_mul(H, g[i])\n\n G = zzx_trunc(G, p**l)\n H = zzx_trunc(H, p**l)\n\n G_norm = zzx_l1_norm(G)\n H_norm = zzx_l1_norm(H)\n\n if G_norm*H_norm <= B:\n T = T - S\n\n G = zzx_primitive(G)[1]\n f = zzx_primitive(H)[1]\n\n factors.append(G)\n b = poly_LC(f)\n\n break\n else:\n s += 1\n\n return factors + [f]",
"def zzX_heu_gcd(f, g, **flags):\n if poly_univariate_p(f):\n return zzx_heu_gcd(f, g, **flags)\n\n def interpolate(h, x):\n f = []\n\n while not zzX_zero_p(h):\n g = zzX_zz_trunc(h, x)\n f.insert(0, g)\n h = zzX_sub(h, g)\n h = zzX_quo_const(h, x)\n\n return f\n\n def finalize(h, cff, cfg, gcd):\n if zzX_zz_LC(h) > 0:\n h = zzX_mul_const(h, gcd)\n else:\n h = zzX_mul_const(h, -gcd)\n cff = zzX_neg(cff)\n cfg = zzX_neg(cfg)\n\n return h, cff, cfg\n\n zero_f = zzX_zero_p(f)\n zero_g = zzX_zero_p(g)\n\n l = poly_level(f)\n z = zzX_zero(l)\n\n if zero_f and zero_g:\n return z, z, z\n elif zero_f:\n return g, z, zzX_const(l, 1)\n elif zero_g:\n return f, zzX_const(l, 1), z\n\n df = zzX_degree(f)\n dg = zzX_degree(g)\n\n cf = zzX_zz_content(f)\n cg = zzX_zz_content(g)\n\n gcd = igcd(cf, cg)\n\n f = zzX_quo_const(f, gcd)\n g = zzX_quo_const(g, gcd)\n\n f_norm = zzX_max_norm(f)\n g_norm = zzX_max_norm(g)\n\n B = 2*min(f_norm, g_norm) + 29\n\n x = max(min(B, 99*INT_TYPE(isqrt(B))),\n 2*min(f_norm // abs(zzX_zz_LC(f)),\n g_norm // abs(zzX_zz_LC(g))) + 2)\n\n for i in xrange(0, 6):\n ff = zzX_eval(f, x)\n gg = zzX_eval(g, x)\n\n if not (zzX_zero_p(ff) or zzX_zero_p(gg)):\n h, cff, cfg = zzX_heu_gcd(ff, gg, **flags)\n\n h = interpolate(h, x)\n h = zzX_zz_primitive(h)[1]\n\n cff_, r = zzX_div(f, h)\n\n if zzX_zero_p(r):\n cfg_, r = zzX_div(g, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff_, cfg_, gcd)\n\n cff = interpolate(cff, x)\n\n h, r = zzX_div(f, cff)\n\n if zzX_zero_p(r):\n cfg_, r = zzX_div(g, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff, cfg_, gcd)\n\n cfg = interpolate(cfg, x)\n\n h, r = zzX_div(g, cfg)\n\n if zzX_zero_p(r):\n cff_, r = zzX_div(f, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff_, cfg, gcd)\n\n x = INT_TYPE(2.7319*x*isqrt(isqrt(x)))\n\n raise HeuristicGCDFailed('no luck')",
"def squareform(X, force=\"no\", checks=True):\n\n return ssd.squareform(X, force, checks)",
"def gcd(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_gcd(F, G, lev, dom))",
"def terms_gcd(f):\n J, F = dmp_terms_gcd(f.rep, f.lev, f.dom)\n return J, f.per(F)",
"def get_f_h_gas_comp_out(p: float, s: float) -> float:\n\n return - 1.869892835947070 * 10 ** (-1) * p ** 4 \\\n + 8.223224182177200 * 10 ** (-1) * p ** 3 \\\n + 4.124595239531860 * p ** 2 \\\n - 8.346302788803210 * 10 * p \\\n - 1.016388214044490 * 10 ** 2 * s ** 4 \\\n + 8.652428629143880 * 10 ** 2 * s ** 3 \\\n - 2.574830800631310 * 10 ** 3 * s ** 2 \\\n + 3.462049327009730 * 10 ** 3 * s \\\n + 9.209837906396910 * 10 ** (-1) * p ** 3 * s \\\n - 5.163305566700450 * 10 ** (-1) * p ** 2 * s ** 2 \\\n + 4.076727767130210 * p * s ** 3 \\\n - 8.967168786520070 * p ** 2 * s \\\n - 2.062021416757910 * 10 * p * s ** 2 \\\n + 9.510257675728610 * 10 * p * s \\\n - 1.476914346214130 * 10 ** 3"
] | [
"0.6348645",
"0.6082395",
"0.60524696",
"0.60362184",
"0.5977433",
"0.5936218",
"0.5890577",
"0.58653134",
"0.5856254",
"0.57929945",
"0.5784882",
"0.5635512",
"0.56248885",
"0.554542",
"0.55379504",
"0.5530155",
"0.5527634",
"0.55048215",
"0.54990107",
"0.5371403",
"0.53676456",
"0.53638583",
"0.5350854",
"0.53342074",
"0.53170127",
"0.53049266",
"0.5300341",
"0.52219623",
"0.5218056",
"0.5211141"
] | 0.6371153 | 0 |
Return ``True`` if ``f`` is a squarefree polynomial in ``K[X]``. Examples ======== >>> _, x, y = ring('x y', ZZ) >>> ((x + y)**2).is_squarefree False >>> (x**2 + y**2).is_squarefree True | def is_squarefree(self, f):
if f.is_ground:
return True
g = f
for x in self.gens:
g = self.gcd(g, f.diff(x))
if g.is_ground:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_squarefree_hilbert_number(n):\n return is_hilbert_number(n) and is_hilbert_squarefree_number(n)",
"def isNodeSheaf(_session, _node):\n return checkIncToSets(_session, _node, [keynodes.info.stype_sheaf], sc.SC_A_CONST | sc.SC_POS)",
"def is_sqf(f):\n return dmp_sqf_p(f.rep, f.lev, f.dom)",
"def is_quantifier_free(formula: Formula) -> bool:\r\n # Task 11.3.1\r\n\r\n if is_quantifier(formula.root):\r\n return False\r\n\r\n if is_binary(formula.root):\r\n return is_quantifier_free(formula.first) and is_quantifier_free(formula.second)\r\n\r\n if is_unary(formula.root):\r\n return is_quantifier_free(formula.first)\r\n\r\n return True",
"def is_quantifier_free(formula):\n assert type(formula) is Formula\n # Task 11.3.1\n if is_constant(formula.root) or is_variable(formula.root) or is_relation(formula.root) or is_equality(formula.root):\n return True\n\n if is_quantifier(formula.root):\n return False\n\n is_first = is_quantifier_free(formula.first)\n if is_binary(formula.root):\n return is_first and is_quantifier_free(formula.second)\n\n return is_first",
"def is_square(q_1: Qs) -> bool:\n\n return math.sqrt(q_1.dim).is_integer()",
"def is_clique(G,S): #set of vertices where every pair in the set forms an edge \n for v in S:\n if list(set(S)&set(neighbors(G,v))) != []: #[] <-- empty list\n return False\n \n return True",
"def _can_do_sum_of_squares(n, k):\n if k < 1:\n return False\n if n < 0:\n return False\n if n == 0:\n return True\n if k == 1:\n return is_square(n)\n if k == 2:\n if n in (1, 2):\n return True\n if isprime(n):\n if n % 4 == 1:\n return 1 # signal that it was prime\n return False\n else:\n f = factorint(n)\n for p, m in f.items():\n # we can proceed iff no prime factor in the form 4*k + 3\n # has an odd multiplicity\n if (p % 4 == 3) and m % 2:\n return False\n return True\n if k == 3:\n if (n//4**multiplicity(4, n)) % 8 == 7:\n return False\n # every number can be written as a sum of 4 squares; for k > 4 partitions\n # can be 0\n return True",
"def isSqrt(self):\n return _libsbml.ASTNode_isSqrt(self)",
"def is_symbolic(self: Q) -> bool:\n\n symbolic = False\n\n if (\n hasattr(self.t, \"free_symbols\")\n or hasattr(self.x, \"free_symbols\")\n or hasattr(self.y, \"free_symbols\")\n or hasattr(self.z, \"free_symbols\")\n ):\n symbolic = True\n\n return symbolic",
"def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, b, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n \n disc = 4*a*c - b**2\n if disc == 0 :\n return gcd([a,b,c]) < self._indefinite_content_bound()\n else :\n return disc < self.__disc",
"def is_primitive_root(g,n):\n\t# SAGE equivalent is mod(g,n).is_primitive_root() in IntegerMod class\n\tif gcd(g,n) != 1: return False # Not in the group of units\n\torder = euler_phi(n)\n\tif carmichael_lambda(n) != order: return False # Group of units isn't cyclic\n\torderfacts = prime_divisors(order)\n\tfor fact in orderfacts:\n\t\tif pow(g,order//fact,n) == 1: return False\n\treturn True",
"def has_xfree(self, s: set[Basic]):\n # protect O(1) containment check by requiring:\n if type(s) is not set:\n raise TypeError('expecting set argument')\n return any(a in s for a in iterfreeargs(self))",
"def is_square(N):\n if N < 0:\n print(\"N is negative number @is_square in ModulesFactorization.\")\n sys.exit()\n\n sqrt_N=round(math.sqrt(N))\n if N == sqrt_N*sqrt_N:\n return True\n else:\n return False",
"def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, _, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n\n return a + c < self.index()",
"def is_perfect_square():",
"def is_square(N):\n return N == round(N**(0.5))**2",
"def is_hilbert_squarefree_number(n):\n ubound = math.ceil(n / 2)\n for a in range(5, ubound + 1):\n if is_hilbert_square(a) and n % a == 0:\n return False\n return True",
"def is_square(x):\n\n if x < 0:\n return False\n if math.pow(int(math.sqrt(x)), 2) == x:\n return True",
"def has_path_sum(self, k):\n\n return self.has_path_sum_helper(self.root, k)",
"def bfs(graph: np.ndarray, row: int, s: int, t: int, parent: list) -> bool:\r\n visited = [False] * row\r\n queue = []\r\n queue.append(s)\r\n visited[s] = True\r\n\r\n while queue:\r\n\r\n u = queue.pop(0)\r\n\r\n for ind, val in enumerate(graph[u]):\r\n if visited[ind] is False and val > 0:\r\n queue.append(ind)\r\n visited[ind] = True\r\n parent[ind] = u\r\n\r\n return True if visited[t] else False",
"def stability(X, g_x_func, s, p, k):\n import numpy\n s.update_state(s, p, X = X, phase = k, Force_Update=True)\n H = hessian(g_x_func, s, p, dx=1e-6, gmix=True, k=k)\n Heig = numpy.linalg.eig(H)[0]\n HBeig = (Heig > 0.0)\n return numpy.all(HBeig)",
"def sroot(n):\n\n return int(n ** 0.5) == n ** 0.5",
"def isSymmetric(self, root: TreeNode) -> bool:\n return Solution().isMirror(root, root)",
"def is_triangular(k):\n sum = 0\n \n for number in range(1,k+1):\n sum += number\n if sum == k:\n return True\n if sum > k:\n return False",
"def is_semileaf(self):\n if self._leftchild and self._rightchild:\n return False\n if not self._leftchild and not self._rightchild:\n return False\n return True",
"def isSetStoichiometry(self):\n return _libsbml.SpeciesReference_isSetStoichiometry(self)",
"def in_family(ls, sol):\r\n familia = sym(sol)\r\n for k in range(1, len(familia)):\r\n if familia[k] in ls:\r\n return True\r\n return False",
"def is_stump(self):\n if self.is_leaf():\n return False\n return self.left_subtree.is_leaf() and self.right_subtree.is_leaf()",
"def is_straight(hand):\n # same suite\n suite = hand[0][1]\n vals = []\n for c in hand:\n vals.append(cards[c[0]])\n # check if vals are consecutive or not\n if is_contiguous(vals):\n return True\n else:\n return False"
] | [
"0.56360364",
"0.5465797",
"0.5397887",
"0.5356589",
"0.533036",
"0.51120263",
"0.50754285",
"0.5052217",
"0.49830848",
"0.49745223",
"0.49274656",
"0.4885014",
"0.48708126",
"0.484963",
"0.48122284",
"0.47914714",
"0.47727177",
"0.47523925",
"0.47462133",
"0.47416347",
"0.47316313",
"0.47047514",
"0.47031182",
"0.4694216",
"0.46798366",
"0.46722856",
"0.46672094",
"0.4665157",
"0.46398547",
"0.46220466"
] | 0.74997777 | 0 |
Squarefree norm of ``f`` in ``K[X]``, useful over algebraic domains. Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and ``r(x) = Norm(g(x))`` is a squarefree polynomial over K, where ``a`` is the algebraic extension of ``K``. Examples ======== >>> _, x, y = ring('x y', QQ.algebraic_field(I)) >>> (x*y + y**2).sqf_norm() (1, x*y - I*x + y**2 - 3*I*y - 2, x**2*y**2 + x**2 + 2*x*y**3 + 2*x*y + y**4 + 5*y**2 + 4) | def sqf_norm(self, f):
domain = self.domain
if not domain.is_AlgebraicField:
raise DomainError(f'ground domain must be algebraic, got {domain}')
new_ring = self.to_ground().inject(*domain.symbols, front=True)
g = domain.mod.set_ring(new_ring)
s = 0
while True:
h = f.inject(front=True)
r = g.resultant(h)
if r.is_squarefree:
return s, f, r
f = f.compose({x: x - domain.unit for x in self.gens})
s += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sqf_norm(f):\n s, g, r = dmp_sqf_norm(f.rep, f.lev, f.dom)\n return s, f.per(g), f.per(r, dom=f.dom.dom)",
"def squared_frobenius_norm(x):\n # http://mathworld.wolfram.com/FrobeniusNorm.html\n # The gradient of KL[p,q] is not defined when p==q. The culprit is\n # tf.norm, i.e., we cannot use the commented out code.\n # return tf.square(tf.norm(x, ord=\"fro\", axis=[-2, -1]))\n return tf.reduce_sum(tf.square(x), axis=[-2, -1])",
"def sqr(f):\n return f.per(dmp_sqr(f.rep, f.lev, f.dom))",
"def normsq(self):\n return sum(x**2 for x in self.data)",
"def norm(self):\n mag_squared = self._sum_of_squares()\n return sqrt(mag_squared)",
"def _rsq(self):\n return self._ss_reg / self._ss_tot",
"def normFro(X):\n return norm(X)",
"def norm(self):\n C = np.prod([F.T @ F for F in self.factors], axis=0)\n return np.sqrt(np.sum(C))",
"def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)",
"def normsq(self):\n return abs(sum(self._ar * self._ar))",
"def rsqrt(data):\n return _make.rsqrt(data)",
"def norm_sqr(x):\n return inner_prod(x, x)[0]",
"def test_sym_sqrtm(self): \n # create random symmetric n x n matrix\n n = 5\n A = 5.0 * 2.0*(torch.rand(n,n) - 0.5)\n A = A + A.T\n\n # reference implementation of scipy\n sqA_scipy = sla.sqrtm(A.numpy())\n isqA_scipy = sla.inv(sla.sqrtm(A.numpy()))\n # my own implementation using pure torch functions\n sqA,isqA = (x.numpy() for x in _sym_sqrtm(A))\n \n self.assertTrue(np.isclose(sqA, sqA_scipy).all())\n self.assertTrue(np.isclose(isqA, isqA_scipy).all())",
"def zzX_sqf_p(f):\n return zzX_one_p(zzX_gcd(zzX_primitive(f)[1], zzX_diff(f)))",
"def scalar_sqrt(self, dst, src):\n return self._scalar_single_func('sqrt', dst, src)",
"def norm(self):\n\t\treturn np.sqrt(self.normSq())",
"def score_sqrt(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n score_full = self.score_full(params)\n params_vec = params.get_packed(use_sqrt=True)\n\n lin, quad = self._reparam()\n\n scr = 0.\n for i in range(len(params_vec)):\n v = lin[i] + 2 * np.dot(quad[i], params_vec)\n scr += score_full[i] * v\n\n if self._freepat is not None:\n return self._freepat.get_packed() * scr\n else:\n return scr",
"def weight_norm(W, s):\n _eps = numpy_floatX(1e-5)\n W_norms = tensor.sqrt((W * W).sum(axis=0, keepdims=True) + _eps)\n W_norms_s = W_norms * s # do this first to ensure proper broadcasting\n return W / W_norms_s",
"def sparse_square_norm(A: SparseTensor, out: torch.Tensor) -> torch.Tensor:\n if not A.is_csr:\n raise RuntimeError(\"Squared norm can only be applied on CSR tensors\")\n if not check_same_dtype(A, out):\n raise ValueError(\"All data-types must match\")\n if A.shape[0] != out.shape[0]:\n raise ValueError(\"Dimension 0 of A must match the length of tensor 'out'\")\n\n return norm_sq(A.indexptr, A.data, out)",
"def norm(self):\n return sqrt(self.dot(self))",
"def norm(x):\n return inner_prod(x, x)[0].sqrt_()",
"def f_norm(self,G):\n if isinstance(G,np.ndarray):\n nu = np.outer(self.nu_array,1./G**2)\n # sigma_inv = np.outer(1./self.sigma,1./G**2)\n else:\n nu = self.nu_array/G**2\n # sigma_inv = 1./self.sigma*1./G**2\n #f = self.A*np.sqrt(2.*self.a/np.pi)*(1.+(1./self.a/nu)**self.p)*np.sqrt(nu)*np.exp(-self.a*nu/2.)\n f = self.f_nu(nu)\n\n #norm = np.trapz(f,np.log(sigma_inv),axis=0)\n norm = trapz2(f,np.log(1./self.sigma))\n return norm",
"def rsq(self):\n return np.squeeze(self._rsq)",
"def sqrt(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sqrt()))",
"def hessian_sqrt(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n score0 = self.score_full(params)\n hess0 = self.hessian_full(params)\n\n params_vec = params.get_packed(use_sqrt=True)\n\n lin, quad = self._reparam()\n k_tot = self.k_fe + self.k_re2\n\n # Convert Hessian to new coordinates\n hess = 0.\n for i in range(k_tot):\n hess += 2 * score0[i] * quad[i]\n for i in range(k_tot):\n vi = lin[i] + 2*np.dot(quad[i], params_vec)\n for j in range(k_tot):\n vj = lin[j] + 2*np.dot(quad[j], params_vec)\n hess += hess0[i, j] * np.outer(vi, vj)\n\n return hess",
"def norm(self) -> float:\n return self.squared_norm()**0.5",
"def sqrt(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.sqrt())",
"def norm(self):\n\t\treturn math.sqrt(self.norm2())",
"def norm(x):\r\n return sqrt(np.numerical.sum(x**2))",
"def sqrt(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.sqrt(), diag_shape=self.diag_shape)"
] | [
"0.6828397",
"0.6347827",
"0.58674204",
"0.58420646",
"0.57301825",
"0.56841624",
"0.56582105",
"0.56257766",
"0.5603542",
"0.55873054",
"0.5552461",
"0.55447716",
"0.55371946",
"0.5521278",
"0.5477447",
"0.54771346",
"0.5461877",
"0.54501307",
"0.54418135",
"0.53796417",
"0.53401095",
"0.5336934",
"0.530898",
"0.53083265",
"0.53063637",
"0.5299868",
"0.52738804",
"0.52732944",
"0.52694875",
"0.5263808"
] | 0.7036989 | 0 |
Start twisted event loop and the fun should begin... brokerTimeout: how long to wait for a broker. Returns a negative number upon failure; otherwise, it never returns. | def start(config, brokerTimeout = 60.0):
manager = multiprocessing.Manager()
serverUpEvent = manager.Event()
broker = multiprocessing.Process(target=startSTOMPBroker, args=(config,serverUpEvent))
broker.daemon = True
broker.name = 'STOMP-Broker'
broker.start()
serverUpEvent.wait(brokerTimeout)
if not serverUpEvent.is_set():
logger.fatal("Broker not available after %.1f seconds. Giving up", brokerTimeout)
return -1
#host side logic
host = config.get('Broker', 'host')
port = int(config.get('Broker', 'port'))
username = config.get('Broker', 'username')
password = config.get('Broker', 'password')
hostEngine = HostStompEngine(config)
stompProtocolFactory = StompProtocolFactory(hostEngine, username, password)
HostXMLRPCService(config).makeEngineAccesible(hostEngine)
reactor.connectTCP(host, port, stompProtocolFactory)
reactor.run() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_running():\n log.msg('reactor_loop Starting')\n try:\n conn = client.connect(reactor)\n si446x_do = Si446xComponent(conn)\n conn.addCallback(si446x_do.start)\n conn.addErrback(si446x_do.on_error)\n except error.DBusException, e:\n log.msg('reactor_loop Setup Error: {}'.format(e))\n reactor.stop()",
"def start(self):\n if not self._connected:\n self._client.connect(self._addr, port=self._port, keepalive=60, bind_address=\"\")\n self._client.loop_start()\n self._connected = True\n logger.info(\"Connection with MQTT Broker at %s:%d estabilished.\", self._addr, self._port)",
"def test_main():\n\n listener = Qe2ServerListener('', 4000)\n reactor.run()",
"def reactor_loop():\n def on_running():\n \"\"\"\n called when the twisted reactor is running\n \"\"\"\n log.msg('reactor_loop Starting')\n try:\n conn = client.connect(reactor)\n si446x_do = Si446xComponent(conn)\n conn.addCallback(si446x_do.start)\n conn.addErrback(si446x_do.on_error)\n except error.DBusException, e:\n log.msg('reactor_loop Setup Error: {}'.format(e))\n reactor.stop()\n\n signal.signal(signal.SIGINT, SIGINT_CustomEventHandler)\n signal.signal(signal.SIGHUP, SIGINT_CustomEventHandler)\n reactor.callWhenRunning(on_running)\n reactor.run()",
"def connect(self):\n\t\tself.printed_sub = False\n\t\tself.client.connect(BROKER)\n\t\tself.client.loop_forever()",
"async def async_start(self) -> None:\n\n self._shutdown = False\n\n # Start up the LifeSOS interface\n self._baseunit.start()\n\n # Connect to the MQTT broker\n self._mqtt_was_connected = False\n if self._config.mqtt.uri.port:\n self._mqtt.connect_async(\n self._config.mqtt.uri.hostname,\n self._config.mqtt.uri.port,\n keepalive=Translator.KEEP_ALIVE)\n else:\n self._mqtt.connect_async(\n self._config.mqtt.uri.hostname,\n keepalive=Translator.KEEP_ALIVE)\n\n # Start processing MQTT messages\n self._mqtt.loop_start()",
"def start_call_back_loop(loop: asyncio.AbstractEventLoop) -> None:\n asyncio.set_event_loop(loop)\n loop.run_forever()",
"def start(self) -> None:\n conn_manager = ConnectionManager(broker_host=self.broker_host, queue=self.queue)\n channel = conn_manager.start_channel()\n channel.basic_consume(queue=self.queue, on_message_callback=self.callback)\n\n try:\n print(\"PV Simulator...\")\n channel.start_consuming()\n except KeyboardInterrupt:\n pass",
"def test_runsUntilAsyncCallback(self):\n timePassed = []\n def main(reactor):\n finished = defer.Deferred()\n reactor.callLater(1, timePassed.append, True)\n reactor.callLater(2, finished.callback, None)\n return finished\n r = _FakeReactor()\n exitError = self.assertRaises(\n SystemExit, task.react, main, _reactor=r)\n self.assertEqual(0, exitError.code)\n self.assertEqual(timePassed, [True])\n self.assertEqual(r.seconds(), 2)",
"def test_eventloop_api_reactor(self):\n from twisted.internet import reactor\n _main.no_setup()\n self.assertIdentical(_main._reactor, reactor)",
"def run_reactor(self):\n self.reactor.run()",
"def init(\n self,\n ) -> bool:\n success = True\n try:\n self.client = mqtt.Client(client_id=\"Draco\", protocol=mqtt.MQTTv5)\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n self.client.connect(\n host=self._config[\"broker_ip\"], port=self._config[\"broker_port\"]\n )\n self.client.loop_start()\n\n except Exception as error:\n print(f\"Process {self._pid} - \" + repr(error))\n success = False\n return success",
"def _start_in_thread(self):\n return spawn_waitready(self._listen, self.start)[0]",
"def __call__(self, timeout=None, *args, **kwargs):\n if timeout:\n self.timeout = timeout\n started_observer = self.start(timeout, *args, **kwargs)\n if started_observer:\n return started_observer.await_done(*args, **kwargs)\n # TODO: raise ConnectionObserverFailedToStart",
"def start(self):\n self._connect()\n self._init_exchange()\n self._init_queue()\n self._bind_queue()",
"def _mqttActor(self) -> bool:\n\t\tself.isStopped = False\n\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.INFO, 'MQTT: client started')\n\t\twhile not self.isStopped:\n\t\t\tself.mqttClient.loop_forever()\t# Will return when disconnect() is called\n\t\tif self.messageHandler:\n\t\t\tself.messageHandler.onShutdown(self)\n\t\treturn True",
"def run(self) -> None:\n\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.DEBUG, f'MQTT: client name: {self.clientID}')\n\t\tself.mqttClient = mqtt.Client(client_id=self.clientID, clean_session=False if self.clientID else True)\t# clean_session=False is defined by TS-0010\n\n\t\t# Enable SSL\n\t\tif self.useTLS:\n\t\t\tself.mqttClient.tls_set(ca_certs=self.caFile, cert_reqs=ssl.CERT_REQUIRED if self.verifyCertificate else ssl.CERT_NONE)\n\n\t\t# Set username/password\n\t\tif self.username and self.password:\n\t\t\tself.mqttClient.username_pw_set(self.username, self.password)\n\t\t\n\t\tself.mqttClient.on_connect \t\t= self._onConnect\n\t\tself.mqttClient.on_disconnect\t= self._onDisconnect\n\t\tself.mqttClient.on_log\t\t\t= self._onLog\n\t\tself.mqttClient.on_subscribe\t= self._onSubscribe\n\t\tself.mqttClient.on_unsubscribe\t= self._onUnsubscribe\n\t\tself.mqttClient.on_message\t\t= self._onMessage\n\n\t\ttry:\n\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.DEBUG, f'MQTT: connecting to host:{self.address}, port:{self.port}, keepalive: {self.keepalive}, bind: {self.bindIF}')\n\t\t\tself.mqttClient.connect(host=self.address, port=self.port, keepalive=self.keepalive, bind_address=self.bindIF)\n\t\texcept Exception as e:\n\t\t\tif self.messageHandler:\n\t\t\t\tself.messageHandler.logging(self.mqttClient, logging.ERROR, f'MQTT: cannot connect to broker: {e}')\n\t\t\t\tself.messageHandler.onError(self, -1)\n\n\t\t# Actually start the actor to run the MQTT client as a thread\n\t\tself.actor = BackgroundWorkerPool.newActor(self._mqttActor, name='MQTTClient').start()",
"async def async_connect(self) -> None:\n # pylint: disable-next=import-outside-toplevel\n import paho.mqtt.client as mqtt\n\n result: int | None = None\n try:\n result = await self.hass.async_add_executor_job(\n self._mqttc.connect,\n self.conf[CONF_BROKER],\n self.conf.get(CONF_PORT, DEFAULT_PORT),\n self.conf.get(CONF_KEEPALIVE, DEFAULT_KEEPALIVE),\n )\n except OSError as err:\n _LOGGER.error(\"Failed to connect to MQTT server due to exception: %s\", err)\n\n if result is not None and result != 0:\n _LOGGER.error(\n \"Failed to connect to MQTT server: %s\", mqtt.error_string(result)\n )\n\n self._mqttc.loop_start()",
"def test_reactor_stop_unblocks_EventualResult(self):\n program = \"\"\"\\\nimport os, threading, signal, time, sys\n\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet import reactor\n\nimport crochet\ncrochet.setup()\n\[email protected]_in_reactor\ndef run():\n reactor.callLater(0.1, reactor.stop)\n return Deferred()\n\ner = run()\ntry:\n er.wait(timeout=10)\nexcept crochet.ReactorStopped:\n sys.exit(23)\n\"\"\"\n process = subprocess.Popen([sys.executable, \"-c\", program],\n cwd=crochet_directory)\n self.assertEqual(process.wait(), 23)",
"def test_reactor_stop_unblocks(self):\n program = \"\"\"\\\nimport os, threading, signal, time, sys\n\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet import reactor\n\nimport crochet\ncrochet.setup()\n\n@crochet.%s\ndef run():\n reactor.callLater(0.1, reactor.stop)\n return Deferred()\n\ntry:\n er = run()\nexcept crochet.ReactorStopped:\n sys.exit(23)\n\"\"\" % (self.DECORATOR_CALL, )\n process = subprocess.Popen([sys.executable, \"-c\", program],\n cwd=crochet_directory)\n self.assertEqual(process.wait(), 23)",
"def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()",
"def run(self):\n\n if self._quit_time is not None:\n self.schedule_quit_time(self._quit_time)\n\n # Run the broker until it, and thus the whole scale client, have a stop event fully propagated\n self.__broker.run()",
"def test_connect_success():\n\n t = Thread(target=setup_socket)\n t.start()\n\n data_sender = DataSender('127.0.0.1', 12345)\n server_response = data_sender.notify('test')\n\n assert server_response == 'ok'\n\n data_sender.close()\n t.join()",
"def start(self):\n if not self._host:\n print(\"No host selected, starting local instance.\")\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = quartjes.controllers.stock_exchange2.StockExchange2()\n else:\n reactor.callLater(0, self._connect) #@UndefinedVariable\n if not reactor.running: #@UndefinedVariable\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n\n self._database = self.get_service_interface(\"database\")\n self._stock_exchange = self.get_service_interface(\"stock_exchange\")",
"def acqstart(self):\n return 0",
"async def twisted_sleep(delay: float, twisted_reactor: \"SygnalReactor\") -> None:\n deferred: Deferred[None] = Deferred()\n twisted_reactor.callLater(delay, deferred.callback, None)\n await deferred",
"async def server_loop(host=None, port=23, evt=None, protocol_factory=TelnetServer, shell=None, log=None, **kwds):\n \"\"\"\n :param float connect_maxwait: If the remote end is not compliant, or\n otherwise confused by our demands, the shell continues anyway after the\n greater of this value has elapsed. A client that is not answering\n option negotiation will delay the start of the shell by this amount.\n \"\"\"\n\n protocol_factory = protocol_factory or TelnetServer\n l = await anyio.create_tcp_listener(local_host=host, local_port=port)\n log = log or logging.getLogger(__name__)\n if shell is None:\n async def shell(_s):\n while True:\n await anyio.sleep(99999)\n async def serve(s):\n async with protocol_factory(s, log=log, **kwds) as stream:\n await shell(stream)\n\n log.info('Server ready on {0}:{1}'.format(host, port))\n if evt is not None:\n evt.set()\n await l.serve(serve)",
"def run(self, timeout=None):\n self.factory.manager.run()\n\n wait_connect = threading.Event()\n self.factory.on_ready(lambda _: wait_connect.set())\n\n if not wait_connect.wait(timeout):\n raise Exception('Failed to connect to ROS')",
"def start_tor_with_timer(reactor, config, control_port, tor_binary, data_dir,\n bridges, timeout):\n error_msg = \"Bootstrapping has exceeded the timeout limit...\"\n with_timeout = deferred_timeout(timeout, e=error_msg)(start_tor)\n try:\n setup = yield with_timeout(reactor, config, control_port, tor_binary,\n data_dir, process_cb=setup_done,\n process_eb=setup_fail)\n except TimeoutError, te:\n log.err(te)\n defer.returnValue(None)\n #except Exception, e:\n # log.err(e)\n # defer.returnValue(None)\n else:\n state = yield remove_public_relays(setup, bridges)\n defer.returnValue(state)",
"def run(self):\n\n if reactor.running:\n return\n\n self._thread = threading.Thread(target=reactor.run, args=(False,))\n self._thread.daemon = True\n self._thread.start()"
] | [
"0.6220351",
"0.62159324",
"0.61877704",
"0.6160593",
"0.61132985",
"0.59028834",
"0.58682054",
"0.5843688",
"0.5822215",
"0.5798466",
"0.57124496",
"0.57084095",
"0.566796",
"0.56646776",
"0.56603914",
"0.5605297",
"0.55968124",
"0.55797726",
"0.55603266",
"0.55498093",
"0.5531244",
"0.5510697",
"0.551031",
"0.5508833",
"0.5504746",
"0.54844993",
"0.54838544",
"0.5482441",
"0.546809",
"0.54662997"
] | 0.6690256 | 0 |
Calculates the solar noon (the time when the sun is at its highest point). | def solar_noon(self, date=None, local=True):
if self.astral is None:
self.astral = Astral()
if date is None:
date = datetime.date.today()
noon = self.astral.solar_noon_utc(date, self.longitude)
if local:
return noon.astimezone(self.tz)
else:
return noon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solar_noon_local(LonDegE):\n return 12.",
"def solar_noon_utc(self, date, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n newt = self._jday_to_jcentury(julianday + 0.5 + longitude / 360.0)\n\n eqtime = self._eq_of_time(newt)\n timeUTC = 720.0 + (longitude * 4.0) - eqtime\n\n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n noon = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return noon",
"def _solar_time(date, lon, lat):\n # IDL is computing time_t as hours and fractional minutes\n time_t = ee.Date(date).get('hour').add(\n ee.Date(date).get('minute').divide(60))\n\n # This will return the hour floating point value\n # time_t = ee.Date(date).get('hour').add(ee.Date(date).getFraction('hour'))\n\n # CGM - DOY and hour could be images in order to make these expressions\n julian = _to_jd(date)\n\n # Sunrise time\n julian_ = time_t.divide(24.0).add(julian)\n j_cen = julian_.add(0.5 - 2451545.0).divide(36525.0)\n # CGM - Does the mod happen before or after the multiply\n lon_sun = j_cen.multiply(0.0003032).add(36000.76983) \\\n .multiply(j_cen).mod(360.0).add(280.46646).subtract(360)\n an_sun = j_cen.multiply(-0.0001537).add(35999.05029) \\\n .multiply(j_cen).add(357.52911)\n ecc = j_cen.multiply(0.0000001267).add(0.000042037) \\\n .multiply(j_cen).multiply(-1).add(0.016708634)\n ob_ecl = j_cen.multiply(-0.001813).add(0.00059) \\\n .multiply(j_cen).add(46.815) \\\n .multiply(j_cen).multiply(-1).add(21.448) \\\n .divide(60.0).add(26).divide(60).add(23)\n ob_corr = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).cos() \\\n .multiply(0.00256).add(ob_ecl)\n var_y = ob_corr.divide(2.0).multiply(deg2rad).tan().multiply(\n ob_corr.divide(2.0).multiply(deg2rad).tan())\n eq_t = (\n lon_sun.multiply(2.0).multiply(deg2rad).sin().multiply(var_y)\n .subtract(an_sun.multiply(deg2rad).sin().multiply(ecc).multiply(2.0))\n .add(an_sun.multiply(deg2rad).sin()\n .multiply(lon_sun.multiply(2.0).multiply(deg2rad).cos())\n .multiply(var_y).multiply(ecc).multiply(4.0))\n .subtract(lon_sun.multiply(4.0).multiply(deg2rad).sin()\n .multiply(var_y).multiply(var_y).multiply(0.5))\n .subtract(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(ecc).multiply(ecc).multiply(1.25))\n .multiply(4.0).multiply(rad2deg))\n sun_eq = (\n an_sun.multiply(deg2rad).sin().multiply(\n j_cen.multiply(0.000014).add(0.004817)\n .multiply(j_cen).multiply(-1).add(1.914602))\n .add(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(j_cen.multiply(-0.000101).add(0.019993)))\n .add(an_sun.multiply(3.0).multiply(deg2rad).sin().multiply(0.000289)))\n sun_true = sun_eq.add(lon_sun)\n sun_app = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).sin() \\\n .multiply(-0.00478).subtract(0.00569).add(sun_true)\n\n # CGM - Intentionally not converting back to degrees\n d = ob_corr.multiply(deg2rad).sin() \\\n .multiply(sun_app.multiply(deg2rad).sin()) \\\n .asin()\n\n # CGM - Functions below are lat/lon dependent and can be written as\n # ee.Image expressions\n # CGM - d is still in radians, not converting\n ha_t = lat.expression(\n 'acos((cos(90.833 * pi / 180) / (cos(lat) * cos(d))) - tan(lat) * tan(d))'\n ' * (180 / pi)',\n {'lat': lat, 'd': d, 'pi': math.pi})\n\n # print('\\n{:10s} {:.12f}'.format('julian_', julian_.getInfo()))\n # print('{:10s} {:.12f}'.format('time_t', time_t.getInfo()))\n # print('{:10s} {:.12f}'.format('j_cen', j_cen.getInfo()))\n # print('{:10s} {:.12f}'.format('lon_sun', lon_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('an_sun', an_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('ecc', ecc.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_ecl', ob_ecl.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_corr', ob_corr.getInfo()))\n # print('{:10s} {:.12f}'.format('var_y', var_y.getInfo()))\n # print('{:10s} {:.12f}'.format('eq_t', eq_t.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_eq', sun_eq.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_true', sun_true.getInfo()))\n # 
print('{:10s} {:.12f}'.format('sun_app', sun_app.getInfo()))\n # print('{:10s} {:.12f}'.format('d', d.getInfo()))\n\n return d, eq_t, ha_t",
"def solar_time_index(self):\n return self.data.solar_time_index",
"def omega_sun_snodgrass90(lat):\n return differential_rotation(lat, 14.71, -2.39, -1.78)",
"def equation_of_time(cls, date):\n offset = cls.sine(cls.mean_position(date, cls.ANOMALISTIC_YEAR))\n equation_sun = (offset * angle(57, 18, 0) * (14/360 - (abs(offset) / 1080)))\n return ((cls.daily_motion(date) / 360) * (equation_sun / 360) * cls.SIDEREAL_YEAR)",
"def get_vsolar(self):\n return self.read_register(4098, 1, 3)",
"def update(self, time):\n\n delta_J2000 = self.time - constant.J2000_DATE\n n_days_J2000 = delta_J2000.days + delta_J2000.seconds/86400\n\n mean_lon_sun = 280.460 + 0.9856474*n_days_J2000\n mean_lon_sun %= 360.0\n mean_lon_sun *= constant.DEG_TO_RAD\n\n mean_anomaly_sun = 357.528 + 0.9856003*n_days_J2000\n mean_anomaly_sun %= 360.0\n mean_anomaly_sun *= constant.DEG_TO_RAD\n\n ecliptic_lon_sun = ( mean_lon_sun/constant.DEG_TO_RAD +\n 1.915*math.sin(mean_anomaly_sun) +\n 0.020*math.sin(2.0*mean_anomaly_sun) )\n ecliptic_lon_sun *= constant.DEG_TO_RAD\n\n dist_earth_to_sun = (1.00014 -\n 0.01671*math.cos(mean_anomaly_sun) -\n 0.00014*math.cos(2.0*mean_anomaly_sun) )\n dist_earth_to_sun *= constant.AU_TO_KM\n\n obliquity_ecliptic = 23.439 - 0.0000004*n_days_J2000\n obliquity_ecliptic *= constant.DEG_TO_RAD\n\n x_J2000_sun = math.cos(ecliptic_lon_sun)\n y_J2000_sun = math.cos(obliquity_ecliptic)*math.sin(ecliptic_lon_sun)\n z_J2000_sun = math.sin(obliquity_ecliptic)*math.sin(ecliptic_lon_sun)\n\n self.direction = vt.Vector([x_J2000_sun, y_J2000_sun, z_J2000_sun])\n self.distance = dist_earth_to_sun\n self.time = time",
"def solar_noon_utc(LonDegE):\n _timezone = array([-180, -172.5, -157.5, -142.5, -127.5, -112.5, -97.5, -82.5, -67.5, -52.5, -37.5, -22.5, -7.5, 7.5, 22.5, 37.5, 52.5, 67.5, 82.5, 97.5, 112.5, 127.5, 142.5, 157.5, 172.5, 180]).repeat(2, 0)[1:-1].reshape(-1, 2)\n for i, (low, high) in enumerate(_timezone):\n if LonDegE >= low:\n if LonDegE <= high:\n return 12 -(-12 + i)",
"def time_NEURON():\n recorded_time = h.Vector()\n recorded_time.record(h._ref_t)\n return recorded_time",
"def moon_phase(\n datetime_index,\n epsilon=1e-6,\n epoch=2444237.905,\n ecliptic_longitude_epoch=278.833540,\n ecliptic_longitude_perigee=282.596403,\n eccentricity=0.016718,\n moon_mean_longitude_epoch=64.975464,\n moon_mean_perigee_epoch=349.383063,\n):\n # set time to Noon if not otherwise given, as midnight is confusingly close to previous day\n if np.sum(datetime_index.hour) == 0:\n datetime_index = datetime_index + pd.Timedelta(hours=12)\n days = datetime_index.to_julian_date() - epoch\n\n # Mean anomaly of the Sun\n a = (360 / 365.2422) * days\n N = a - 360.0 * np.floor(a / 360.0)\n N = N + ecliptic_longitude_epoch - ecliptic_longitude_perigee\n # Convert from perigee coordinates to epoch 1980\n M = a - 360.0 * np.floor(N / 360.0)\n\n m = torad(M)\n e = m.copy()\n while 1:\n delta = e - eccentricity * np.sin(e) - m\n e = e - delta / (1.0 - eccentricity * np.cos(e))\n if abs(delta).max() <= epsilon:\n break\n\n Ec = sqrt((1 + eccentricity) / (1 - eccentricity)) * np.tan(e / 2.0)\n # True anomaly\n Ec = 2 * todeg(np.arctan(Ec))\n # Suns's geometric ecliptic longuitude\n a = Ec + ecliptic_longitude_perigee\n lambda_sun = a - 360.0 * np.floor(a / 360.0)\n\n # Calculation of the Moon's position\n\n # Moon's mean longitude\n a = 13.1763966 * days + moon_mean_longitude_epoch\n moon_longitude = a - 360.0 * np.floor(a / 360.0)\n\n # Moon's mean anomaly\n a = moon_longitude - 0.1114041 * days - moon_mean_perigee_epoch\n MM = a - 360.0 * np.floor(a / 360.0)\n\n # Moon's ascending node mean longitude\n # MN = fixangle(c.node_mean_longitude_epoch - 0.0529539 * day)\n\n evection = 1.2739 * np.sin(torad(2 * (moon_longitude - lambda_sun) - MM))\n\n # Annual equation\n annual_eq = 0.1858 * np.sin(torad(M))\n\n # Correction term\n A3 = 0.37 * np.sin(torad(M))\n\n MmP = MM + evection - annual_eq - A3\n\n # Correction for the equation of the centre\n mEc = 6.2886 * np.sin(torad(MmP))\n\n # Another correction term\n A4 = 0.214 * np.sin(torad(2 * MmP))\n\n # Corrected longitude\n lP = moon_longitude + evection + mEc - annual_eq + A4\n\n # Variation\n variation = 0.6583 * np.sin(torad(2 * (lP - lambda_sun)))\n\n # True longitude\n lPP = lP + variation\n\n # Calculation of the phase of the Moon\n\n # Age of the Moon, in degrees\n moon_age = lPP - lambda_sun\n\n # Phase of the Moon\n moon_phase = (1 - np.cos(torad(moon_age))) / 2.0\n return moon_phase\n # return pd.Series(moon_phase, index=datetime_index)",
"def lunarperigee(time):\n dtor = np.pi / 180\n t1 = 1 + time\n t2 = t1 * t1\n t3 = t2 * t1\n perigee = (\n 334.329653 * dtor\n + 4069.0340329575 * dtor * t1\n - 0.010325 * dtor * t2\n - 1.2e-5 * dtor * t3\n )\n return perigee",
"def zodiac(cls, tee):\n return quotient(float(cls.solar_longitude(tee)), 30) + 1",
"def alt_sunrise(cls, date):\n rise = cls.UJJAIN.dawn(date, angle(0, 47, 0))\n return 1/24 * 1/60 * iround(rise * 24 * 60)",
"def hindu_lunar_station(date):\n critical = HinduDate.sunrise(date)\n return quotient(HinduLunarDate.longitude(critical), angle(0, 800, 0)) + 1",
"def sunset(cls, date):\n return (date + Clock.days_from_hours(18) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n (((1577917828/1582237828) / 360) *\n (- cls.ascensional_difference(date, cls.LOCATION) +\n (3/4 * cls.solar_sidereal_difference(date)))))",
"def SunPosition(time):\n # Correct for light travel time from the Sun.\n # Otherwise season calculations (equinox, solstice) will all be early by about 8 minutes!\n adjusted_time = time.AddDays(-1.0 / C_AUDAY)\n earth2000 = _CalcEarth(adjusted_time)\n sun2000 = [-earth2000.x, -earth2000.y, -earth2000.z]\n\n # Convert to equatorial Cartesian coordinates of date.\n stemp = _precession(sun2000, adjusted_time, _PrecessDir.From2000)\n sun_ofdate = _nutation(stemp, adjusted_time, _PrecessDir.From2000)\n\n # Convert equatorial coordinates to ecliptic coordinates.\n true_obliq = math.radians(adjusted_time._etilt().tobl)\n return _RotateEquatorialToEcliptic(sun_ofdate, true_obliq, time)",
"def calculate_sun_earth_distance(doy):\n #The eccentricity of the Earth's orbit is currently about 0.0167 (wiki)\n ec=0.0167\n d=1+ec*np.sin(2*np.pi*(doy-93.5)/365)\n return d",
"def solar_model():\n \n latitude, longitude, timezone, elevation = location_input()\n year, time = time_input()\n\n lat_r = latitude/180*np.pi\n lon_r = longitude/180*np.pi \n n = 0\n for i in range(1900,year):\n if i%4 == 0:\n n += 366\n else:\n n+=365\n JulD = n + time + 2415018.5 - (timezone)/24\n LT = time - int(time)\n JC = (JulD - 2451545) / 36525\n x = 46.815 + JC * (0.00059 - JC * 0.001813)\n M_OE = 23 + (26 + (21.448 - JC * x) / 60) / 60\n EEO = 0.016708634 - JC * (0.000042037 + 0.0000001267 * JC)\n GMAS = 357.52911 + JC * (35999.05029 - 0.0001537 * JC)\n GMAS_r = m.radians(GMAS)\n GMLS = (280.46646 + JC * (36000.76983 + JC * 0.0003032))%360\n GMLS_r = m.radians(GMLS)\n Obliq_C = M_OE + 0.00256 * np.cos((125.04 - 1934.136 * JC) / 180 * np.pi)\n Obliq_C_r = m.radians(Obliq_C)\n SEC = np.sin(GMAS_r) * (1.914602 - JC * (0.004817 + 0.000014 * JC)) + np.sin(2 * GMAS_r) * (0.019993 - 0.000101 * JC) + np.sin(3 * GMAS_r) * 0.000289\n STL = GMLS + SEC\n SAL = STL - 0.00569 - 0.00478 * np.sin((125.04 - 1934.136 * JC) / 180 * np.pi)\n SAL_r = m.radians(SAL)\n sin_Delta = np.sin(Obliq_C_r) * np.sin(SAL_r)\n Delta_r = np.arcsin(sin_Delta) #in radians \n Var_y = np.tan((Obliq_C / 2) / 180 * np.pi) * np.tan((Obliq_C / 2) / 180 * np.pi)\n EOT_prime = Var_y * np.sin(2 * GMLS_r) - 2 * EEO * np.sin(GMAS_r) + 4 * EEO * Var_y * np.sin(GMAS_r) * np.cos(2 * GMLS_r) - 0.5 * Var_y * Var_y * np.sin(4 * GMLS_r) - 1.25 * EEO * EEO * np.sin(2 * GMAS_r)\n EOT = 4 * EOT_prime / np.pi * 180 \n TST = (LT * 1440 + EOT + 4 * longitude - 60 * timezone)%1440\n if TST / 4 < 0:\n Omega = TST/4+180\n else:\n Omega = TST/4 - 180 \n Omega_r = m.radians(Omega)\n \n cos_Zenith = np.sin(lat_r) * np.sin(Delta_r) + np.cos(lat_r) * np.cos(Delta_r) * np.cos(Omega_r)\n Zenith_r = np.arccos(cos_Zenith) #in radians\n Aprime_r = np.arccos((np.sin(lat_r) * np.cos(Zenith_r) - np.sin(Delta_r)) / (np.cos(lat_r) * np.sin(Zenith_r)))\n Aprime = Aprime_r / np.pi * 180\n if Omega > 0:\n Azimuth = (Aprime + 180) % 360 #in degrees\n else:\n Azimuth = (540 - Aprime) % 360 #in degrees \n Azimuth_r = Azimuth / 180 * np.pi\n Elev_angle = (np.pi)/2 - Zenith_r\n\n \n # calculate incidence angle\n # Beta is equal to angle of tilted surface to horizontal (in radians)\n Beta = 45 # in degrees\n Beta_r = m.radians(Beta)\n \n cos_incidence = np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r) * np.sin(Azimuth_r) * np.sin(Omega_r) \n incidence_ang_r = np.arccos(cos_incidence)\n \n return Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle",
"def sunlongitude(time):\n B0 = 36000.7695\n C0 = 280.4659\n # fmt: off\n A = np.array([19147e-4, 200e-4, 48e-4, 20e-4, 18e-4, 18e-4, \\\n 15e-4, 13e-4, 7e-4, 7e-4, 7e-4, 6e-4, \\\n 5e-4, 5e-4, 4e-4, 4e-4])\n B = np.array([35999.050, 71998.1, 1934, 32964, 19, \\\n 445267, 45038, 22519, 65929, 3035, \\\n 9038, 33718, 155, 2281, 29930, \\\n 31557])\n C = np.array([267.520, 265.1, 145, 158, 159, 208, \\\n 254., 352, 45, 110, 64, 316, \\\n 118., 221, 48, 161])\n # fmt: on\n RAD = 0.0174532925199433\n A[0] = 1.9147 - 0.0048 * time\n tempb = (B * time + C) * RAD\n amp = A * np.cos(tempb)\n sunlon = np.sum(amp)\n sunlon = (sunlon + B0 * time + C0) * RAD\n return sunlon",
"def sunrise(cls, date):\n return (date + Clock.days_from_hours(6) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n ((1577917828/1582237828 / 360) *\n (cls.ascensional_difference(date, cls.LOCATION) +\n (1/4 * cls.solar_sidereal_difference(date)))))",
"def nancay():\n return coord.EarthLocation(lat=47.376511*u.deg, lon=2.1924002*u.deg)",
"def apparent_magnitude(sat, topos, earth, sun, time):\n\n position = earth + sat\n observer = earth + topos\n barycentric_o = position.at(time).observe(observer)\n barycentric_s = position.at(time).observe(sun)\n phase_angle = barycentric_o.separation_from(barycentric_s).radians\n _, _, distance = barycentric_o.radec()\n term_1 = -1.3 # standard satellite intrinsic magnitude\n term_2 = +5.0 * np.log10(distance.km / 1000.)\n arg = np.sin(phase_angle) + (np.pi - phase_angle) * np.cos(phase_angle)\n term_3 = -2.5 * np.log10(arg)\n return term_1 + term_2 + term_3",
"def sidereal_zodiac(tee):\n return quotient(int(sidereal_solar_longitude(tee)), 30) + 1",
"def sidereal_solar_longitude(tee):\n return mod(Solar.solar_longitude(tee) - Astro.precession(tee) + SIDEREAL_START, 360)",
"def earth_tide(theta, lamda, gtime):\n\n global dsz, dcz, dsl, dcl, ssz, scz, ssl, scl, dpar, sdist # bpos common block\n global h, k, l # love common block\n h = [0.6114, 0.2891, 0.175]\n k = [0.304, 0.09421, 0.043]\n l = [0.0832, 0.0145, 0.0103]\n\n global azt, azs # azimut common block\n global etmut # tdiff common block\n global moon # sunny common block\n moon = 0\n # hardwire these - you can only send it ONE droptime\n deltat = 1\n NPT = 1\n\n temp_time = num2date(gtime)\n\n YY = temp_time.year\n MO = temp_time.month\n DD = temp_time.day\n HH = temp_time.hour\n MM = temp_time.minute\n SS = temp_time.second\n # Initialize variables\n irl = 1\n iflag = 0\n ntotl = 1\n iget = [0, 0, 0, 0, 0, 0, 0] # ' !!!\n ispc = [0, 0, 0, 0] # ' !!!\n ntw = [1, 0, 0] # ' !!!\n ioptn = 't'\n ielement = 0\n # \tdata statements for input and output unit numbers (on terminal I/O)\n inun = 5\n ioun = 6\n nptpb = 6\n\n yr1 = YY - 1900\n day1 = date2num(datetime(YY, MO, DD))\n # \tfind times in hours from 0 hr, 1 jan 1900\n # matlab:\n ts = (\n SS / 3600\n + MM / 60\n + HH\n + 24 * (day1 - 1)\n + 8760 * yr1\n + 24 * np.fix((yr1 - 1) / 4)\n )\n # python:\n dj = date_to_julian_day(datetime(YY, MO, DD))\n djref = date_to_julian_day(datetime(1899, 12, 31, 0, 0, 0))\n delta_dj = (\n dj - djref\n ) # difference in days from current date (0hr) to 0hr, 1 jan 1900\n delta_djhr = float(delta_dj) * 24.0 + HH - 12.0 + MM / 60.0 + SS / 3600.0\n te = ts + (NPT - 1) * deltat / 3600\n d = deltat / 3600\n # terms=(te-ts)/d + 1\n terms = NPT\n\n # done asking questions - begin execution\n i = 1\n tt = ts\n sph(theta, lamda, 0)\n etmut = 41.184 + yr1 - 70\n # matlab:\n # t = (tt+12 + (etmut/3600))/876600\n t = (delta_djhr + etmut / 3600) / 876600\n # t is ephemeris time in julian centuries from 12 hr 0 jan 1900\n ephem(t)\n\n # calculate normalized gravity tides\n [grav, tilt, strain, gdc] = elastd(ntw)\n\n gravtide = 1.0e5 * grav\n # convert m/s² to mgal: 1m/s² = 100 gal = 100 000 mgal\n\n iflag = 1\n\n iterms = np.fix(terms)\n i = 1\n return gravtide",
"def nyquist(self):\n return 1 / (2 * np.median(np.diff(self.lc.time)))",
"def sundial_time(cls, tee):\n date = Clock.fixed_from_moment(tee)\n time = mod(tee, 1)\n q = ifloor(4 * time)\n if q == 0:\n a = cls.sunset(date - 1)\n b = cls.sunrise(date)\n t = Clock.days_from_hours(-6)\n elif q == 3:\n a = cls.sunset(date)\n b = cls.sunrise(date + 1)\n t = Clock.days_from_hours(18)\n else:\n a = cls.sunrise(date)\n b = cls.sunset(date)\n t = Clock.days_from_hours(6)\n return a + (2 * (b - a) * (time - t))",
"def solar_sidereal_difference(cls, date):\n return cls.daily_motion(date) * cls.rising_sign(date)",
"def power_output_existing_solar_rule(_m, g, y, s, t):\r\n\r\n if y != m.Y.last():\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_6[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * m.C_MC[g, y])\r\n == 0)\r\n\r\n else:\r\n return (- m.sigma_1[g, y, s, t] + m.sigma_6[g, y, s, t]\r\n - m.lamb[self.k(m, g), y, s, t]\r\n + (m.DELTA[y] * m.RHO[y, s] * (1 + (1 / m.INTEREST_RATE)) * m.C_MC[g, y])\r\n == 0)"
] | [
"0.6816998",
"0.66865885",
"0.6304783",
"0.6012745",
"0.60126984",
"0.59854287",
"0.592383",
"0.585429",
"0.5843106",
"0.5810727",
"0.58008647",
"0.5759068",
"0.5738313",
"0.57033736",
"0.570194",
"0.56586534",
"0.5638657",
"0.5631576",
"0.5618385",
"0.5601817",
"0.55713826",
"0.55486596",
"0.552072",
"0.5519554",
"0.55015796",
"0.5499403",
"0.54887146",
"0.5464477",
"0.5449517",
"0.5447395"
] | 0.6865988 | 0 |
Calculates the solar azimuth angle for a specific date/time. | def solar_azimuth(self, dateandtime=None):
if self.astral is None:
self.astral = Astral()
if dateandtime is None:
dateandtime = datetime.datetime.now(tz=self.tz)
return self.astral.solar_azimuth(dateandtime, self.latitude, self.longitude) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solar_azimuth(self, dateandtime, latitude, longitude):\n \n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n zone = -dateandtime.utcoffset().seconds / 3600.0\n utc_datetime = dateandtime.astimezone(pytz.utc)\n timenow = utc_datetime.hour + (utc_datetime.minute / 60.0) + (utc_datetime.second / 3600)\n\n JD = self._julianday(dateandtime.day, dateandtime.month, dateandtime.year)\n t = self._jday_to_jcentury(JD + timenow / 24.0)\n theta = self._sun_declination(t)\n Etime = self._eq_of_time(t)\n \n eqtime = Etime\n solarDec = theta # in degrees\n \n solarTimeFix = eqtime - (4.0 * longitude) + (60 * zone)\n trueSolarTime = dateandtime.hour * 60.0 + dateandtime.minute + dateandtime.second / 60.0 + solarTimeFix\n # in minutes\n \n while trueSolarTime > 1440:\n trueSolarTime = trueSolarTime - 1440\n \n hourangle = trueSolarTime / 4.0 - 180.0\n # Thanks to Louis Schwarzmayr for the next line:\n if hourangle < -180:\n hourangle = hourangle + 360.0\n \n harad = radians(hourangle)\n \n csz = sin(radians(latitude)) * sin(radians(solarDec)) + \\\n cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)\n \n if csz > 1.0:\n csz = 1.0\n elif csz < -1.0:\n csz = -1.0\n \n zenith = degrees(acos(csz))\n \n azDenom = (cos(radians(latitude)) * sin(radians(zenith)))\n \n if (abs(azDenom) > 0.001):\n azRad = ((sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))) / azDenom\n \n if abs(azRad) > 1.0:\n if azRad < 0:\n azRad = -1.0\n else:\n azRad = 1.0\n \n azimuth = 180.0 - degrees(acos(azRad))\n \n if hourangle > 0.0:\n azimuth = -azimuth\n else:\n if latitude > 0.0:\n azimuth = 180.0\n else:\n azimuth = 0#\n \n if azimuth < 0.0:\n azimuth = azimuth + 360.0\n \n return azimuth",
"def getAzimuthAngle(self):\n return self._azimuth",
"def azimuth(vv, v0, v1):\n with np.errstate(divide='ignore', invalid='ignore'):\n n0 = np.cross(v0, v1)\n n0 /= np.dual.norm(n0, axis=-1)[..., np.newaxis]\n nn = np.cross(v0, vv)\n nn /= np.dual.norm(nn, axis=-1)[..., np.newaxis]\n\n azi = np.arccos(np.sum(nn * n0, -1))\n if len(np.shape(azi)) > 0:\n azi[np.dot(vv, n0) < 0] *= -1\n # arbitrary angle where vv is (anti)parallel to v0\n azi[np.isnan(azi)] = 0\n elif np.isnan(azi):\n return 0\n elif np.dot(vv, v0) < 1 and azi > 0:\n azi *= -1\n\n return azi",
"def solar_elevation(self, dateandtime, latitude, longitude):\n \n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n\n zone = -dateandtime.utcoffset().seconds / 3600.0\n utc_datetime = dateandtime.astimezone(pytz.utc)\n timenow = utc_datetime.hour + (utc_datetime.minute / 60.0) + (utc_datetime.second / 3600)\n \n JD = self._julianday(dateandtime.day, dateandtime.month, dateandtime.year)\n t = self._jday_to_jcentury(JD + timenow / 24.0)\n theta = self._sun_declination(t)\n Etime = self._eq_of_time(t)\n \n eqtime = Etime\n solarDec = theta # in degrees\n \n solarTimeFix = eqtime - (4.0 * longitude) + (60 * zone)\n trueSolarTime = dateandtime.hour * 60.0 + dateandtime.minute + dateandtime.second / 60.0 + solarTimeFix\n # in minutes\n \n while trueSolarTime > 1440:\n trueSolarTime = trueSolarTime - 1440\n \n hourangle = trueSolarTime / 4.0 - 180.0\n # Thanks to Louis Schwarzmayr for the next line:\n if hourangle < -180:\n hourangle = hourangle + 360.0\n \n harad = radians(hourangle)\n \n csz = sin(radians(latitude)) * sin(radians(solarDec)) + \\\n cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)\n \n if csz > 1.0:\n csz = 1.0\n elif csz < -1.0:\n csz = -1.0\n \n zenith = degrees(acos(csz))\n \n azDenom = (cos(radians(latitude)) * sin(radians(zenith)))\n \n if (abs(azDenom) > 0.001):\n azRad = ((sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))) / azDenom\n \n if abs(azRad) > 1.0:\n if azRad < 0:\n azRad = -1.0\n else:\n azRad = 1.0\n \n azimuth = 180.0 - degrees(acos(azRad))\n \n if hourangle > 0.0:\n azimuth = -azimuth\n else:\n if latitude > 0.0:\n azimuth = 180.0\n else:\n azimuth = 0\n \n if azimuth < 0.0:\n azimuth = azimuth + 360.0\n \n exoatmElevation = 90.0 - zenith\n\n if exoatmElevation > 85.0:\n refractionCorrection = 0.0\n else:\n te = tan(radians(exoatmElevation))\n if exoatmElevation > 5.0:\n refractionCorrection = 58.1 / te - 0.07 / (te * te * te) + 0.000086 / (te * te * te * te * te)\n elif exoatmElevation > -0.575:\n step1 = (-12.79 + exoatmElevation * 0.711)\n step2 = (103.4 + exoatmElevation * (step1))\n step3 = (-518.2 + exoatmElevation * (step2))\n refractionCorrection = 1735.0 + exoatmElevation * (step3)\n else:\n refractionCorrection = -20.774 / te\n \n refractionCorrection = refractionCorrection / 3600.0\n \n solarzen = zenith - refractionCorrection\n \n solarelevation = 90.0 - solarzen\n \n return solarelevation",
"def fun_azimuth(self):\n\n energy_kev = self.energy_kev.get()\n hkl = self.hkl_magnetic.get()\n hkl = hkl.replace(',', ' ') # remove commas\n hkl = hkl.replace('(', '').replace(')', '') # remove brackets\n hkl = hkl.replace('[', '').replace(']', '') # remove brackets\n hkl = np.fromstring(hkl, sep=' ')\n\n azi = self.azim_zero.get()\n azi = azi.replace(',', ' ') # remove commas\n azi = azi.replace('(', '').replace(')', '') # remove brackets\n azi = azi.replace('[', '').replace(']', '') # remove brackets\n azi = np.fromstring(azi, sep=' ')\n\n pol = self.polval.get()\n if pol == u'\\u03c3-\\u03c3':\n pol = 's-s'\n elif pol == u'\\u03c3-\\u03c0':\n pol = 's-p'\n elif pol == u'\\u03c0-\\u03c3':\n pol = 'p-s'\n else:\n pol = 'p-p'\n\n F0 = self.resF0.get()\n F1 = self.resF1.get()\n F2 = self.resF2.get()\n\n isres = self.isres.get()\n if isres:\n # Resonant scattering\n self.xtl.Plot.simulate_azimuth_resonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol,\n F0=F0, F1=F1, F2=F2)\n plt.show()\n else:\n # Non-Resonant scattering\n self.xtl.Plot.simulate_azimuth_nonresonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol)\n plt.show()",
"def azimuth(self, other, projected=True):\n x0, y0 = self.x, self.y\n if self.crs != other.crs:\n x1, y1 = other.get_vertex(self.crs)[:2]\n else:\n x1, y1 = other.x, other.y\n\n if (x0, y0) == (x1, y1):\n az = np.nan\n elif projected and not isinstance(self.crs, GeographicalCRS):\n az = 90.0 - math.atan2(y1-y0, x1-x0)*180.0/math.pi\n az = (az+180) % 360 - 180\n else:\n lon0, lat0 = self.crs.project(x0, y0, inverse=True)\n lon1, lat1 = self.crs.project(x1, y1, inverse=True)\n az, _, _ = self.crs.inverse(lon0, lat0, lon1, lat1)\n return az",
"def azimuth(poly):\n num = len(poly) - 1\n vec = unit_normal(poly[0], poly[1], poly[num])\n vec_azi = np.array([vec[0], vec[1], 0])\n vec_n = np.array([0, 1, 0])\n # update by Santosh\n # angle2vecs gives the smallest angle between the vectors\n # so for a west wall angle2vecs will give 90\n # the following 'if' statement will make sure 270 is returned\n x_vector = vec_azi[0]\n if x_vector < 0:\n return 360 - angle2vecs(vec_azi, vec_n)\n else:\n return angle2vecs(vec_azi, vec_n)",
"def azimuth(self, right: GeoSpatialValue) -> ir.FloatingValue:\n return ops.GeoAzimuth(self, right).to_expr()",
"def horiz_angle(time, data):\n\n # TODO What should 0deg be? Set it to inline w/ target? facing target?\n\n # direction of the sun. measured in degrees counted clockwise from north.\n azimuth = data[time]['azimuth']\n\n h_angle = (azimuth / 2 - 90)\n\n # returns answer between -180 and 180 degrees\n return round(((h_angle + 180) % 360) - 180, 4)",
"def set_azimuth(self):\n self.azimuth = self.Calculations.convert_to_azimuth( self.declination, self.right_ascension, self.Latitude, self.LHA)\n if self.azimuth < 0:\n self.azimuth = self.azimuth + 360.0\n return self.azimuth\n else:\n pass\n return self.azimuth\n print('azimuth set to', self.azimuth)",
"def get_azimuth(self, degrees=True):\n if degrees:\n return math.degrees(self.current_location.az)\n else:\n return self.current_location.az",
"def IAngle(a, b, t):\n \n # http://www.engineersedge.com/material_science/moment-inertia-gyration-7.htm\n d = b - t \n y = b - (t*(2*d + a) + d**2)/(2*(d+a))\n I = 1/3 * (t*y**3 + a*(b-y)**3 - (a-t)*(b-y-t)**3)\n return I",
"def calculate_orbiting_angle(orbiting_center, raft):\n\n # note the negative sign before the first component, the y component\n # it is to make the orbiting angle in a right-handed coordiante.\n angle = np.arctan2(-(raft[1] - orbiting_center[1]), (raft[0] - orbiting_center[0])) * 180 / np.pi\n\n return angle",
"def calc_angle_of_incidence(g, lat, ha, tilt, teta_z):\n # surface normal vector\n n_E = sin(tilt)*sin(teta_z)\n n_N = sin(tilt)*cos(teta_z)\n n_Z = cos(tilt)\n # solar vector\n s_E = -cos(g)*sin(ha)\n s_N = sin(g)*cos(lat) - cos(g)*sin(lat)*cos(ha)\n s_Z = cos(g)*cos(lat)*cos(ha) + sin(g)*sin(lat)\n\n # angle of incidence\n teta_B = acos(n_E*s_E + n_N*s_N + n_Z*s_Z)\n return teta_B",
"def azimuth(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[0];",
"def rotated_equatorial_hour_angle(hour, location):\n equatorial_angle = equatorial_hour_angle(hour, location)\n equatorial_angle_from_solar_noon = equatorial_angle - np.pi\n # Angle currently is angle referenced from solar noon, positive (pm) towards the east.\n # Change to mathematical angle, anticlockwise from 0 in the east.\n return np.pi / 2 - equatorial_angle_from_solar_noon",
"def azimuth_update(self):\n self.current_azimuth = self.azimuth_encoder.get_degrees()\n azimuth_error = self.azimuth - float(self.current_azimuth)\n # print('goal azimuth', self.azimuth, 'current azimuth', self.azimuth_encoder.get_degrees(), 'difference in azimuth', azimuth_error)\n if azimuth_error >0:\n # print('positive azimuth')\n self.azimuth_motor.set_direction(1)\n elif azimuth_error > 0:\n # print('negative azimuth')\n self.azimuth_motor.set_direction(0)\n azimuth_error = abs(azimuth_error)\n self.azimuth_error = azimuth_error\n if azimuth_error >= 0:\n self.azimuth_motor.set_speed(0)\n if azimuth_error >= 35:\n self.azimuth_motor.set_speed(1)\n if azimuth_error >= 40:\n self.azimuth_motor.set_speed(2)\n if azimuth_error >= 80:\n self.azimuth_motor.set_speed(3)\n if azimuth_error >= 160:\n self.azimuth_motor.set_speed(4)\n if azimuth_error >= 280:\n self.azimuth_motor.set_speed(5)\n self.azimuth_error = azimuth_error\n print('debug_azimuth', self.current_azimuth, self.azimuth_error, self.azimuth_motor.speed)\n return self.azimuth_error",
"def get_azimuth(self):\n self.degrees = self.azimuth_encoder.get_degrees()\n self.tele_azimuth = self.Calculations.convert_degrees(self.degrees)\n return self.tele_azimuth",
"def azimuth_speed(self, degrees = True):\n return self.angularSpeed(self.future_location.az, self.old_location.az)",
"def angle(self, angle: int, time: int = 0, /) -> None:",
"def get_azimuth(self, p, az):\n az.value = self._get_azimuth(p, az.value)",
"def setAzimuthAngle(self, angle):\n angle = int(round(angle))\n if angle != self._azimuth:\n self._azimuth = angle\n self._updateLight()\n self.sigAzimuthAngleChanged.emit()",
"def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360",
"def polar_angle(self, p0, p1=None):\n if p1 == None:\n p1 = anchor\n y_span = p0[1] - p1[1]\n x_span = p0[0] - p1[0]\n return atan2(y_span, x_span)",
"def calc_surface_azimuth(xdir, ydir, B):\n B = radians(B)\n teta_z = degrees(asin(xdir / sin(B)))\n # set the surface azimuth with on the sing convention (E,N)=(+,+)\n if xdir < 0:\n if ydir <0:\n surface_azimuth = 180 + teta_z # (xdir,ydir) = (-,-)\n else: surface_azimuth = 360 + teta_z # (xdir,ydir) = (-,+)\n elif ydir < 0:\n surface_azimuth = 180 + teta_z # (xdir,ydir) = (+,-)\n else: surface_azimuth = teta_z # (xdir,ydir) = (+,+)\n return surface_azimuth # degree",
"def platform_auto_calibrate_azimuth_servo(self):\n self._platform_auto_calibrate_check()\n self.platform.auto_calibrate_azimuth_servo()",
"def do_azangle(self):\n angle_1, angle_2 = cbp.potentiometer.main()\n current_angle = angle_2\n #print(current_angle)\n self.azangle = current_angle\n return current_angle",
"def imu_get_azimuth(self):\n return self.imu.get_azimuth()",
"def create_azimuthal_polarization(dim, rotation):\n theta_array = np.zeros((dim, dim))\n\n for i in range(np.size(theta_array, 0)):\n for j in range(np.size(theta_array, 1)):\n x = -dim / 2 + i\n y = -dim / 2 + j\n # perform roation\n th = math.pi*rotation/180.0\n x = np.cos(th)*x - np.sin(th)*y\n y = np.sin(th)*x + np.cos(th)*y\n\n rot = math.atan2(x, y) + math.pi/2\n # factor = (rot % (2*math.pi))\n theta_array[i][j] = (rot % (2 * math.pi))\n return theta_array",
"def leaf_azimuth(size=1, phyllotactic_angle=180, phyllotactic_deviation=15, plant_orientation=0, spiral=False):\n if size == 1:\n return plant_orientation\n if spiral:\n main = numpy.arange(0, size) * phyllotactic_angle\n else:\n it = cycle((0, phyllotactic_angle))\n main = numpy.array([it.next() for i in xrange(size)])\n azim = plant_orientation + main + (numpy.random.random(size) - 0.5) * 2 * phyllotactic_deviation\n azim = azim % 360\n return numpy.where(azim <= 180, azim, azim - 360)"
] | [
"0.78250027",
"0.67446834",
"0.6695796",
"0.64115757",
"0.6395283",
"0.63717794",
"0.63140875",
"0.62267244",
"0.6145953",
"0.6141899",
"0.6060071",
"0.598316",
"0.5980102",
"0.59746766",
"0.5884954",
"0.58290213",
"0.5812057",
"0.58084035",
"0.57966334",
"0.5779464",
"0.5756145",
"0.57466495",
"0.5734621",
"0.5720032",
"0.5689982",
"0.5673885",
"0.5667952",
"0.56595176",
"0.565421",
"0.5652813"
] | 0.7764275 | 1 |
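The wrapper in the row above delegates to an Astral routine that follows the NOAA hour-angle method (see the first negative in this row). A minimal, self-contained sketch of that style of calculation, using only the standard library, is given below; it is not the Astral implementation — it ignores the equation of time and atmospheric refraction — and the function name, the simplified declination formula and the example coordinates are illustrative assumptions rather than anything taken from the snippets.

import math
import datetime

def approx_solar_position(lat_deg, lon_deg, when_utc):
    # Returns (elevation_deg, azimuth_deg measured clockwise from North).
    day_of_year = when_utc.timetuple().tm_yday
    # Rough solar declination in degrees (about +/-23.44 over the year).
    decl = -23.44 * math.cos(math.radians(360.0 / 365.0 * (day_of_year + 10)))
    # Rough local solar time: UTC decimal hours shifted by longitude (15 deg per hour);
    # the equation of time (a further +/-16 minute correction) is ignored here.
    solar_time = (when_utc.hour + when_utc.minute / 60.0
                  + when_utc.second / 3600.0 + lon_deg / 15.0)
    hour_angle = 15.0 * (solar_time - 12.0)   # degrees, negative before solar noon

    lat, dec, ha = map(math.radians, (lat_deg, decl, hour_angle))
    sin_el = (math.sin(lat) * math.sin(dec)
              + math.cos(lat) * math.cos(dec) * math.cos(ha))
    el = math.asin(sin_el)

    cos_az = (math.sin(dec) - sin_el * math.sin(lat)) / (math.cos(el) * math.cos(lat))
    az = math.degrees(math.acos(max(-1.0, min(1.0, cos_az))))
    if hour_angle > 0:        # afternoon: the sun is west of the meridian
        az = 360.0 - az
    return math.degrees(el), az

# Example: central London around noon UTC on the June solstice.
print(approx_solar_position(51.5, -0.12, datetime.datetime(2024, 6, 21, 12, 0)))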
Calculates the solar elevation angle for a specific time. | def solar_elevation(self, dateandtime=None):
if self.astral is None:
self.astral = Astral()
if dateandtime is None:
dateandtime = datetime.datetime.now(tz=self.tz)
return self.astral.solar_elevation(dateandtime, self.latitude, self.longitude) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solar_elevation(self, dateandtime, latitude, longitude):\n \n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n\n zone = -dateandtime.utcoffset().seconds / 3600.0\n utc_datetime = dateandtime.astimezone(pytz.utc)\n timenow = utc_datetime.hour + (utc_datetime.minute / 60.0) + (utc_datetime.second / 3600)\n \n JD = self._julianday(dateandtime.day, dateandtime.month, dateandtime.year)\n t = self._jday_to_jcentury(JD + timenow / 24.0)\n theta = self._sun_declination(t)\n Etime = self._eq_of_time(t)\n \n eqtime = Etime\n solarDec = theta # in degrees\n \n solarTimeFix = eqtime - (4.0 * longitude) + (60 * zone)\n trueSolarTime = dateandtime.hour * 60.0 + dateandtime.minute + dateandtime.second / 60.0 + solarTimeFix\n # in minutes\n \n while trueSolarTime > 1440:\n trueSolarTime = trueSolarTime - 1440\n \n hourangle = trueSolarTime / 4.0 - 180.0\n # Thanks to Louis Schwarzmayr for the next line:\n if hourangle < -180:\n hourangle = hourangle + 360.0\n \n harad = radians(hourangle)\n \n csz = sin(radians(latitude)) * sin(radians(solarDec)) + \\\n cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)\n \n if csz > 1.0:\n csz = 1.0\n elif csz < -1.0:\n csz = -1.0\n \n zenith = degrees(acos(csz))\n \n azDenom = (cos(radians(latitude)) * sin(radians(zenith)))\n \n if (abs(azDenom) > 0.001):\n azRad = ((sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))) / azDenom\n \n if abs(azRad) > 1.0:\n if azRad < 0:\n azRad = -1.0\n else:\n azRad = 1.0\n \n azimuth = 180.0 - degrees(acos(azRad))\n \n if hourangle > 0.0:\n azimuth = -azimuth\n else:\n if latitude > 0.0:\n azimuth = 180.0\n else:\n azimuth = 0\n \n if azimuth < 0.0:\n azimuth = azimuth + 360.0\n \n exoatmElevation = 90.0 - zenith\n\n if exoatmElevation > 85.0:\n refractionCorrection = 0.0\n else:\n te = tan(radians(exoatmElevation))\n if exoatmElevation > 5.0:\n refractionCorrection = 58.1 / te - 0.07 / (te * te * te) + 0.000086 / (te * te * te * te * te)\n elif exoatmElevation > -0.575:\n step1 = (-12.79 + exoatmElevation * 0.711)\n step2 = (103.4 + exoatmElevation * (step1))\n step3 = (-518.2 + exoatmElevation * (step2))\n refractionCorrection = 1735.0 + exoatmElevation * (step3)\n else:\n refractionCorrection = -20.774 / te\n \n refractionCorrection = refractionCorrection / 3600.0\n \n solarzen = zenith - refractionCorrection\n \n solarelevation = 90.0 - solarzen\n \n return solarelevation",
"def solar_azimuth(self, dateandtime=None):\n\n if self.astral is None:\n self.astral = Astral()\n\n if dateandtime is None:\n dateandtime = datetime.datetime.now(tz=self.tz)\n \n return self.astral.solar_azimuth(dateandtime, self.latitude, self.longitude)",
"def solar_azimuth(self, dateandtime, latitude, longitude):\n \n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n zone = -dateandtime.utcoffset().seconds / 3600.0\n utc_datetime = dateandtime.astimezone(pytz.utc)\n timenow = utc_datetime.hour + (utc_datetime.minute / 60.0) + (utc_datetime.second / 3600)\n\n JD = self._julianday(dateandtime.day, dateandtime.month, dateandtime.year)\n t = self._jday_to_jcentury(JD + timenow / 24.0)\n theta = self._sun_declination(t)\n Etime = self._eq_of_time(t)\n \n eqtime = Etime\n solarDec = theta # in degrees\n \n solarTimeFix = eqtime - (4.0 * longitude) + (60 * zone)\n trueSolarTime = dateandtime.hour * 60.0 + dateandtime.minute + dateandtime.second / 60.0 + solarTimeFix\n # in minutes\n \n while trueSolarTime > 1440:\n trueSolarTime = trueSolarTime - 1440\n \n hourangle = trueSolarTime / 4.0 - 180.0\n # Thanks to Louis Schwarzmayr for the next line:\n if hourangle < -180:\n hourangle = hourangle + 360.0\n \n harad = radians(hourangle)\n \n csz = sin(radians(latitude)) * sin(radians(solarDec)) + \\\n cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)\n \n if csz > 1.0:\n csz = 1.0\n elif csz < -1.0:\n csz = -1.0\n \n zenith = degrees(acos(csz))\n \n azDenom = (cos(radians(latitude)) * sin(radians(zenith)))\n \n if (abs(azDenom) > 0.001):\n azRad = ((sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))) / azDenom\n \n if abs(azRad) > 1.0:\n if azRad < 0:\n azRad = -1.0\n else:\n azRad = 1.0\n \n azimuth = 180.0 - degrees(acos(azRad))\n \n if hourangle > 0.0:\n azimuth = -azimuth\n else:\n if latitude > 0.0:\n azimuth = 180.0\n else:\n azimuth = 0#\n \n if azimuth < 0.0:\n azimuth = azimuth + 360.0\n \n return azimuth",
"def era(self):\n # earth rotation angle using Universal Time\n J = self.MJD - 51544.5\n fraction = np.mod(J, self.turn)\n theta = np.mod(0.7790572732640 + 0.00273781191135448*J, self.turn)\n return self.turndeg*np.mod(theta + fraction, self.turn)",
"def AngleFromSun(body, time):\n if body == Body.Earth:\n raise EarthNotAllowedError()\n sv = GeoVector(Body.Sun, time, True)\n bv = GeoVector(body, time, True)\n return AngleBetween(sv, bv)",
"def angle(self, angle: int, time: int = 0, /) -> None:",
"def ayanamsha(tee):\n return Solar.solar_longitude(tee) - sidereal_solar_longitude(tee)",
"def vert_angle(time, data, height, distance):\n\n altitude = float(data[time]['altitude'])\n\n return round((degrees(atan2(height, distance)) - altitude) / 2, 4)",
"def horiz_angle(time, data):\n\n # TODO What should 0deg be? Set it to inline w/ target? facing target?\n\n # direction of the sun. measured in degrees counted clockwise from north.\n azimuth = data[time]['azimuth']\n\n h_angle = (azimuth / 2 - 90)\n\n # returns answer between -180 and 180 degrees\n return round(((h_angle + 180) % 360) - 180, 4)",
"def diffraction_angle_for(self, wavelength: float = 532., theta: float = 0.):\n return np.arcsin(np.sin(-theta / 180. * np.pi)\n - self.interference * wavelength / 1000. / self.grating) * 180 / np.pi + theta",
"def altAz2RADec(azim, elev, jd, lat, lon):\n\n azim = np.radians(azim)\n elev = np.radians(elev)\n lat = np.radians(lat)\n lon = np.radians(lon)\n \n # Calculate hour angle\n ha = np.arctan2(-np.sin(azim), np.tan(elev)*np.cos(lat) - np.cos(azim)*np.sin(lat))\n\n # Calculate Local Sidereal Time\n lst = np.radians(JD2LST(jd, np.degrees(lon))[0])\n \n # Calculate right ascension\n ra = (lst - ha)%(2*np.pi)\n\n # Calculate declination\n dec = np.arcsin(np.sin(lat)*np.sin(elev) + np.cos(lat)*np.cos(elev)*np.cos(azim))\n\n return np.degrees(ra), np.degrees(dec)",
"def solarelevation_function_overcast(latitude_deg, longitude_deg, utc_datetime,\n elevation = elevation_default, temperature_celsius = 25,\n pressure_millibars = 1013.25):\n altitude = solar.GetAltitude(latitude_deg, longitude_deg,utc_datetime, elevation, temperature_celsius,pressure_millibars)\n return ((-0.0067133) + (0.78600 * (math.sin(altitude)))) + (0.22401 * (0.5 * (1 - math.cos(2 * altitude))))",
"def lunarperigee(time):\n dtor = np.pi / 180\n t1 = 1 + time\n t2 = t1 * t1\n t3 = t2 * t1\n perigee = (\n 334.329653 * dtor\n + 4069.0340329575 * dtor * t1\n - 0.010325 * dtor * t2\n - 1.2e-5 * dtor * t3\n )\n return perigee",
"def get_rotationalAngularPosition(self, t): # returns [rad]\n angle = self.theta0 + self.rotationalAngularVelocity * t # angular position [rad]\n return angle",
"def equatorial_hour_angle(hour, location):\n equatorial_angle = (hour - location.timezone) * 2 * np.pi / 24 + (np.deg2rad(location.longitude))\n logging.getLogger(\"hour.angle.equ\").debug(\"For hour %d, equatorial angle %g\" % (hour, np.rad2deg(equatorial_angle)))\n return equatorial_angle",
"def getAltitudeAngle(self):\n return self._altitude",
"def calc_angle_of_incidence(g, lat, ha, tilt, teta_z):\n # surface normal vector\n n_E = sin(tilt)*sin(teta_z)\n n_N = sin(tilt)*cos(teta_z)\n n_Z = cos(tilt)\n # solar vector\n s_E = -cos(g)*sin(ha)\n s_N = sin(g)*cos(lat) - cos(g)*sin(lat)*cos(ha)\n s_Z = cos(g)*cos(lat)*cos(ha) + sin(g)*sin(lat)\n\n # angle of incidence\n teta_B = acos(n_E*s_E + n_N*s_N + n_Z*s_Z)\n return teta_B",
"def get_azimuth(self):\n self.degrees = self.azimuth_encoder.get_degrees()\n self.tele_azimuth = self.Calculations.convert_degrees(self.degrees)\n return self.tele_azimuth",
"def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360",
"def calculate_angles():\n time = request.args.get('time')\n\n result = Helpers.validate_and_parse_input(time)\n if result:\n hour, minute = result\n\n hour_angle = 0.5 * (hour * 60 + minute)\n minute_angle = 6 * minute\n\n angle = abs(hour_angle - minute_angle)\n angle = min(360 - angle, angle)\n DatastoreClient(kind='clock_angle_logs').log_to_datastore(time, angle)\n\n return Helpers.success(angle)\n else:\n DatastoreClient(kind='clock_angle_logs').log_to_datastore(time, 'bad_request')\n return Helpers.bad_request(r\"query parameter time should follow regex ^\\d{1,2}:\\d{1,2}$ and value should be \"\n r\"between 00:00 and 23:59\")",
"def get_altaz(ra,dec,jd=None,lat = 37.9183, lon = -122.1067, alt = 304, equinox='J2000'):\n if jd: t = ap.time.Time(jd,format='jd')\n else: t = ap.time.Time(time.time(),format='unix')\n l = ap.coordinates.EarthLocation(lat=lat*u.deg,\n lon=lon*u.deg,height=alt*u.m)\n f = ap.coordinates.AltAz(obstime=t,location=l)\n c = ap.coordinates.SkyCoord(ra, dec, frame='fk5',unit='deg',equinox=equinox)\n altaz = c.transform_to(f)\n return altaz.alt.deg, altaz.az.deg",
"def do_azangle(self):\n angle_1, angle_2 = cbp.potentiometer.main()\n current_angle = angle_2\n #print(current_angle)\n self.azangle = current_angle\n return current_angle",
"def _calc_solar_from_clouds_and_angle(hr, ds_path):\n # Solar radiation [W/m^2] incident on top of atmosphere\n Q_o = 1368.0\n # Cloud model based on Dobson and Smith, table 5\n # SEA -- May 2010 : redid the cloud parametrization based on UBC\n # Solar data (/ocean/shared/SoG/met/solar/) fitting Q to cos_Z\n # (not Q/cos_Z as Kate did). Allen and Wolfe (2013). (0) no\n # clouds, (1) 1/10 cloud fraction (10) 100% clouds. Four sig\n # figs are what comes out of matlab but standard deviations are\n # 40W/m2 for low cloud fraction to 120 W/m2 for 6-9 cloud\n # fraction to 85 W/m2 for completely cloudy.\n cloud_consts = SimpleNamespace(\n A=numpy.array(\n [\n 0.6337,\n 0.6149,\n 0.5861,\n 0.5512,\n 0.5002,\n 0.4649,\n 0.4225,\n 0.3669,\n 0.2468,\n 0.1981,\n 0.0841,\n ]\n ),\n B=numpy.array(\n [\n 0.1959,\n 0.2119,\n 0.2400,\n 0.2859,\n 0.3192,\n 0.3356,\n 0.3339,\n 0.3490,\n 0.4427,\n 0.3116,\n 0.2283,\n ]\n ),\n )\n # Local standard time\n ## WARNING: .to(\"PST\") may be fragile and incorrect for summer-time dates\n lst = hr.to(\"PST\")\n # day_time is in seconds, LST\n day_time = (lst - lst.floor(\"day\")).seconds\n # hour of day as degrees from noon\n hour = (day_time / 3600 - 12) * 15\n # day is year-day\n day = (lst - lst.floor(\"year\")).days\n # solar declination [radians]\n declination = (\n 23.45 * numpy.pi / 180 * numpy.sin((284 + day) / 365.25 * 2 * numpy.pi)\n )\n # Latitude of approximate centre of model domain in radians\n lat = numpy.pi * 50 / 180\n # solar elevation\n elev_sin = numpy.sin(declination) * numpy.sin(lat)\n elev_cos = numpy.cos(declination) * numpy.cos(lat)\n cos_Z = elev_sin + elev_cos * numpy.cos(numpy.pi / 180 * hour)\n # cos of -hour_angle in radians\n hour_angle = numpy.tan(lat) * numpy.tan(declination)\n # assume we are south of the Arctic Circle\n day_length = numpy.arccos(-hour_angle) / 15 * 2 * 180 / numpy.pi\n sunrise = 12 - 0.5 * day_length # hours\n sunset = 12 + 0.5 * day_length # hours\n Qso = Q_o * (1 + 0.033 * numpy.cos(day / 365.25 * 2 * numpy.pi))\n with xarray.open_dataset(ds_path) as ds:\n cf_value = ds.percentcloud * 10\n fcf = numpy.floor(cf_value).astype(int) # integer below cf value\n fcf = xarray.where(fcf == 10, 9, fcf).data\n ccf = fcf + 1 # integer above cf value\n if (sunrise > day_time / 3600) or (day_time / 3600 > sunset):\n # nighttime\n return xarray.zeros_like(ds.percentcloud)\n return (\n Qso\n * (\n cloud_consts.A[fcf] * (ccf - cf_value)\n + cloud_consts.A[ccf] * (cf_value - fcf)\n + (\n cloud_consts.B[fcf] * (ccf - cf_value)\n + cloud_consts.B[ccf] * (cf_value - fcf)\n )\n * cos_Z\n )\n * cos_Z\n )",
"def getEdgeAngle():\n '''\n returns angle a\n a\n ◿\n b c\n '''\n ANGLE_OFFSET = 8 # How far off the angle measurements are in degrees.\n THRESHOLD = 220 # How much light must be reflected to 'notice' the desk.\n angle = 0\n while angle < panTilt.TLT_RANGE:\n angle += 1\n panTilt.tilt(int(angle))\n deskDetected = ir.readWithDelay()\n # print \"Angle:\", angle + ANGLE_OFFSET, \", ir reading:\", deskDetected\n if deskDetected > THRESHOLD or angle == panTilt.TLT_RANGE:\n # print \"-----------------------\"\n break # Break out of looking downwards loop\n panTilt.up() # Look up again\n return 90 - angle - ANGLE_OFFSET",
"def set_azimuth(self):\n self.azimuth = self.Calculations.convert_to_azimuth( self.declination, self.right_ascension, self.Latitude, self.LHA)\n if self.azimuth < 0:\n self.azimuth = self.azimuth + 360.0\n return self.azimuth\n else:\n pass\n return self.azimuth\n print('azimuth set to', self.azimuth)",
"def IAngle(a, b, t):\n \n # http://www.engineersedge.com/material_science/moment-inertia-gyration-7.htm\n d = b - t \n y = b - (t*(2*d + a) + d**2)/(2*(d+a))\n I = 1/3 * (t*y**3 + a*(b-y)**3 - (a-t)*(b-y-t)**3)\n return I",
"def fun_azimuth(self):\n\n energy_kev = self.energy_kev.get()\n hkl = self.hkl_magnetic.get()\n hkl = hkl.replace(',', ' ') # remove commas\n hkl = hkl.replace('(', '').replace(')', '') # remove brackets\n hkl = hkl.replace('[', '').replace(']', '') # remove brackets\n hkl = np.fromstring(hkl, sep=' ')\n\n azi = self.azim_zero.get()\n azi = azi.replace(',', ' ') # remove commas\n azi = azi.replace('(', '').replace(')', '') # remove brackets\n azi = azi.replace('[', '').replace(']', '') # remove brackets\n azi = np.fromstring(azi, sep=' ')\n\n pol = self.polval.get()\n if pol == u'\\u03c3-\\u03c3':\n pol = 's-s'\n elif pol == u'\\u03c3-\\u03c0':\n pol = 's-p'\n elif pol == u'\\u03c0-\\u03c3':\n pol = 'p-s'\n else:\n pol = 'p-p'\n\n F0 = self.resF0.get()\n F1 = self.resF1.get()\n F2 = self.resF2.get()\n\n isres = self.isres.get()\n if isres:\n # Resonant scattering\n self.xtl.Plot.simulate_azimuth_resonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol,\n F0=F0, F1=F1, F2=F2)\n plt.show()\n else:\n # Non-Resonant scattering\n self.xtl.Plot.simulate_azimuth_nonresonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol)\n plt.show()",
"def getAltAz(arr,header,time,location):\n\tsoln = wcs.WCS(header)\n\tcoords = cartesian([arange(arr.shape[1]),arange(arr.shape[0])])\n\tworld = soln.wcs_pix2world(coords,0)\n\tradec = SkyCoord(ra=world[:,0],dec=world[:,1],frame='icrs',unit='deg')\n\taltaz = radec.transform_to(AltAz(obstime=time,location=telescope))\n\treturn altaz.alt.deg,altaz.az.deg,coords[:,0],coords[:,1]",
"def calculate_orbiting_angle(orbiting_center, raft):\n\n # note the negative sign before the first component, the y component\n # it is to make the orbiting angle in a right-handed coordiante.\n angle = np.arctan2(-(raft[1] - orbiting_center[1]), (raft[0] - orbiting_center[0])) * 180 / np.pi\n\n return angle",
"def pointing_dir_earth (self, time):\n\n return self.vect_from_lspe_to_earth (self.pointing_dir_lspe (time),\n time)"
] | [
"0.71530664",
"0.67977107",
"0.67431724",
"0.6220381",
"0.61309683",
"0.6082073",
"0.6003304",
"0.5945914",
"0.5920471",
"0.58973986",
"0.5858497",
"0.58510685",
"0.58294576",
"0.579033",
"0.57864755",
"0.57316357",
"0.5688304",
"0.56844896",
"0.56828004",
"0.56804633",
"0.5671778",
"0.5669164",
"0.56561786",
"0.5654852",
"0.5646939",
"0.56376356",
"0.56368685",
"0.56348133",
"0.56116825",
"0.56085527"
] | 0.70600396 | 1 |
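The elevation routine in the first negative above ends with a piecewise atmospheric-refraction correction. Pulled out on its own — the standalone function name and the sample input are ours — that correction looks like this, with input and output in degrees, the constants being the ones visible in the snippet, and the result converted from arc-seconds:

import math

def refraction_correction(apparent_elevation_deg):
    e = apparent_elevation_deg
    if e > 85.0:
        corr = 0.0
    else:
        te = math.tan(math.radians(e))
        if e > 5.0:
            corr = 58.1 / te - 0.07 / te ** 3 + 0.000086 / te ** 5
        elif e > -0.575:
            # Nested form of the step1/step2/step3 polynomial in the snippet above.
            corr = 1735.0 + e * (-518.2 + e * (103.4 + e * (-12.79 + e * 0.711)))
        else:
            corr = -20.774 / te
    return corr / 3600.0   # arc-seconds to degrees

print(refraction_correction(2.0))   # roughly 0.28 degrees near the horizon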
Initialise the city database and set the default depression. | def __init__(self):
self._citydb = CityDB()
self._depression = 6 # Set default depression in degrees | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n self.my_city = City()",
"def initialize():\n sql_db = SQLConnection()\n with SQLCursor(sql_db) as cur:\n cur.execute('SELECT position from govt_info')\n row = cur.fetchone()\n for pos in Government.positions:\n if row is None or len(row) != len(Government.positions):\n cur.execute('INSERT OR IGNORE INTO govt_info (position) VALUES (?);', (pos,))",
"def reset_db(self):\n self._cities_distance.drop()",
"def initCitys(self):\n self.cities = []\n for vertex in self.metaGraph:\n self.cities.append(vertex)",
"def setUp(self):\n\n Cafe.query.delete()\n City.query.delete()\n\n sf = City(**CITY_DATA)\n db.session.add(sf)\n\n cafe = Cafe(**CAFE_DATA)\n db.session.add(cafe)\n\n db.session.commit()\n\n self.cafe = cafe",
"def setUp(self):\n\n Cafe.query.delete()\n City.query.delete()\n\n sf = City(**CITY_DATA)\n db.session.add(sf)\n\n cafe = Cafe(**CAFE_DATA)\n db.session.add(cafe)\n\n db.session.commit()\n\n self.cafe = cafe",
"def init_db() -> None:\n conn = sqlite3.connect('../Utils/map_storage.db')\n cursor = conn.cursor()\n\n with conn:\n station_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n nodes(city TEXT, name TEXT, is_station TEXT, x INT, y INT, zone TEXT)\"\"\"\n\n cursor.execute(station_cmd)\n\n connection_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n connections(city TEXT, name_1 TEXT, name_2 TEXT, color TEXT)\"\"\"\n\n cursor.execute(connection_cmd)",
"def init_database(self):\n # init_database(self.engine)",
"def __init__(self):\n self._zipcode = None\n self._city = None",
"def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()",
"def initialise_bdd(self):\n print(fr.FR[1])\n self.base.create_database(\"sql/p5.sql\")\n print(fr.FR[2])\n self.category_table.save_category()\n print(fr.FR[3])",
"def __init_database(self):\n from admin.database import init_db\n init_db()",
"def init_database(self):\n init_database(self.engine)",
"def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()",
"def setUp(self):\n\n City.query.delete()\n Cafe.query.delete()\n\n sf = City(**CITY_DATA)\n db.session.add(sf)\n\n cafe = Cafe(**CAFE_DATA)\n db.session.add(cafe)\n\n db.session.commit()\n\n self.cafe_id = cafe.id",
"def setUpClass(cls):\n cls.city = City()",
"def set_tour(self, city_list=None):\n self.cities = city_list or \\\n random.sample(range(len(self.x_points)), len(self.y_points))\n self.distance = 0\n self.fitness = 0",
"def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)",
"def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()",
"def setUp(self):\n\n Cafe.query.delete()\n City.query.delete()\n\n sf = City(**CITY_DATA)\n db.session.add(sf)\n\n cafe = Cafe(**CAFE_DATA)\n db.session.add(cafe)\n\n db.session.commit()\n\n self.cafe_id = cafe.id",
"def populate_cities():\n if City.query.filter_by(name=CITIES[0]).first():\n return\n\n for city in CITIES:\n _add_city(city)",
"def init_db():\n db.drop_all()\n db.create_all()\n\n print(\"Initialized Connect 4 Database.\")",
"def __init__(self, city):\r\n self.city = city",
"def initialise(self):\n self.set_up()",
"def init_db():\n\tdb.drop_all()\n\tdb.create_all()\n\n\tprint(\"Initialized Database.\")\n\treturn",
"def city(self, city):\n self._city = city",
"def populate_db(self):\n # Get donors\n log.info(\"Populating donors.\")\n\n self.r.hmset('Thomas', {'donations': '500', 'email': '[email protected]', 'city': 'Athens', 'state': 'GA', 'zip': 30606})\n\n self.r.hmset('Ted', {'donations': '1', 'email': '[email protected]', 'city': 'Memphis', 'state': 'TN', 'zip': 38104})\n\n self.r.hmset(\"Bailey\", {'donations': '1000', 'email': '[email protected]', 'city': 'Washington', 'state': 'DC', 'zip': 12345})",
"def __init__(self):\n\t\tDBHelper.initialize() #initiate dababase helper",
"def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()",
"def setup_database(self):\n self.db.setup_database()"
] | [
"0.6182034",
"0.6132606",
"0.60481834",
"0.60060346",
"0.5950479",
"0.5950479",
"0.59373367",
"0.59160274",
"0.5874279",
"0.58517987",
"0.5830317",
"0.58109874",
"0.5791189",
"0.57841307",
"0.5774677",
"0.57559514",
"0.5754031",
"0.57528317",
"0.5746476",
"0.5727893",
"0.567058",
"0.5652014",
"0.563074",
"0.5621083",
"0.5593378",
"0.5565594",
"0.55648994",
"0.5560749",
"0.55539143",
"0.55507696"
] | 0.77128774 | 0 |
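The default depression of 6 degrees assigned above is the conventional civil-twilight value; nautical and astronomical twilight use 12 and 18 degrees. A tiny lookup of these standard conventions — the dictionary itself is illustrative and not part of any of the snippets:

# Degrees the sun sits below the horizon at each conventional twilight boundary.
TWILIGHT_DEPRESSION = {
    "civil": 6,          # the default assigned in __init__ above
    "nautical": 12,
    "astronomical": 18,
}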
Returns the City instance specified by ``key``. | def __getitem__(self, key):
city = self._citydb[key]
city.astral = self
return city | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_city(self, name: str):\n key = name.lower()\n try:\n return self._cities[key]\n except KeyError:\n city = City(name=name, state=self)\n self._cities[key] = city\n return city",
"def __getitem__(self, key):\n \n key = str(key).lower().encode('utf-8')\n for group in self._groups.values():\n try:\n return group[key]\n except KeyError:\n pass\n\n raise KeyError('Unrecognised city name - %s' % key)",
"def retrieve_city(city_id):\n city = storage.get('City', city_id)\n if city:\n return city.to_dict()\n abort(404)",
"def fetch(cls, key):\n return cls(_key=key, **(cls._dbag[key]))",
"def get_city(city_id):\n city = storage.get(\"City\", city_id)\n if city is None:\n abort(404)\n return jsonify(city.to_json())",
"def city_by_id(city_id):\n\n fetched_obj = storage.get(\"City\", str(city_id))\n\n if fetched_obj is None:\n abort(404)\n\n return jsonify(fetched_obj.to_json())",
"def get_city(city_id):\n city = storage.get(\"City\", city_id)\n if city is None:\n abort(404)\n return jsonify(city.to_dict())",
"def __getitem__(self, key):\n if key.isdigit():\n customer = self.request.db.query(models.Klant).get(key)\n if customer:\n return Customer(self, key, customer=customer)\n raise KeyError",
"def get_city(self, city_id):\n city = self.city_repo.get_by_id(city_id)\n\n resource = self.city_repo.dump(city)\n return dict(city=resource), [], SUCCESS",
"def retrieve_city(city_id):\n obj = models.storage.get(\"City\", city_id)\n if obj is not None:\n return jsonify(obj.to_dict())\n else:\n abort(404)",
"def find(cls, key):\r\n return cls.query().get(key)",
"def get(self, key):\n if self.db is None:\n self._init()\n return self.db[key]",
"def __getitem__(cls, key):\n return cls(cls._nameToValue[key])",
"def get_case(self, key: str):\n case = self.cases.get(key)\n if not hasattr(case, 'case_id'):\n message = \"get_case(): Case key {} does not have a case_id\"\n logmessage(message.format(key))\n else:\n logmessage(\"get_case(): \" + \"Retrieved case {}\".format(str(case)))\n return case",
"def city_by_id(city_id):\n cities_values = storage.all(\"City\").values()\n for obj in cities_values:\n if obj.id == city_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def get(self, key):\n # Initialize key variables\n result = self.cache.get(key)\n\n # Return\n return result",
"def get_city(self, territory_id: str = \"\"):",
"def get_city(self, territory_id: str = \"\"):",
"def city(city_id):\n\n if storage.get(\"City\", city_id) is not None:\n return jsonify(storage.get(\"City\", city_id).to_dict())\n else:\n abort(404)",
"def get(self, key):\n return self._cache[key]",
"def find_city(city, dbsession):\n\n\t# Since we're creating the FK relation based on ID, and hence the casing has no bearing on \n\t# whether the city record associates with the address, I'm upcasing the city to prevent dupes.\n\tcity = str(city)\n\tcity = city.upper()\n\n\tresult = dbsession.query(db.City).filter_by(city_name=city).first()\n\n\tif result is None:\n\t\t# Create a new instance of city\n\t\tcity_object = db.City(city)\n\t\t# I'm adding the city without committing the transaction since it would also\n\t\t# commit the address insert transaction that's still open in routes.py.\n\t\tdbsession.add(city_object)\n\t\treturn city_object\n\telse:\n\t\t# Assign the existing user object to the variable\n\t\treturn result",
"def __getitem__(self, key):\n return type(self)(self.origin, typeof(key))",
"def get(self, id):\n\n session = Session()\n city = session.query(Cities).get(id)\n if city:\n response = dict(data=city.get_as_dict())\n else:\n return \"City with id={} does not exist!\".format(id), HTTP_NOT_FOUND_CODE\n\n return response, HTTP_OK_CODE",
"def show_city(city_id=None):\n city = storage.get(City, city_id)\n\n if city is None:\n abort(404)\n\n return jsonify(city.to_dict())",
"def __getitem__(self, key):\n self._remove_expired()\n\n cache_entry = self._d.get(key, None)\n log.debug(\"__getitem__: {}\".format(cache_entry))\n\n return cache_entry",
"def get(self, key):\n return self.container[key]",
"def get(self, key):\n person = self._data.get(key)\n\n if not person:\n raise NotFoundError(\"{} could not be found\".format(key))\n\n return Person(key, person)",
"def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None",
"def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None",
"def __getitem__(self, key):\n log.info(\"querying post %s, %s:%s\" % (self.blog, self.date, key))\n try:\n post = DBSession.query(Post).filter_by(blog=self.blog, date=self.date, slug=key).one()\n # make location aware\n post.__parent__ = self\n return post\n except NoResultFound:\n raise KeyError"
] | [
"0.6803799",
"0.64028823",
"0.61282915",
"0.6064786",
"0.598396",
"0.5972638",
"0.59683293",
"0.59556264",
"0.5922655",
"0.5832542",
"0.5792213",
"0.5784531",
"0.57582283",
"0.573862",
"0.57019925",
"0.5670712",
"0.56591344",
"0.56591344",
"0.56481266",
"0.56079686",
"0.5563027",
"0.55553216",
"0.5541962",
"0.5518856",
"0.5516858",
"0.55150664",
"0.5490744",
"0.54855865",
"0.54855865",
"0.54367745"
] | 0.769488 | 0 |
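The __getitem__ in the row above is a plain mapping lookup that also hands the returned City a back-reference to its Astral owner. A self-contained sketch of the same pattern, using made-up stand-ins (Registry and a hard-coded London entry) rather than the real CityDB/City classes:

class Registry:
    def __init__(self):
        # Stand-in for CityDB: one hard-coded entry keyed by lower-cased name.
        self._items = {"london": {"latitude": 51.5, "longitude": -0.12}}

    def __getitem__(self, key):
        item = dict(self._items[key.lower()])
        item["owner"] = self          # analogous to `city.astral = self` above
        return item

reg = Registry()
print(reg["London"]["latitude"])      # 51.5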
Calculate dawn time in the UTC timezone. | def dawn_utc(self, date, latitude, longitude):
julianday = self._julianday(date.day, date.month, date.year)
if latitude > 89.8:
latitude = 89.8
if latitude < -89.8:
latitude = -89.8
t = self._jday_to_jcentury(julianday)
eqtime = self._eq_of_time(t)
solarDec = self._sun_declination(t)
try:
hourangle = self._hour_angle_sunrise(latitude, solarDec)
except:
raise AstralError('Sun remains below horizon on this day, at this location.')
delta = longitude - degrees(hourangle)
timeDiff = 4.0 * delta
timeUTC = 720.0 + timeDiff - eqtime
newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)
eqtime = self._eq_of_time(newt)
solarDec = self._sun_declination(newt)
hourangle = self._hour_angle_dawn(latitude, solarDec, self._depression)
delta = longitude - degrees(hourangle)
timeDiff = 4 * delta
timeUTC = 720 + timeDiff - eqtime
timeUTC = timeUTC/60.0
hour = int(timeUTC)
minute = int((timeUTC - hour) * 60)
second = int((((timeUTC - hour) * 60) - minute) * 60)
if second > 59:
second -= 60
minute += 1
elif second < 0:
second += 60
minute -= 1
if minute > 59:
minute -= 60
hour += 1
elif minute < 0:
minute += 60
hour -= 1
if hour > 23:
hour -= 24
date += datetime.timedelta(days=1)
elif hour < 0:
hour += 24
date -= datetime.timedelta(days=1)
dawn = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)
return dawn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dawn(self, date=None, local=True):\n\n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n dawn = self.astral.dawn_utc(date, self.latitude, self.longitude)\n\n if local:\n return dawn.astimezone(self.tz) \n else:\n return dawn",
"def get_unixtime(self):\n if not self.Complete():\n raise DateTimeError(\"get_unixtime requires complete timepoint\")\n zoffset = self.time.GetZoneOffset()\n if zoffset is None:\n raise DateTimeError(\"get_unixtime requires timezone\")\n elif zoffset == 0:\n zt = self\n else:\n zt = self.ShiftZone(zDirection=0)\n days = zt.date.GetAbsoluteDay() - EPOCH.date.GetAbsoluteDay()\n seconds = zt.time.GetTotalSeconds() - EPOCH.time.GetTotalSeconds()\n return 86400 * days + seconds",
"def utcTime():\r\n return calendar.timegm(time.gmtime())",
"def dusk_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dusk(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n dusk = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dusk",
"def _clock_time(self):\n return self._shifted_time % (24*3600)",
"def start_hour_utc(self) -> int:\n return pulumi.get(self, \"start_hour_utc\")",
"def get_chime_time(self):\n actual_time = datetime(year=self.start_time.year, month=self.start_time.month, day=self.start_time.day,\n hour=self.start_time.hour, minute=0, second=0, microsecond=0)\n if self.start_time.minute > 30:\n actual_time = actual_time + timedelta(hours=1)\n return actual_time",
"def get_utc_offset():\n timedelta = datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()",
"def latest_synop_time()-> datetime:\n utc = datetime.utcnow()\n\n if utc.hour < 1:\n utc = utc - timedelta(days=1)\n utc = utc.replace(hour=18)\n elif utc.hour < 7:\n utc = utc.replace(hour=0)\n elif utc.hour < 13:\n utc = utc.replace(hour=6)\n elif utc.hour < 19:\n utc = utc.replace(hour=12)\n else:\n utc = utc.replace(hour=18)\n\n utc.replace(minute=0, second=0)\n return utc",
"def _clock_day(self):\n return int(self._shifted_time / 86400)",
"def _get_tz():\n return 'UTC'",
"async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).time()",
"def uptime(self) -> timedelta:\n return timedelta(seconds=int(time() - self.start_time))",
"def itow2utc(itow: int) -> datetime.time:\r\n\r\n utc = datetime(1980, 1, 6) + timedelta(seconds=(itow / 1000) - (35 - 19))\r\n return utc.time()",
"def timezone():\n \n pass",
"def schedule(self):\n\n crontab = self._crontab\n return datetime.now() + timedelta(\n seconds=math.ceil(\n crontab.next(default_utc=False)\n )\n )",
"def Timer():\n ltime = time.localtime()\n h, m, s = ltime[3:6]\n return h * 3600.0 + m * 60.0 + s",
"def get_time():\n\teastern = timezone('US/Eastern')\n\tnow = datetime.datetime.now(eastern).time()\n\treturn(now)",
"def nowUTC():\n return datetime.datetime.now(pytz.utc)",
"def calculate_time(start_time):\r\n return round(time() - start_time, 2)",
"def getUtcSeconde(self) -> int:\n ...",
"def run_hour(self) -> int:\n return self.timestamp.hour",
"def timecalc():\n print(\"timecalc started. Ain't nobody got TIME for: \\n\")\n if len(sys.argv) == 2:\n print(\"single input argument, assuming this is a UTC epoch timestamp in ms\")\n dt = int(sys.argv[1])\n dt = datetime.datetime.utcfromtimestamp(dt / 1000.0)\n else:\n if \":\" in sys.argv[2]:\n dt = sys.argv[1] + \" \" + sys.argv[2]\n dt = datetime.datetime.strptime(dt, DATETIME_FORMAT)\n else:\n print(\"timecalc requires time in either UTC epoch time or datetime in {}\".format(DATETIME_FORMAT))\n raise ValueError('UTC datetime needs to be {}'.format(DATETIME_FORMAT))\n\n gpstime = utctoweekseconds(dt)\n towsec = gpstime[2] + (gpstime[3] / 1000000.0)\n\n print(\"UTC DATETIME: {} \\nGPS WEEK: {}, TOW: {}\".format(dt, gpstime[0], towsec))",
"def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim",
"def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)",
"def get_epoch_time(utc_datetime=None):\n if not utc_datetime:\n utc_datetime = datetime.datetime.utcnow()\n return math.ceil((utc_datetime - EPOCH_START).total_seconds())",
"def py2_earth_hours_left(start_date=BITE_CREATED_DT):\n\n td = (PY2_DEATH_DT - start_date)\n return round((td.days*24 + td.seconds/3600), 1)",
"def uptime():\n seconds = timedelta(seconds=int(time.time() - start_time))\n d = datetime(1, 1, 1) + seconds\n return(\"%dD:%dH:%dM:%dS\" % (d.day-1, d.hour, d.minute, d.second))",
"def get_time():\n # Use this one for production:\n now_time = pendulum.now(tz=pendulum.timezone(\"America/New_York\"))\n # Use this one for testing and modify as needed:\n # now_time = pendulum.datetime(2019, 7, 21, 20, 00, tz='America/New_York')\n\n return now_time",
"def brasilia_day():\n return (dt.datetime.utcnow() + dt.timedelta(hours=-3)).replace(hour=0, minute=0, second=0, microsecond=0)"
] | [
"0.628965",
"0.59754497",
"0.58763033",
"0.5851417",
"0.5806555",
"0.5735901",
"0.56585526",
"0.56538594",
"0.55857354",
"0.55760634",
"0.5519843",
"0.55160165",
"0.55128175",
"0.55116653",
"0.53910935",
"0.5379949",
"0.536428",
"0.5360612",
"0.53601545",
"0.5338035",
"0.5333986",
"0.5324582",
"0.5288426",
"0.527294",
"0.52505535",
"0.5246069",
"0.52418363",
"0.52414656",
"0.52271163",
"0.5214999"
] | 0.76470286 | 0 |
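Both passes of the dawn calculation above hinge on one relation: the hour angle H at which the sun reaches a given zenith angle z satisfies cos H = (cos z - sin(lat) sin(dec)) / (cos(lat) cos(dec)), with z = 90 + depression degrees for dawn/dusk and roughly 90.833 degrees for sunrise/sunset. A short sketch of that relation follows (the function name and the sample latitude/declination are illustrative); when |cos H| exceeds 1 the sun never reaches that zenith angle on that day, which is the condition the code above reports as its 'Sun remains below horizon' error.

import math

def hour_angle_deg(lat_deg, decl_deg, zenith_deg):
    lat, dec, z = map(math.radians, (lat_deg, decl_deg, zenith_deg))
    cos_h = (math.cos(z) - math.sin(lat) * math.sin(dec)) / (math.cos(lat) * math.cos(dec))
    if not -1.0 <= cos_h <= 1.0:
        raise ValueError("sun never reaches this zenith angle on this day")
    return math.degrees(math.acos(cos_h))

# Civil dawn (depression 6 deg) at latitude 51.5 N with solar declination +23 deg:
h = hour_angle_deg(51.5, 23.0, 90.0 + 6.0)
print(h, "degrees =", h / 15.0, "hours before solar noon")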
Calculate sunrise time in the UTC timezone. | def sunrise_utc(self, date, latitude, longitude):
julianday = self._julianday(date.day, date.month, date.year)
t = self._jday_to_jcentury(julianday)
eqtime = self._eq_of_time(t)
solarDec = self._sun_declination(t)
try:
hourangle = self._hour_angle_sunrise(latitude, solarDec)
except:
raise AstralError('Sun remains below horizon on this day, at this location.')
delta = longitude - degrees(hourangle)
timeDiff = 4.0 * delta
timeUTC = 720.0 + timeDiff - eqtime
newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)
eqtime = self._eq_of_time(newt)
solarDec = self._sun_declination(newt)
hourangle = self._hour_angle_sunrise(latitude, solarDec)
delta = longitude - degrees(hourangle)
timeDiff = 4 * delta
timeUTC = 720 + timeDiff - eqtime
timeUTC = timeUTC/60.0
hour = int(timeUTC)
minute = int((timeUTC - hour) * 60)
second = int((((timeUTC - hour) * 60) - minute) * 60)
if second > 59:
second -= 60
minute += 1
elif second < 0:
second += 60
minute -= 1
if minute > 59:
minute -= 60
hour += 1
elif minute < 0:
minute += 60
hour -= 1
if hour > 23:
hour -= 24
date += datetime.timedelta(days=1)
elif hour < 0:
hour += 24
date -= datetime.timedelta(days=1)
sunrise = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)
return sunrise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_sun_rise_set_time(self, sun_time):\n if sun_time:\n return datetime.fromtimestamp(sun_time).strftime(self.time_format)\n return sun_time",
"def alt_sunrise(cls, date):\n rise = cls.UJJAIN.dawn(date, angle(0, 47, 0))\n return 1/24 * 1/60 * iround(rise * 24 * 60)",
"def sunrise(cls, date):\n return (date + Clock.days_from_hours(6) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n ((1577917828/1582237828 / 360) *\n (cls.ascensional_difference(date, cls.LOCATION) +\n (1/4 * cls.solar_sidereal_difference(date)))))",
"def computeSunTime(self, latitude, longitude, startDate, endDate): \n self.sun = sun(lat=latitude, long=longitude)\n dateTime = datetime.datetime.combine(startDate, datetime.time(hour=8))\n while dateTime.date() <= endDate: \n daytimeStart, daytimeEnd = self.computeDaytimeStartEnd(dateTime)\n self.sunriseTimeDict[dateTime.date()] = daytimeStart\n self.sunsetTimeDict[dateTime.date()] = daytimeEnd\n dateTime += self.oneDayDelta",
"def sun_set_rise_times(self, date=None):\n rstimes = (self.sunset(date=date),\n self.evening_twilight_12(date=date),\n self.evening_twilight_18(date=date),\n self.morning_twilight_18(date=date),\n self.morning_twilight_12(date=date),\n self.sunrise(date=date))\n return rstimes",
"async def sunrise(self, aware=False, today=False, days_offset=0) -> dt.datetime:\n return await self.AD.sched.sunrise(aware, today=today, days_offset=days_offset)",
"def computeDaytimeStartEnd(self, date):\n dayStartTime = datetime.datetime.combine(date.date(), datetime.time())\n #compute sunrise time for that date\n (h, m, s) = self.sun.sunrise(when=date)\n time_delta = datetime.timedelta(hours=h, minutes=m, seconds=s)\n sunrise_datetime = dayStartTime + time_delta\n #print(sunrise_datetime) \n #compute sunset time for that date \n (h, m, s) = self.sun.sunset(when=date)\n time_delta = datetime.timedelta(hours=h, minutes=m, seconds=s)\n sunset_datetime = dayStartTime + time_delta\n \n return (sunrise_datetime, sunset_datetime)",
"def sunset_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n sunset = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return sunset",
"def sunrise(self, date=None, local=True):\n\n if self.astral is None:\n self.astral = Astral()\n \n if date is None:\n date = datetime.date.today()\n\n sunrise = self.astral.sunrise_utc(date, self.latitude, self.longitude)\n\n if local:\n return sunrise.astimezone(self.tz) \n else:\n return sunrise",
"def GetSunriseSunset(latitude_deg, longitude_deg, utc_datetime, timezone):\n\n # Day of the year\n day = solar.GetDayOfYear(utc_datetime)\n\n # Solar hour angle\n SHA = ((timezone)* 15.0 - longitude_deg)\n\n # Time adjustment\n TT = (279.134+0.985647*day)*math.pi/180\n\n # Time adjustment in hours\n time_adst = ((5.0323 - 100.976*math.sin(TT)+595.275*math.sin(2*TT)+\n 3.6858*math.sin(3*TT) - 12.47*math.sin(4*TT) - 430.847*math.cos(TT)+\n 12.5024*math.cos(2*TT) + 18.25*math.cos(3*TT))/3600)\n \n # Time of noon\n TON = (12 + (SHA/15.0) - time_adst)\n \n sunn = (math.pi/2-(23.45*math.pi/180)*math.tan(latitude_deg*math.pi/180)*\n math.cos(2*math.pi*day/365.25))*(180/(math.pi*15))\n\n # Sunrise_time in hours\n sunrise_time = (TON - sunn + time_adst)\n \n # Sunset_time in hours\n sunset_time = (TON + sunn - time_adst) \n\n sunrise_time_dt = date_with_decimal_hour(utc_datetime, sunrise_time) \n sunset_time_dt = date_with_decimal_hour(utc_datetime, sunset_time) \n\n return sunrise_time_dt, sunset_time_dt",
"def estimate_sunrise_sunset(self, date):\n\n if self.source_type != 'solar':\n raise ValueError(\"You can only estimate sunrise and sunset for \"\n \"solar sources.\")\n\n date = pd.Timestamp(date)\n historic_data = self.data\n # The range is 14 days ago to the end of yesterday\n start_date = date - datetime.timedelta(days=14)\n end_date = date - datetime.timedelta(hours=1)\n\n # We grab all hours where actual power is greater than 0\n relevant_data = historic_data[start_date:end_date]\n daylight_data = relevant_data[relevant_data['actuals'] > 0]\n\n # We do this to stop a warning from appearing, we know it's a copy\n daylight_data.is_copy = False\n daylight_data['hours'] = daylight_data.index.hour\n\n # Find the min and max hour for each day where we have positive\n # observed power generation.\n sunrises = daylight_data.groupby(daylight_data.index.date).min()['hours']\n sunsets = daylight_data.groupby(daylight_data.index.date).max()['hours']\n\n # We round in order to have an integer value for sunrise and sunset.\n average_sunrise = int(max(round(sunrises.mean()) - 1, 0))\n average_sunset = int(min(round(sunsets.mean()) + 1, 23))\n\n return average_sunrise, average_sunset",
"def sundial_time(cls, tee):\n date = Clock.fixed_from_moment(tee)\n time = mod(tee, 1)\n q = ifloor(4 * time)\n if q == 0:\n a = cls.sunset(date - 1)\n b = cls.sunrise(date)\n t = Clock.days_from_hours(-6)\n elif q == 3:\n a = cls.sunset(date)\n b = cls.sunrise(date + 1)\n t = Clock.days_from_hours(18)\n else:\n a = cls.sunrise(date)\n b = cls.sunset(date)\n t = Clock.days_from_hours(6)\n return a + (2 * (b - a) * (time - t))",
"def sunrise(self, date=None):\n self.site.horizon = self.horizon\n self._set_site_date(date)\n r_date = self.site.next_rising(self.sun)\n r_date = self.date_to_local(r_date.datetime())\n return r_date",
"def sun_utc(self, date, latitude, longitude):\n \n dawn = self.dawn_utc(date, latitude, longitude)\n sunrise = self.sunrise_utc(date, latitude, longitude)\n noon = self.solar_noon_utc(date, longitude)\n sunset = self.sunset_utc(date, latitude, longitude)\n dusk = self.dusk_utc(date, latitude, longitude)\n \n return {'dawn': dawn, 'sunrise': sunrise, 'noon': noon, 'sunset': sunset, 'dusk': dusk}",
"def rahukaalam_utc(self, date, latitude, longitude):\n \n if date is None:\n date = datetime.date.today()\n\n try:\n sunrise = self.sunrise_utc(date, latitude, longitude)\n sunset = self.sunset_utc(date, latitude, longitude)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n \n octant_duration = (sunset - sunrise) / 8\n\n # Mo,Sa,Fr,We,Th,Tu,Su\n octant_index = [1,6,4,5,3,2,7]\n \n weekday = date.weekday()\n octant = octant_index[weekday]\n \n start = sunrise + (octant_duration * octant)\n end = start + octant_duration\n \n return {'start': start, 'end': end}",
"def estimate_sunrise_sunset(self, date, verbose=True):\n if self.source_type != 'solar':\n raise ValueError(\"You can only estimate sunrise and sunset for \"\n \"solar sources.\")\n\n date = pd.Timestamp(date)\n\n if self.diurnal_pattern is None:\n if verbose:\n print(\"Warning: Source {} has no diurnal pattern, estimating \"\n \"sunrise and sunset using average of past data.\"\n .format(self.name), file=sys.stderr)\n return Source.estimate_sunrise_sunset(self, date)\n\n if verbose:\n print(\"{} {}: Using Diurnal Pattern to estimate sunrise and sunset\"\n .format(self.name, date.date()))\n\n diurnal_pattern = self.diurnal_pattern\n daily_pattern = diurnal_pattern[date:date+datetime.timedelta(hours=23)]\n\n sunrise, sunset = None, None\n\n # This will walk through finding first sun hour and first night hour\n for hour, pattern in enumerate(daily_pattern.values):\n if sunrise is None and pattern > 0:\n sunrise = hour\n\n # If sun has risen, and we have not found night and we reach a 0\n if sunrise is not None and sunset is None and pattern == 0:\n sunset = hour\n\n if sunrise is None and sunset is None:\n raise ValueError(\"No solar power was generated on {}\".format(date))\n\n return sunrise, sunset",
"def dawn_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n \n try:\n hourangle = self._hour_angle_sunrise(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dawn(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n \n dawn = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dawn",
"def _sunrise_sunset(self, seconds=None, milliseconds=None, hour=None, freq=None, temp_start=None, temp_end=None, setting=True):\n FUDGE_FACTOR = 0.86\n if hour==None:\n # Work out what the defaults should be\n ## MOVE IT INSIDE THE Override values.\n t0 = temp_start.split('K')[0]\n t1 = temp_end.split('K')[0]\n if t0 > t1:\n temp_step = -100\n x_start = 0\n x_step_amount = 1\n else:\n temp_step = 100\n x_start = 60\n x_step_amount = -1\n temp_0 = int(t0)\n temp_n = int(t1)\n # You can override these defaults if either temp_start or temp_end is set\n if temp_start:\n try:\n _exists = NAMED_COLOURS[temp_start.lower()]\n except (TypeError,ValueError): # Means the starting temp has NOT been provided, use default\n pass\n except KeyError:\n logging.warning(\"Sunrise/sunset: Your starting colour temperature '{}' is not a valid colour temperature\".format(temp_start))\n if temp_end:\n try:\n _exists = NAMED_COLOURS[temp_end.lower()]\n except (TypeError, ValueError): # Means the ending temp has NOT been provided, use default\n pass\n except KeyError:\n logging.warning(\"Sunrise/sunset: Your ending colour temperature '{}' is not a valid colour temperature\".format(temp_end))\n\n #Add in a fudge factor to cater for CPU doing other things:\n #Calculate our z scaling factor:\n target_time = self.clean_time_in_milliseconds(seconds, milliseconds, default_seconds=1, minimum_milliseconds=1000)\n z_factor = (target_time*FUDGE_FACTOR) / 2.564949357\n x_step = x_start\n #And run the loop\n t1 = time.time()\n check = True #We only check the current values on the first run\n for temp in xrange(temp_0,temp_n,temp_step):\n if self._sequence_stop_signal: #Bail if sequence should stop\n return None\n k = u\"%sk\" % temp\n self.fade(k, fade_time=((100+z_factor)/(65-x_step)), check=check) #ms, slows down as sunset progresses\n x_step += x_step_amount\n check=False\n t2 = time.time()\n logging.info(\"%ss, target=%ss\" % ((t2-t1),target_time/1000.0))\n else:\n temp_0=temp_start[0].split('K')[0]\n\t temp_n=temp_end[0].split('K')[0]\n if self.p_alarm != []:\n self.teardown_alarm()\n process_alarm=[]\n for tt in range(0,len(hour)):\n milliseconds=0\n proc_hour=hour[tt]\n\t\talarm_arg=(proc_hour,temp_0,temp_n,FUDGE_FACTOR,freq,seconds[tt],milliseconds)\n \n process_alarm.append(Process(target=self.schedule_alarm,args=alarm_arg))\n [pp.start() for pp in process_alarm] # Start processes in the background which contain the schedule of the alarm\n self.p_alarm=process_alarm",
"def sunrise(self):\r\n try:\r\n return str(self.connect()['sys']['sunrise'])\r\n except:\r\n return '@weather_sunrise'",
"def dusk_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dusk(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n dusk = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dusk",
"def sun(self, date=None, local=True):\n \n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n sun = self.astral.sun_utc(date, self.latitude, self.longitude)\n\n if local:\n for key, dt in sun.items():\n sun[key] = dt.astimezone(self.tz)\n\n return sun",
"def mask_nighttime(lon, lat, date=date, mask_daytime=mask_daytime,\n ref_date=datetime.datetime(1899, 12, 31, 12),\n buffer_hours=buffer_hours, debug=False):\n # --- get lat and lon values from columns\n if debug:\n print((\"--- (s4-1) %s seconds ---\" % (time.time() - start_time)))\n # --- get sunrise and sunset for location\n o = ephem.Observer()\n # set lat (decimal?), lon (decimal?), and date (UTC)\n o.lat = str(lat)\n o.long = str(lon)\n o.date = date\n # planetary body\n s = ephem.Sun()\n if debug:\n print((\"--- (s4-2) %s seconds ---\" % (time.time() - start_time)))\n\n # Compute sun vs observer\n s.compute()\n if debug:\n print((\"--- (s4-3) %s seconds ---\" % (time.time() - start_time)))\n\n # Work out if day or night based on sunrises and sunsets\n mask_value = 0\n try:\n\n # get sunrise time and date\n next_rising = o.next_rising(s)\n next_setting = o.next_setting(s)\n\n # convert to datetime.datetime\n next_rising = add_days(ref_date, next_rising)\n next_setting = add_days(ref_date, next_setting)\n\n # did the sun last rise or set? (inc. any adjustments)\n sun_last_rose = False\n if next_setting < next_rising:\n sun_last_rose = True\n\n # Add buffer to rising/setting if provided with buffer_hours\n if buffer_hours != 0:\n\n # Calculate last rise\n previous_rising = o.previous_rising(s)\n # convert to datetime.datetime\n previous_rising = add_days(ref_date, previous_rising)\n # Calculate last setting\n previous_setting = o.previous_setting(s)\n # convert to datetime.datetime\n previous_setting = add_days(ref_date, previous_setting)\n\n # Calculate absolute difference\n time_from_rise = (date-previous_rising).total_seconds()\n time_till_set = (date-next_setting).total_seconds()\n time_from_set = (date-previous_setting).total_seconds()\n time_till_rise = (date-next_rising).total_seconds()\n\n # If absolutely difference less than buffer\n if abs(time_from_rise)/60./60. <= buffer_hours:\n mask_value = 1\n elif abs(time_till_set)/60./60. <= buffer_hours:\n mask_value = 1\n elif abs(time_from_set)/60./60. < buffer_hours:\n mask_value = 1\n elif abs(time_till_rise)/60./60. < buffer_hours:\n mask_value = 1\n\n # --- Check if daytime or nighttime and mask if condition met.\n if sun_last_rose:\n if mask_daytime:\n # ... and has not set yet, it must be daytime\n if (date < next_setting):\n mask_value = 1\n\n # if the sun last set... (mask nighttime is default)\n else:\n # if mask nighttime (aka not mask_daytime)\n if not mask_daytime:\n # ... and has not risen yet, it must be nighttime\n if (date < next_rising):\n mask_value = 1\n\n # Add gotcha for locations where sun is always up.\n except AlwaysUpError:\n if mask_daytime:\n mask_value = 1\n\n # Add gotcha for locations where sun is always down.\n except NeverUpError:\n if not mask_daytime:\n mask_value = 1\n\n except:\n print('FAIL')\n sys.exit()\n\n # Mask value in array\n return mask_value",
"def start_hour_utc(self) -> int:\n return pulumi.get(self, \"start_hour_utc\")",
"def sunset(self, seconds=None, milliseconds=None, temp_start=None, temp_end=None):\n return self.run_sequence(self._sunrise_sunset, seconds=seconds, milliseconds=milliseconds, temp_start=temp_start, temp_end=temp_end, setting=True)",
"def sunset(cls, date):\n return (date + Clock.days_from_hours(18) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n (((1577917828/1582237828) / 360) *\n (- cls.ascensional_difference(date, cls.LOCATION) +\n (3/4 * cls.solar_sidereal_difference(date)))))",
"def solar_noon_utc(self, date, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n newt = self._jday_to_jcentury(julianday + 0.5 + longitude / 360.0)\n\n eqtime = self._eq_of_time(newt)\n timeUTC = 720.0 + (longitude * 4.0) - eqtime\n\n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n noon = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return noon",
"def _solar_time(date, lon, lat):\n # IDL is computing time_t as hours and fractional minutes\n time_t = ee.Date(date).get('hour').add(\n ee.Date(date).get('minute').divide(60))\n\n # This will return the hour floating point value\n # time_t = ee.Date(date).get('hour').add(ee.Date(date).getFraction('hour'))\n\n # CGM - DOY and hour could be images in order to make these expressions\n julian = _to_jd(date)\n\n # Sunrise time\n julian_ = time_t.divide(24.0).add(julian)\n j_cen = julian_.add(0.5 - 2451545.0).divide(36525.0)\n # CGM - Does the mod happen before or after the multiply\n lon_sun = j_cen.multiply(0.0003032).add(36000.76983) \\\n .multiply(j_cen).mod(360.0).add(280.46646).subtract(360)\n an_sun = j_cen.multiply(-0.0001537).add(35999.05029) \\\n .multiply(j_cen).add(357.52911)\n ecc = j_cen.multiply(0.0000001267).add(0.000042037) \\\n .multiply(j_cen).multiply(-1).add(0.016708634)\n ob_ecl = j_cen.multiply(-0.001813).add(0.00059) \\\n .multiply(j_cen).add(46.815) \\\n .multiply(j_cen).multiply(-1).add(21.448) \\\n .divide(60.0).add(26).divide(60).add(23)\n ob_corr = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).cos() \\\n .multiply(0.00256).add(ob_ecl)\n var_y = ob_corr.divide(2.0).multiply(deg2rad).tan().multiply(\n ob_corr.divide(2.0).multiply(deg2rad).tan())\n eq_t = (\n lon_sun.multiply(2.0).multiply(deg2rad).sin().multiply(var_y)\n .subtract(an_sun.multiply(deg2rad).sin().multiply(ecc).multiply(2.0))\n .add(an_sun.multiply(deg2rad).sin()\n .multiply(lon_sun.multiply(2.0).multiply(deg2rad).cos())\n .multiply(var_y).multiply(ecc).multiply(4.0))\n .subtract(lon_sun.multiply(4.0).multiply(deg2rad).sin()\n .multiply(var_y).multiply(var_y).multiply(0.5))\n .subtract(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(ecc).multiply(ecc).multiply(1.25))\n .multiply(4.0).multiply(rad2deg))\n sun_eq = (\n an_sun.multiply(deg2rad).sin().multiply(\n j_cen.multiply(0.000014).add(0.004817)\n .multiply(j_cen).multiply(-1).add(1.914602))\n .add(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(j_cen.multiply(-0.000101).add(0.019993)))\n .add(an_sun.multiply(3.0).multiply(deg2rad).sin().multiply(0.000289)))\n sun_true = sun_eq.add(lon_sun)\n sun_app = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).sin() \\\n .multiply(-0.00478).subtract(0.00569).add(sun_true)\n\n # CGM - Intentionally not converting back to degrees\n d = ob_corr.multiply(deg2rad).sin() \\\n .multiply(sun_app.multiply(deg2rad).sin()) \\\n .asin()\n\n # CGM - Functions below are lat/lon dependent and can be written as\n # ee.Image expressions\n # CGM - d is still in radians, not converting\n ha_t = lat.expression(\n 'acos((cos(90.833 * pi / 180) / (cos(lat) * cos(d))) - tan(lat) * tan(d))'\n ' * (180 / pi)',\n {'lat': lat, 'd': d, 'pi': math.pi})\n\n # print('\\n{:10s} {:.12f}'.format('julian_', julian_.getInfo()))\n # print('{:10s} {:.12f}'.format('time_t', time_t.getInfo()))\n # print('{:10s} {:.12f}'.format('j_cen', j_cen.getInfo()))\n # print('{:10s} {:.12f}'.format('lon_sun', lon_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('an_sun', an_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('ecc', ecc.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_ecl', ob_ecl.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_corr', ob_corr.getInfo()))\n # print('{:10s} {:.12f}'.format('var_y', var_y.getInfo()))\n # print('{:10s} {:.12f}'.format('eq_t', eq_t.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_eq', sun_eq.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_true', sun_true.getInfo()))\n # 
print('{:10s} {:.12f}'.format('sun_app', sun_app.getInfo()))\n # print('{:10s} {:.12f}'.format('d', d.getInfo()))\n\n return d, eq_t, ha_t",
"def utcTime():\r\n return calendar.timegm(time.gmtime())",
"def equation_of_time(cls, date):\n offset = cls.sine(cls.mean_position(date, cls.ANOMALISTIC_YEAR))\n equation_sun = (offset * angle(57, 18, 0) * (14/360 - (abs(offset) / 1080)))\n return ((cls.daily_motion(date) / 360) * (equation_sun / 360) * cls.SIDEREAL_YEAR)",
"def get_unixtime(self):\n if not self.Complete():\n raise DateTimeError(\"get_unixtime requires complete timepoint\")\n zoffset = self.time.GetZoneOffset()\n if zoffset is None:\n raise DateTimeError(\"get_unixtime requires timezone\")\n elif zoffset == 0:\n zt = self\n else:\n zt = self.ShiftZone(zDirection=0)\n days = zt.date.GetAbsoluteDay() - EPOCH.date.GetAbsoluteDay()\n seconds = zt.time.GetTotalSeconds() - EPOCH.time.GetTotalSeconds()\n return 86400 * days + seconds"
] | [
"0.74266624",
"0.7356725",
"0.72597265",
"0.7091791",
"0.70251125",
"0.7006317",
"0.6827473",
"0.68216705",
"0.68117666",
"0.66464955",
"0.65655184",
"0.6516135",
"0.6488186",
"0.6406055",
"0.63925016",
"0.6312478",
"0.62507564",
"0.6104449",
"0.60976666",
"0.6094293",
"0.60576445",
"0.5848984",
"0.5747845",
"0.5713557",
"0.5574055",
"0.550444",
"0.5495502",
"0.5492533",
"0.5469824",
"0.5416343"
] | 0.79015124 | 0 |
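The sunset_utc, dawn_utc and dusk_utc snippets above all turn a minutes-after-midnight value (timeUTC) into a datetime by hand-carrying seconds, minutes, hours and days. A minimal sketch of the same conversion done with a single timedelta; the helper name minutes_to_utc_datetime and the example values are illustrative and not part of any snippet in this dataset:

import datetime
import pytz

def minutes_to_utc_datetime(date, time_utc_minutes):
    # Anchor at midnight UTC of the requested date; timedelta performs every
    # second/minute/hour/day carry that the snippets above handle manually.
    midnight = datetime.datetime(date.year, date.month, date.day, tzinfo=pytz.utc)
    return midnight + datetime.timedelta(minutes=time_utc_minutes)

# e.g. 1085.3 minutes after midnight -> 18:05:18 UTC on the same day
print(minutes_to_utc_datetime(datetime.date(2020, 6, 21), 1085.3))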
Calculate solar noon time in the UTC timezone. | def solar_noon_utc(self, date, longitude):
julianday = self._julianday(date.day, date.month, date.year)
newt = self._jday_to_jcentury(julianday + 0.5 + longitude / 360.0)
eqtime = self._eq_of_time(newt)
timeUTC = 720.0 + (longitude * 4.0) - eqtime
timeUTC = timeUTC/60.0
hour = int(timeUTC)
minute = int((timeUTC - hour) * 60)
second = int((((timeUTC - hour) * 60) - minute) * 60)
if second > 59:
second -= 60
minute += 1
elif second < 0:
second += 60
minute -= 1
if minute > 59:
minute -= 60
hour += 1
elif minute < 0:
minute += 60
hour -= 1
if hour > 23:
hour -= 24
date += datetime.timedelta(days=1)
elif hour < 0:
hour += 24
date -= datetime.timedelta(days=1)
noon = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)
return noon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solar_noon(self, date=None, local=True):\n \n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n noon = self.astral.solar_noon_utc(date, self.longitude)\n\n if local:\n return noon.astimezone(self.tz) \n else:\n return noon",
"def solar_noon_utc(LonDegE):\n _timezone = array([-180, -172.5, -157.5, -142.5, -127.5, -112.5, -97.5, -82.5, -67.5, -52.5, -37.5, -22.5, -7.5, 7.5, 22.5, 37.5, 52.5, 67.5, 82.5, 97.5, 112.5, 127.5, 142.5, 157.5, 172.5, 180]).repeat(2, 0)[1:-1].reshape(-1, 2)\n for i, (low, high) in enumerate(_timezone):\n if LonDegE >= low:\n if LonDegE <= high:\n return 12 -(-12 + i)",
"def _get_tz():\n return 'UTC'",
"def timezone():\n \n pass",
"def latest_synop_time()-> datetime:\n utc = datetime.utcnow()\n\n if utc.hour < 1:\n utc = utc - timedelta(days=1)\n utc = utc.replace(hour=18)\n elif utc.hour < 7:\n utc = utc.replace(hour=0)\n elif utc.hour < 13:\n utc = utc.replace(hour=6)\n elif utc.hour < 19:\n utc = utc.replace(hour=12)\n else:\n utc = utc.replace(hour=18)\n\n utc.replace(minute=0, second=0)\n return utc",
"def _getUTC(self, config = {} ):\n # Default implementation: get system local time\n return datetime.datetime.utcnow()",
"def _solar_time(date, lon, lat):\n # IDL is computing time_t as hours and fractional minutes\n time_t = ee.Date(date).get('hour').add(\n ee.Date(date).get('minute').divide(60))\n\n # This will return the hour floating point value\n # time_t = ee.Date(date).get('hour').add(ee.Date(date).getFraction('hour'))\n\n # CGM - DOY and hour could be images in order to make these expressions\n julian = _to_jd(date)\n\n # Sunrise time\n julian_ = time_t.divide(24.0).add(julian)\n j_cen = julian_.add(0.5 - 2451545.0).divide(36525.0)\n # CGM - Does the mod happen before or after the multiply\n lon_sun = j_cen.multiply(0.0003032).add(36000.76983) \\\n .multiply(j_cen).mod(360.0).add(280.46646).subtract(360)\n an_sun = j_cen.multiply(-0.0001537).add(35999.05029) \\\n .multiply(j_cen).add(357.52911)\n ecc = j_cen.multiply(0.0000001267).add(0.000042037) \\\n .multiply(j_cen).multiply(-1).add(0.016708634)\n ob_ecl = j_cen.multiply(-0.001813).add(0.00059) \\\n .multiply(j_cen).add(46.815) \\\n .multiply(j_cen).multiply(-1).add(21.448) \\\n .divide(60.0).add(26).divide(60).add(23)\n ob_corr = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).cos() \\\n .multiply(0.00256).add(ob_ecl)\n var_y = ob_corr.divide(2.0).multiply(deg2rad).tan().multiply(\n ob_corr.divide(2.0).multiply(deg2rad).tan())\n eq_t = (\n lon_sun.multiply(2.0).multiply(deg2rad).sin().multiply(var_y)\n .subtract(an_sun.multiply(deg2rad).sin().multiply(ecc).multiply(2.0))\n .add(an_sun.multiply(deg2rad).sin()\n .multiply(lon_sun.multiply(2.0).multiply(deg2rad).cos())\n .multiply(var_y).multiply(ecc).multiply(4.0))\n .subtract(lon_sun.multiply(4.0).multiply(deg2rad).sin()\n .multiply(var_y).multiply(var_y).multiply(0.5))\n .subtract(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(ecc).multiply(ecc).multiply(1.25))\n .multiply(4.0).multiply(rad2deg))\n sun_eq = (\n an_sun.multiply(deg2rad).sin().multiply(\n j_cen.multiply(0.000014).add(0.004817)\n .multiply(j_cen).multiply(-1).add(1.914602))\n .add(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(j_cen.multiply(-0.000101).add(0.019993)))\n .add(an_sun.multiply(3.0).multiply(deg2rad).sin().multiply(0.000289)))\n sun_true = sun_eq.add(lon_sun)\n sun_app = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).sin() \\\n .multiply(-0.00478).subtract(0.00569).add(sun_true)\n\n # CGM - Intentionally not converting back to degrees\n d = ob_corr.multiply(deg2rad).sin() \\\n .multiply(sun_app.multiply(deg2rad).sin()) \\\n .asin()\n\n # CGM - Functions below are lat/lon dependent and can be written as\n # ee.Image expressions\n # CGM - d is still in radians, not converting\n ha_t = lat.expression(\n 'acos((cos(90.833 * pi / 180) / (cos(lat) * cos(d))) - tan(lat) * tan(d))'\n ' * (180 / pi)',\n {'lat': lat, 'd': d, 'pi': math.pi})\n\n # print('\\n{:10s} {:.12f}'.format('julian_', julian_.getInfo()))\n # print('{:10s} {:.12f}'.format('time_t', time_t.getInfo()))\n # print('{:10s} {:.12f}'.format('j_cen', j_cen.getInfo()))\n # print('{:10s} {:.12f}'.format('lon_sun', lon_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('an_sun', an_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('ecc', ecc.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_ecl', ob_ecl.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_corr', ob_corr.getInfo()))\n # print('{:10s} {:.12f}'.format('var_y', var_y.getInfo()))\n # print('{:10s} {:.12f}'.format('eq_t', eq_t.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_eq', sun_eq.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_true', sun_true.getInfo()))\n # 
print('{:10s} {:.12f}'.format('sun_app', sun_app.getInfo()))\n # print('{:10s} {:.12f}'.format('d', d.getInfo()))\n\n return d, eq_t, ha_t",
"def dawn_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n \n try:\n hourangle = self._hour_angle_sunrise(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dawn(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n \n dawn = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dawn",
"def utcTime():\r\n return calendar.timegm(time.gmtime())",
"def nowUTC():\n return datetime.datetime.now(pytz.utc)",
"def _local_time_offset():\n if time.localtime().tm_isdst and time.daylight:\n return -time.altzone\n else:\n return -time.timezone",
"def __get_stock_time(stock_tz: timezone) -> datetime:\n return datetime.now().astimezone(stock_tz)",
"def solar_noon_local(LonDegE):\n return 12.",
"def utc_now():\n realtime = datetime.utcnow()\n realtime = pytz.utc.localize(realtime)\n return realtime",
"def get_current_india_time():\n india_offset = datetime.timedelta(hours=5, minutes=30)\n in_time = datetime.datetime.utcnow() + india_offset\n return in_time",
"def localize_time_utc(non_utc_time):\n return pytz.utc.localize(non_utc_time)",
"def localize_time_utc(non_utc_time):\n return pytz.utc.localize(non_utc_time)",
"def get_time():\n\teastern = timezone('US/Eastern')\n\tnow = datetime.datetime.now(eastern).time()\n\treturn(now)",
"def test_time_to_commute_retrieved_from_google_api_in_posix_is_converted_to_utc(self):\n result = calculate_time_of_commute(\n origin_name='Gatwick Airport',\n destination_name='Kings Cross St Pancras',\n )\n assert type(result) == datetime\n assert result.tzinfo is None # Assert it is a naive datetime",
"def utcnow():\n return datetime.utcnow().replace(tzinfo=UTC)",
"def time_NEURON():\n recorded_time = h.Vector()\n recorded_time.record(h._ref_t)\n return recorded_time",
"def get_utc_now():\n return datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(\"UTC\"))",
"def FromNowUTC(cls):\n t = pytime.time()\n utcTime = pytime.gmtime(t)\n return cls.FromStructTime(utcTime).WithZone(zDirection=0)",
"def get_time():\n # Use this one for production:\n now_time = pendulum.now(tz=pendulum.timezone(\"America/New_York\"))\n # Use this one for testing and modify as needed:\n # now_time = pendulum.datetime(2019, 7, 21, 20, 00, tz='America/New_York')\n\n return now_time",
"def timezone():\n\n return time.timezone",
"def dusk_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dusk(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n dusk = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dusk",
"def __correct_token_time(self, t_time=None):\n\n if t_time is None:\n t_time = time.time()\n\n if time.localtime(t_time).tm_isdst and time.daylight:\n return -time.altzone\n else:\n return -time.timezone",
"def time_zone():\n return timezone('Etc/GMT-10')",
"def local_tz(self):\n return pytz.timezone(self.calendar.timezone)",
"def _local_timestamps(self) -> npt.NDArray[np.int64]:\n if self.tz is None or timezones.is_utc(self.tz):\n # Avoid the copy that would be made in tzconversion\n return self.asi8\n return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)"
] | [
"0.6777714",
"0.6724988",
"0.6343755",
"0.626581",
"0.62460506",
"0.59971875",
"0.59616363",
"0.594989",
"0.5928303",
"0.5893481",
"0.5877765",
"0.58015263",
"0.5749765",
"0.5749219",
"0.5719723",
"0.571865",
"0.571865",
"0.56694263",
"0.5617303",
"0.5612241",
"0.56063336",
"0.5605388",
"0.5579593",
"0.557632",
"0.55701196",
"0.5569826",
"0.55687845",
"0.552343",
"0.5520006",
"0.5497911"
] | 0.74279106 | 0 |
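A short usage sketch for the solar_noon_utc method documented above, assuming an Astral class like the one the neighbouring snippets construct and an older astral 1.x-style import path (both are assumptions, not confirmed by this dataset); the date and longitude are arbitrary examples:

import datetime
from astral import Astral  # assumed import path for the 1.x-style API shown above

a = Astral()
# Solar noon at longitude 0 (Greenwich) on the 2020 June solstice, as an aware UTC datetime
noon_utc = a.solar_noon_utc(datetime.date(2020, 6, 21), 0.0)
print(noon_utc.isoformat())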
Calculate sunset time in the UTC timezone. | def sunset_utc(self, date, latitude, longitude):
julianday = self._julianday(date.day, date.month, date.year)
t = self._jday_to_jcentury(julianday)
eqtime = self._eq_of_time(t)
solarDec = self._sun_declination(t)
try:
hourangle = self._hour_angle_sunset(latitude, solarDec)
except:
raise AstralError('Sun remains below horizon on this day, at this location.')
delta = longitude - degrees(hourangle)
timeDiff = 4.0 * delta
timeUTC = 720.0 + timeDiff - eqtime
newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)
eqtime = self._eq_of_time(newt)
solarDec = self._sun_declination(newt)
hourangle = self._hour_angle_sunset(latitude, solarDec)
delta = longitude - degrees(hourangle)
timeDiff = 4 * delta
timeUTC = 720 + timeDiff - eqtime
timeUTC = timeUTC/60.0
hour = int(timeUTC)
minute = int((timeUTC - hour) * 60)
second = int((((timeUTC - hour) * 60) - minute) * 60)
if second > 59:
second -= 60
minute += 1
elif second < 0:
second += 60
minute -= 1
if minute > 59:
minute -= 60
hour += 1
elif minute < 0:
minute += 60
hour -= 1
if hour > 23:
hour -= 24
date += datetime.timedelta(days=1)
elif hour < 0:
hour += 24
date -= datetime.timedelta(days=1)
sunset = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)
return sunset | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sunset(self, date=None, local=True):\n \n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n sunset = self.astral.sunset_utc(date, self.latitude, self.longitude)\n\n if local:\n return sunset.astimezone(self.tz) \n else:\n return sunset",
"def sunset(cls, date):\n return (date + Clock.days_from_hours(18) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n (((1577917828/1582237828) / 360) *\n (- cls.ascensional_difference(date, cls.LOCATION) +\n (3/4 * cls.solar_sidereal_difference(date)))))",
"def utcTime():\r\n return calendar.timegm(time.gmtime())",
"def sundial_time(cls, tee):\n date = Clock.fixed_from_moment(tee)\n time = mod(tee, 1)\n q = ifloor(4 * time)\n if q == 0:\n a = cls.sunset(date - 1)\n b = cls.sunrise(date)\n t = Clock.days_from_hours(-6)\n elif q == 3:\n a = cls.sunset(date)\n b = cls.sunrise(date + 1)\n t = Clock.days_from_hours(18)\n else:\n a = cls.sunrise(date)\n b = cls.sunset(date)\n t = Clock.days_from_hours(6)\n return a + (2 * (b - a) * (time - t))",
"def GetSunriseSunset(latitude_deg, longitude_deg, utc_datetime, timezone):\n\n # Day of the year\n day = solar.GetDayOfYear(utc_datetime)\n\n # Solar hour angle\n SHA = ((timezone)* 15.0 - longitude_deg)\n\n # Time adjustment\n TT = (279.134+0.985647*day)*math.pi/180\n\n # Time adjustment in hours\n time_adst = ((5.0323 - 100.976*math.sin(TT)+595.275*math.sin(2*TT)+\n 3.6858*math.sin(3*TT) - 12.47*math.sin(4*TT) - 430.847*math.cos(TT)+\n 12.5024*math.cos(2*TT) + 18.25*math.cos(3*TT))/3600)\n \n # Time of noon\n TON = (12 + (SHA/15.0) - time_adst)\n \n sunn = (math.pi/2-(23.45*math.pi/180)*math.tan(latitude_deg*math.pi/180)*\n math.cos(2*math.pi*day/365.25))*(180/(math.pi*15))\n\n # Sunrise_time in hours\n sunrise_time = (TON - sunn + time_adst)\n \n # Sunset_time in hours\n sunset_time = (TON + sunn - time_adst) \n\n sunrise_time_dt = date_with_decimal_hour(utc_datetime, sunrise_time) \n sunset_time_dt = date_with_decimal_hour(utc_datetime, sunset_time) \n\n return sunrise_time_dt, sunset_time_dt",
"def stamp_time(utc):\n return utc.replace(hour=15, minute=30, second=0, microsecond=0)",
"def latest_synop_time()-> datetime:\n utc = datetime.utcnow()\n\n if utc.hour < 1:\n utc = utc - timedelta(days=1)\n utc = utc.replace(hour=18)\n elif utc.hour < 7:\n utc = utc.replace(hour=0)\n elif utc.hour < 13:\n utc = utc.replace(hour=6)\n elif utc.hour < 19:\n utc = utc.replace(hour=12)\n else:\n utc = utc.replace(hour=18)\n\n utc.replace(minute=0, second=0)\n return utc",
"def utcnow(cls):\n t = _time.time()\n return cls.utcfromtimestamp(t)",
"def set_utc(date_time):\n utc = datetime.timezone(datetime.timedelta(0))\n date_time = date_time.replace(tzinfo=utc)\n return date_time",
"def getUtcSeconde(self) -> int:\n ...",
"def _get_tz():\n return 'UTC'",
"def fromutc(self, dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=self)\n return super(UTC, self).fromutc(dt)",
"def dusk_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dusk(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n dusk = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dusk",
"def start_hour_utc(self) -> int:\n return pulumi.get(self, \"start_hour_utc\")",
"def FromNowUTC(cls):\n t = pytime.time()\n utcTime = pytime.gmtime(t)\n return cls.FromStructTime(utcTime).WithZone(zDirection=0)",
"async def sunset(self, aware=False, today=False, days_offset=0) -> dt.datetime:\n return await self.AD.sched.sunset(aware, today=today, days_offset=days_offset)",
"def _get_sun_rise_set_time(self, sun_time):\n if sun_time:\n return datetime.fromtimestamp(sun_time).strftime(self.time_format)\n return sun_time",
"def get_utc(local_tm, tz):\n utc_tz = pytz.utc\n utc_now = datetime.utcnow().replace(tzinfo=utc_tz)\n local_tz = pytz.timezone(tz)\n local_now = local_tz.normalize(utc_now)\n local_alarm = local_now.replace(hour=local_tm.hour, minute=local_tm.minute)\n utc_alarm = utc_tz.normalize(local_alarm)\n return utc_alarm.time()",
"def utcnow():\n return datetime.utcnow().replace(tzinfo=UTC)",
"def utcnow():\r\n if utcnow.override_time:\r\n try:\r\n return utcnow.override_time.pop(0)\r\n except AttributeError:\r\n return utcnow.override_time\r\n return datetime.datetime.utcnow()",
"def utcnow():\n if utcnow.override_time:\n try:\n return utcnow.override_time.pop(0)\n except AttributeError:\n return utcnow.override_time\n return datetime.datetime.utcnow()",
"def utc(self):\n return self._utc",
"def utcnow_ts():\r\n return calendar.timegm(utcnow().timetuple())",
"def _utc_date(self):\n if self.date_stamp == '0':\n return '0'\n else:\n if '.' in self.date_stamp:\n t = datetime.datetime.strptime(self.date_stamp,\n '%Y%m%d%H%M%S.%f')\n else:\n t = datetime.datetime.strptime(self.date_stamp,\n '%Y%m%d%H%M%S')\n tdelta = datetime.timedelta(hours = int(self.tzone[1:3]),\n minutes = int(self.tzone[3:5]))\n \n if self.tzone[0] == '-':\n ut = t - tdelta\n return ut.strftime('%Y%m%d%H%M%S.%f')\n else:\n ut = t + tdelta\n return ut.strftime('%Y%m%d%H%M%S.%f')",
"def expiration_time_utc(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"expiration_time_utc\")",
"def ensure_utc_time(ts: datetime) -> datetime:\n if ts.tzinfo is None:\n return datetime(*ts.timetuple()[:6], tzinfo=UTC_TZ)\n elif str(ts.tzinfo) != str(UTC_TZ):\n return ts.astimezone(UTC_TZ)\n return ts",
"def get_utc_offset():\n timedelta = datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()",
"def get_unixtime(self):\n if not self.Complete():\n raise DateTimeError(\"get_unixtime requires complete timepoint\")\n zoffset = self.time.GetZoneOffset()\n if zoffset is None:\n raise DateTimeError(\"get_unixtime requires timezone\")\n elif zoffset == 0:\n zt = self\n else:\n zt = self.ShiftZone(zDirection=0)\n days = zt.date.GetAbsoluteDay() - EPOCH.date.GetAbsoluteDay()\n seconds = zt.time.GetTotalSeconds() - EPOCH.time.GetTotalSeconds()\n return 86400 * days + seconds",
"def getutv(self):\n t = datetime.datetime.now()\n utc_seconds = (time.mktime(t.timetuple()))\n utc_seconds = int(utc_seconds * 1000)\n return str(utc_seconds)",
"def clear_time_override():\r\n utcnow.override_time = None"
] | [
"0.65699303",
"0.64950114",
"0.63881093",
"0.61808205",
"0.61730003",
"0.6134258",
"0.60923696",
"0.60374415",
"0.60219973",
"0.5875478",
"0.5835468",
"0.5818931",
"0.5818664",
"0.58087045",
"0.5799634",
"0.57659054",
"0.57374483",
"0.5717565",
"0.5715173",
"0.5711675",
"0.57083106",
"0.57057077",
"0.5691319",
"0.5683911",
"0.5680497",
"0.5668587",
"0.5656173",
"0.5650784",
"0.5623827",
"0.5612923"
] | 0.74293166 | 0 |
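The recurring expression timeDiff = 4.0 * delta in these routines is plain arithmetic: the Earth turns 360 degrees in 24 * 60 minutes, so each degree of longitude shifts a solar event by four minutes of clock time. A one-line check (the 2.35-degree value is just an example):

minutes_per_degree = 24 * 60 / 360   # 4.0 minutes of clock time per degree of longitude
print(minutes_per_degree, minutes_per_degree * 2.35)  # 4.0, ~9.4 minutes for 2.35 degrees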
Calculate dusk time in the UTC timezone. | def dusk_utc(self, date, latitude, longitude):
julianday = self._julianday(date.day, date.month, date.year)
if latitude > 89.8:
latitude = 89.8
if latitude < -89.8:
latitude = -89.8
t = self._jday_to_jcentury(julianday)
eqtime = self._eq_of_time(t)
solarDec = self._sun_declination(t)
try:
hourangle = self._hour_angle_sunset(latitude, solarDec)
except:
raise AstralError('Sun remains below horizon on this day, at this location.')
delta = longitude - degrees(hourangle)
timeDiff = 4.0 * delta
timeUTC = 720.0 + timeDiff - eqtime
newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)
eqtime = self._eq_of_time(newt)
solarDec = self._sun_declination(newt)
hourangle = self._hour_angle_dusk(latitude, solarDec, self._depression)
delta = longitude - degrees(hourangle)
timeDiff = 4 * delta
timeUTC = 720 + timeDiff - eqtime
timeUTC = timeUTC/60.0
hour = int(timeUTC)
minute = int((timeUTC - hour) * 60)
second = int((((timeUTC - hour) * 60) - minute) * 60)
if second > 59:
second -= 60
minute += 1
elif second < 0:
second += 60
minute -= 1
if minute > 59:
minute -= 60
hour += 1
elif minute < 0:
minute += 60
hour -= 1
if hour > 23:
hour -= 24
date += datetime.timedelta(days=1)
elif hour < 0:
hour += 24
date -= datetime.timedelta(days=1)
dusk = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)
return dusk | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def utcTime():\r\n return calendar.timegm(time.gmtime())",
"def _get_tz():\n return 'UTC'",
"def get_time():\n\teastern = timezone('US/Eastern')\n\tnow = datetime.datetime.now(eastern).time()\n\treturn(now)",
"def timezone():\n \n pass",
"def nowUTC():\n return datetime.datetime.now(pytz.utc)",
"def dawn_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n \n try:\n hourangle = self._hour_angle_sunrise(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dawn(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n \n dawn = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dawn",
"def get_current_india_time():\n india_offset = datetime.timedelta(hours=5, minutes=30)\n in_time = datetime.datetime.utcnow() + india_offset\n return in_time",
"def timezone():\n\n return time.timezone",
"def get_time():\n # Use this one for production:\n now_time = pendulum.now(tz=pendulum.timezone(\"America/New_York\"))\n # Use this one for testing and modify as needed:\n # now_time = pendulum.datetime(2019, 7, 21, 20, 00, tz='America/New_York')\n\n return now_time",
"def _getUTC(self, config = {} ):\n # Default implementation: get system local time\n return datetime.datetime.utcnow()",
"def utc_now():\n realtime = datetime.utcnow()\n realtime = pytz.utc.localize(realtime)\n return realtime",
"def localtime2utc(date):\n return date + (datetime.utcnow() - datetime.now())",
"def get_utc_now():\n return datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(\"UTC\"))",
"def utcnow():\n return datetime.utcnow().replace(tzinfo=UTC)",
"def _get_datetime():\n pst_now = datetime.datetime.utcnow().astimezone(pytz.timezone(\"America/Los_Angeles\"))\n return pst_now.strftime(\"%a %b %e %H:%M %Z %G\")",
"def get_timezone():\n return dates.get_timezone(_get_tz())",
"def now_utc() -> datetime:\n return datetime.now(timezone.utc)",
"async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).time()",
"def utc_now():\n return datetime.now(tz=timezone.utc)",
"def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim",
"def getUtcSeconde(self) -> int:\n ...",
"def get_utc_offset():\n timedelta = datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()",
"def __get_stock_time(stock_tz: timezone) -> datetime:\n return datetime.now().astimezone(stock_tz)",
"def _local_time_offset():\n if time.localtime().tm_isdst and time.daylight:\n return -time.altzone\n else:\n return -time.timezone",
"def utcnow():\n utctime = datetime.datetime.utcnow()\n utcstamp = utctime.strftime(\"%Y-%m-%d %H:%M:%S\")\n return utcstamp",
"def utcnow(cls):\n t = _time.time()\n return cls.utcfromtimestamp(t)",
"def getutv(self):\n t = datetime.datetime.now()\n utc_seconds = (time.mktime(t.timetuple()))\n utc_seconds = int(utc_seconds * 1000)\n return str(utc_seconds)",
"def utc():\n return date_from_utc(dt.utcnow())",
"def time_zone():\n return timezone('Etc/GMT-10')",
"def utcnow() -> datetime.datetime:\n return datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)"
] | [
"0.69508314",
"0.6756243",
"0.646933",
"0.64476836",
"0.64054435",
"0.6378315",
"0.6367498",
"0.6344899",
"0.630024",
"0.6293508",
"0.62492496",
"0.62225604",
"0.6172558",
"0.615256",
"0.6152383",
"0.6115891",
"0.61116207",
"0.610577",
"0.6104182",
"0.6066876",
"0.606603",
"0.60462725",
"0.6027401",
"0.6025502",
"0.5996779",
"0.59947246",
"0.5994004",
"0.5986161",
"0.5967244",
"0.5965447"
] | 0.7123817 | 0 |
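The main difference between sunset_utc and dusk_utc above is the zenith angle fed into the hour-angle step: roughly 90.833 degrees for sunset versus 90 degrees plus the depression (6 degrees for civil twilight). A small sketch of that standard formula with illustrative latitude and declination values; hour_angle is a hypothetical helper, not a function from the library:

from math import acos, cos, degrees, radians, tan

def hour_angle(latitude_deg, declination_deg, zenith_deg):
    # cos(H) = cos(zenith) / (cos(lat) * cos(dec)) - tan(lat) * tan(dec)
    lat, dec, zen = (radians(v) for v in (latitude_deg, declination_deg, zenith_deg))
    return degrees(acos(cos(zen) / (cos(lat) * cos(dec)) - tan(lat) * tan(dec)))

print(hour_angle(51.5, 23.0, 90.833))  # sunset hour angle in degrees
print(hour_angle(51.5, 23.0, 96.0))    # civil dusk hour angle in degrees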
Calculate rahukaalam times in the UTC timezone. | def rahukaalam_utc(self, date, latitude, longitude):
if date is None:
date = datetime.date.today()
try:
sunrise = self.sunrise_utc(date, latitude, longitude)
sunset = self.sunset_utc(date, latitude, longitude)
except:
raise AstralError('Sun remains below horizon on this day, at this location.')
octant_duration = (sunset - sunrise) / 8
# Mo,Sa,Fr,We,Th,Tu,Su
octant_index = [1,6,4,5,3,2,7]
weekday = date.weekday()
octant = octant_index[weekday]
start = sunrise + (octant_duration * octant)
end = start + octant_duration
return {'start': start, 'end': end} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rahukaalam(self, date=None, local=True):\n\n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n rahukaalam = self.astral.rahukaalam_utc(date, self.latitude, self.longitude)\n\n if local:\n for key, dt in rahukaalam.items():\n rahukaalam[key] = dt.astimezone(self.tz)\n \n return rahukaalam",
"def timezone():\n \n pass",
"def set_time_by_timezone(df):\n df = set_city_time_by_timezone(df, 1078, 3)\n df = set_city_time_by_timezone(df, 22390, 4)\n df = set_city_time_by_timezone(df, 22430, 4)\n df = set_city_time_by_timezone(df, 22438, 5)\n return df",
"def get_ph_time(as_array=False):\n utc = timezone('UTC')\n phtz = timezone('Asia/Manila')\n now = utc.localize(datetime.utcnow())\n now = now.astimezone(phtz)\n if as_array:\n return [now.year, now.month, now.day, now.hour, now.minute, now.second]\n else:\n return datetime(now.year, now.month, now.day, now.hour, now.minute, now.second)",
"def utcTime():\r\n return calendar.timegm(time.gmtime())",
"def _get_tz():\n return 'UTC'",
"def get_utc(local_tm, tz):\n utc_tz = pytz.utc\n utc_now = datetime.utcnow().replace(tzinfo=utc_tz)\n local_tz = pytz.timezone(tz)\n local_now = local_tz.normalize(utc_now)\n local_alarm = local_now.replace(hour=local_tm.hour, minute=local_tm.minute)\n utc_alarm = utc_tz.normalize(local_alarm)\n return utc_alarm.time()",
"def hmstora(rah,ram,ras):\n\thrs = (float(rah)+(float(ram)/60)+(float(ras)/3600.0)) % 24\n\n\treturn 15*hrs",
"def _local_timestamps(self) -> npt.NDArray[np.int64]:\n if self.tz is None or timezones.is_utc(self.tz):\n # Avoid the copy that would be made in tzconversion\n return self.asi8\n return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)",
"def get_local(utc_time, tz):\n utc_tz = pytz.utc\n utc_now = datetime.utcnow().replace(tzinfo=utc_tz)\n utc_alarm = utc_now.replace(hour=utc_time.hour, minute=utc_time.minute)\n local_tz = pytz.timezone(tz)\n local_alarm = local_tz.normalize(utc_alarm)\n return local_alarm.time()",
"def get_times(my_vars):\n base_time = my_vars['base_time'].getValue()\n try:\n times=my_vars['time']\n except KeyError:\n times = my_vars['time_offset']\n\n ts = []\n for time in times:\n temp = datetime.utcfromtimestamp(base_time+time)\n if (temp.minute == 0) :\n ts.append(temp)\n return ts",
"def localtime2utc(date):\n return date + (datetime.utcnow() - datetime.now())",
"def _get_time_utc(time_utc_str):\n dt = datetime.strptime(time_utc_str, TIME_FORMAT)\n return int(calendar.timegm(dt.utctimetuple()))",
"def computeSunTime(self, latitude, longitude, startDate, endDate): \n self.sun = sun(lat=latitude, long=longitude)\n dateTime = datetime.datetime.combine(startDate, datetime.time(hour=8))\n while dateTime.date() <= endDate: \n daytimeStart, daytimeEnd = self.computeDaytimeStartEnd(dateTime)\n self.sunriseTimeDict[dateTime.date()] = daytimeStart\n self.sunsetTimeDict[dateTime.date()] = daytimeEnd\n dateTime += self.oneDayDelta",
"def ut1_utc_rate(self):\n values = self._interpolate_table(\"ut1_utc\", leap_second_correction=True, derivative_order=1)\n # values += self._corrections((\"ortho_eop\", iers.ortho_eop, 2, 1e-6), (\"utlibr\", iers.utlibr, 0, 1e-6))\n\n # Low frequency tides\n # if \"rg_zont2\" in self.models:\n # correction_cache = self._correction_cache.setdefault(\"rg_zont2\", dict())\n # # Julian centuries since J2000\n # t_julian_centuries = (self.time.tt.jd - 2451545.0) / 36525\n #\n # if self.time.isscalar:\n # mjd = self.time.tt.mjd\n # if mjd not in correction_cache:\n # correction_cache[mjd] = iers.rg_zont2(t_julian_centuries)[0]\n # dut1_corr = correction_cache[mjd]\n # else:\n # dut1_corr = list()\n # for t in self.time.tt:\n # if t.mjd not in correction_cache:\n # t_julian_centuries = (t.tt.jd - 2451545.0) / 36525\n # correction_cache[t.mjd] = iers.rg_zont2(t_julian_centuries)[0]\n # dut1_corr.append(correction_cache[t.mjd])\n #\n # values += dut1_corr\n # return values\n return values",
"def localize_time_utc(non_utc_time):\n return pytz.utc.localize(non_utc_time)",
"def localize_time_utc(non_utc_time):\n return pytz.utc.localize(non_utc_time)",
"def sunset_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n sunset = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return sunset",
"def test_timezones(self):\n a_user = User.objects.create()\n user = VSBUser.objects.create(user=a_user)\n\n today_datetime = timezone.datetime.today()\n today_datetime = timezone.datetime(year=today_datetime.year, month=today_datetime.month, day=today_datetime.day)\n\n tomorrow_late_EST = timezone.make_aware(today_datetime + timezone.timedelta(hours=23), timezone=pytz.timezone('US/Eastern'))\n tomorrow_last_UTC = (tomorrow_late_EST + timezone.timedelta(minutes=10)).astimezone(pytz.utc)\n ETC_event = CalenderEvent.objects.create(user=user, time=tomorrow_late_EST)\n UTC_event = CalenderEvent.objects.create(user=user, time=tomorrow_last_UTC)\n\n received = util.bucket_calenderevents(user.calenderevent_set)\n\n self.assertEqual(received, [[ETC_event, UTC_event]], msg=\"CalenderEvents.timezones: Timezones failed to align.\")",
"def ut1_utc(self):\n values = self._interpolate_table(\"ut1_utc\", leap_second_correction=True)\n values += self._corrections((\"ortho_eop\", iers.ortho_eop, 2, 1e-6), (\"utlibr\", iers.utlibr, 0, 1e-6))\n\n # low frequency tides\n if \"rg_zont2\" in self.models:\n correction_cache = self._correction_cache.setdefault(\"rg_zont2\", dict())\n # Julian centuries since J2000\n t_julian_centuries = (self.time.tt.jd - 2451545.0) / 36525\n\n if self.time.isscalar:\n mjd = self.time.tt.mjd\n if mjd not in correction_cache:\n correction_cache[mjd] = iers.rg_zont2(t_julian_centuries)[0]\n dut1_corr = correction_cache[mjd]\n else:\n dut1_corr = list()\n for t in self.time.tt:\n if t.mjd not in correction_cache:\n t_julian_centuries = (t.tt.jd - 2451545.0) / 36525\n correction_cache[t.mjd] = iers.rg_zont2(t_julian_centuries)[0]\n dut1_corr.append(correction_cache[t.mjd])\n\n values += dut1_corr\n return values",
"def tai_to_utc(tai, config, time_format=\"%Y/%j-%H:%M:%S\"):\n epoch = dt.datetime.strptime(config.config['runtime']['epoch'], \"%m/%d/%Y-%H:%M:%S\")\n try:\n utc = epoch + dt.timedelta(seconds=int(tai))\n except OverflowError:\n utc = epoch\n return utc.strftime(time_format)",
"def ensure_tucson_time():\n if 'TZ' not in os.environ.keys() or os.environ['TZ'] != 'US/Arizona':\n os.environ['TZ'] = 'US/Arizona'\n time.tzset()",
"def brasilia_time():\n brasilia_time = pd.Timestamp.now('UTC') - pd.Timedelta(hours=3)\n return brasilia_time",
"def time_to_live_utc(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"time_to_live_utc\")",
"def stamp_time(utc):\n return utc.replace(hour=15, minute=30, second=0, microsecond=0)",
"def localize_time(self, apitime):\n return self.feedzone.localize(apitime).astimezone(self.localzone)",
"def time_zone():\n return timezone('Etc/GMT-10')",
"def round_trip_time(self):\n ...",
"def timezone():\n\n return time.timezone",
"def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim"
] | [
"0.6330134",
"0.61681116",
"0.60523003",
"0.59781736",
"0.5969716",
"0.56076247",
"0.55986744",
"0.55825686",
"0.5552908",
"0.5434768",
"0.53342485",
"0.53334755",
"0.53267014",
"0.5321076",
"0.5287703",
"0.5276938",
"0.5276938",
"0.5272016",
"0.52586037",
"0.5246418",
"0.52275836",
"0.52225786",
"0.52136314",
"0.5209741",
"0.52077013",
"0.5198716",
"0.5163273",
"0.51616657",
"0.51586884",
"0.5158453"
] | 0.6230036 | 1 |
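A worked example of the octant arithmetic used by rahukaalam_utc above, with made-up sunrise and sunset datetimes for a Monday (weekday() == 0, which selects octant 1 from the index list):

import datetime
import pytz

sunrise = datetime.datetime(2020, 6, 1, 4, 50, tzinfo=pytz.utc)   # illustrative values only
sunset = datetime.datetime(2020, 6, 1, 19, 10, tzinfo=pytz.utc)

octant_duration = (sunset - sunrise) / 8
octant_index = [1, 6, 4, 5, 3, 2, 7]                 # Mo,Sa,Fr,We,Th,Tu,Su, as in the source
octant = octant_index[datetime.date(2020, 6, 1).weekday()]
start = sunrise + octant_duration * octant
end = start + octant_duration
print(start.time(), end.time())                      # 06:37:30 to 08:25:00 UTC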
Calculate the azimuth of the sun in the UTC timezone. | def solar_azimuth(self, dateandtime, latitude, longitude):
if latitude > 89.8:
latitude = 89.8
if latitude < -89.8:
latitude = -89.8
zone = -dateandtime.utcoffset().seconds / 3600.0
utc_datetime = dateandtime.astimezone(pytz.utc)
timenow = utc_datetime.hour + (utc_datetime.minute / 60.0) + (utc_datetime.second / 3600)
JD = self._julianday(dateandtime.day, dateandtime.month, dateandtime.year)
t = self._jday_to_jcentury(JD + timenow / 24.0)
theta = self._sun_declination(t)
Etime = self._eq_of_time(t)
eqtime = Etime
solarDec = theta # in degrees
solarTimeFix = eqtime - (4.0 * longitude) + (60 * zone)
trueSolarTime = dateandtime.hour * 60.0 + dateandtime.minute + dateandtime.second / 60.0 + solarTimeFix
# in minutes
while trueSolarTime > 1440:
trueSolarTime = trueSolarTime - 1440
hourangle = trueSolarTime / 4.0 - 180.0
# Thanks to Louis Schwarzmayr for the next line:
if hourangle < -180:
hourangle = hourangle + 360.0
harad = radians(hourangle)
csz = sin(radians(latitude)) * sin(radians(solarDec)) + \
cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)
if csz > 1.0:
csz = 1.0
elif csz < -1.0:
csz = -1.0
zenith = degrees(acos(csz))
azDenom = (cos(radians(latitude)) * sin(radians(zenith)))
if (abs(azDenom) > 0.001):
azRad = ((sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))) / azDenom
if abs(azRad) > 1.0:
if azRad < 0:
azRad = -1.0
else:
azRad = 1.0
azimuth = 180.0 - degrees(acos(azRad))
if hourangle > 0.0:
azimuth = -azimuth
else:
if latitude > 0.0:
azimuth = 180.0
else:
                        azimuth = 0.0
if azimuth < 0.0:
azimuth = azimuth + 360.0
return azimuth | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solar_azimuth(self, dateandtime=None):\n\n if self.astral is None:\n self.astral = Astral()\n\n if dateandtime is None:\n dateandtime = datetime.datetime.now(tz=self.tz)\n \n return self.astral.solar_azimuth(dateandtime, self.latitude, self.longitude)",
"def _sun_north_angle_to_z(frame):\n # Find the Sun center in HGS at the frame's observation time(s)\n sun_center_repr = SphericalRepresentation(0*u.deg, 0*u.deg, 0*u.km)\n # The representation is repeated for as many times as are in obstime prior to transformation\n sun_center = SkyCoord(sun_center_repr._apply('repeat', frame.obstime.size),\n frame=HGS, obstime=frame.obstime)\n\n # Find the Sun north in HGS at the frame's observation time(s)\n # Only a rough value of the solar radius is needed here because, after the cross product,\n # only the direction from the Sun center to the Sun north pole matters\n sun_north_repr = SphericalRepresentation(0*u.deg, 90*u.deg, 690000*u.km)\n # The representation is repeated for as many times as are in obstime prior to transformation\n sun_north = SkyCoord(sun_north_repr._apply('repeat', frame.obstime.size),\n frame=HGS, obstime=frame.obstime)\n\n # Find the Sun center and Sun north in the frame's coordinate system\n sky_normal = sun_center.transform_to(frame).data.to_cartesian()\n sun_north = sun_north.transform_to(frame).data.to_cartesian()\n\n # Use cross products to obtain the sky projections of the two vectors (rotated by 90 deg)\n sun_north_in_sky = sun_north.cross(sky_normal)\n z_in_sky = CartesianRepresentation(0, 0, 1).cross(sky_normal)\n\n # Normalize directional vectors\n sky_normal /= sky_normal.norm()\n sun_north_in_sky /= sun_north_in_sky.norm()\n z_in_sky /= z_in_sky.norm()\n\n # Calculate the signed angle between the two projected vectors\n cos_theta = sun_north_in_sky.dot(z_in_sky)\n sin_theta = sun_north_in_sky.cross(z_in_sky).dot(sky_normal)\n angle = np.arctan2(sin_theta, cos_theta).to('deg')\n\n # If there is only one time, this function's output should be scalar rather than array\n if angle.size == 1:\n angle = angle[0]\n\n return Angle(angle)",
"def get_mean_sun_angles(self) -> (float, float):\n # Get MTD XML file\n root, _ = self.read_mtd()\n\n # Open zenith and azimuth angle\n zenith_angle = float(root.findtext(\".//SolarZenith\"))\n azimuth_angle = float(root.findtext(\".//SolarAzimuth\"))\n\n return azimuth_angle, zenith_angle",
"def get_azimuth(self):\n self.degrees = self.azimuth_encoder.get_degrees()\n self.tele_azimuth = self.Calculations.convert_degrees(self.degrees)\n return self.tele_azimuth",
"def imu_get_azimuth(self):\n return self.imu.get_azimuth()",
"def sunrise_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunrise(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_sunrise(latitude, solarDec)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n sunrise = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return sunrise",
"def get_azimuth(self, degrees=True):\n if degrees:\n return math.degrees(self.current_location.az)\n else:\n return self.current_location.az",
"def azimuth(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[0];",
"def set_azimuth(self):\n self.azimuth = self.Calculations.convert_to_azimuth( self.declination, self.right_ascension, self.Latitude, self.LHA)\n if self.azimuth < 0:\n self.azimuth = self.azimuth + 360.0\n return self.azimuth\n else:\n pass\n return self.azimuth\n print('azimuth set to', self.azimuth)",
"def getAzimuthAngle(self):\n return self._azimuth",
"def looks_azimuth(self) -> Optional[int]:\n return self._get_property(LOOKS_AZIMUTH_PROP, int)",
"def sunrise(cls, date):\n return (date + Clock.days_from_hours(6) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n ((1577917828/1582237828 / 360) *\n (cls.ascensional_difference(date, cls.LOCATION) +\n (1/4 * cls.solar_sidereal_difference(date)))))",
"def azimuth(poly):\n num = len(poly) - 1\n vec = unit_normal(poly[0], poly[1], poly[num])\n vec_azi = np.array([vec[0], vec[1], 0])\n vec_n = np.array([0, 1, 0])\n # update by Santosh\n # angle2vecs gives the smallest angle between the vectors\n # so for a west wall angle2vecs will give 90\n # the following 'if' statement will make sure 270 is returned\n x_vector = vec_azi[0]\n if x_vector < 0:\n return 360 - angle2vecs(vec_azi, vec_n)\n else:\n return angle2vecs(vec_azi, vec_n)",
"def resolution_azimuth(self) -> Optional[float]:\n return self._get_property(RESOLUTION_AZIMUTH_PROP, float)",
"def azimuth(self, right: GeoSpatialValue) -> ir.FloatingValue:\n return ops.GeoAzimuth(self, right).to_expr()",
"def alt_sunrise(cls, date):\n rise = cls.UJJAIN.dawn(date, angle(0, 47, 0))\n return 1/24 * 1/60 * iround(rise * 24 * 60)",
"def fun_azimuth(self):\n\n energy_kev = self.energy_kev.get()\n hkl = self.hkl_magnetic.get()\n hkl = hkl.replace(',', ' ') # remove commas\n hkl = hkl.replace('(', '').replace(')', '') # remove brackets\n hkl = hkl.replace('[', '').replace(']', '') # remove brackets\n hkl = np.fromstring(hkl, sep=' ')\n\n azi = self.azim_zero.get()\n azi = azi.replace(',', ' ') # remove commas\n azi = azi.replace('(', '').replace(')', '') # remove brackets\n azi = azi.replace('[', '').replace(']', '') # remove brackets\n azi = np.fromstring(azi, sep=' ')\n\n pol = self.polval.get()\n if pol == u'\\u03c3-\\u03c3':\n pol = 's-s'\n elif pol == u'\\u03c3-\\u03c0':\n pol = 's-p'\n elif pol == u'\\u03c0-\\u03c3':\n pol = 'p-s'\n else:\n pol = 'p-p'\n\n F0 = self.resF0.get()\n F1 = self.resF1.get()\n F2 = self.resF2.get()\n\n isres = self.isres.get()\n if isres:\n # Resonant scattering\n self.xtl.Plot.simulate_azimuth_resonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol,\n F0=F0, F1=F1, F2=F2)\n plt.show()\n else:\n # Non-Resonant scattering\n self.xtl.Plot.simulate_azimuth_nonresonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol)\n plt.show()",
"def mean_earth_sun_distance(utc_datetime): \n\n return (1 - (0.0335 * math.sin(360 * ((solar.GetDayOfYear(utc_datetime)) - 94)) / (365)))",
"def _altaz_rotation(self, jd):\n R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)\n return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)",
"def omega_sun_snodgrass90(lat):\n return differential_rotation(lat, 14.71, -2.39, -1.78)",
"def pixel_spacing_azimuth(self) -> Optional[float]:\n return self._get_property(PIXEL_SPACING_AZIMUTH_PROP, float)",
"def sunset_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n sunset = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return sunset",
"def rahukaalam_utc(self, date, latitude, longitude):\n \n if date is None:\n date = datetime.date.today()\n\n try:\n sunrise = self.sunrise_utc(date, latitude, longitude)\n sunset = self.sunset_utc(date, latitude, longitude)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n \n octant_duration = (sunset - sunrise) / 8\n\n # Mo,Sa,Fr,We,Th,Tu,Su\n octant_index = [1,6,4,5,3,2,7]\n \n weekday = date.weekday()\n octant = octant_index[weekday]\n \n start = sunrise + (octant_duration * octant)\n end = start + octant_duration\n \n return {'start': start, 'end': end}",
"def sun(self, date=None, local=True):\n \n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n sun = self.astral.sun_utc(date, self.latitude, self.longitude)\n\n if local:\n for key, dt in sun.items():\n sun[key] = dt.astimezone(self.tz)\n\n return sun",
"def azimuth(vv, v0, v1):\n with np.errstate(divide='ignore', invalid='ignore'):\n n0 = np.cross(v0, v1)\n n0 /= np.dual.norm(n0, axis=-1)[..., np.newaxis]\n nn = np.cross(v0, vv)\n nn /= np.dual.norm(nn, axis=-1)[..., np.newaxis]\n\n azi = np.arccos(np.sum(nn * n0, -1))\n if len(np.shape(azi)) > 0:\n azi[np.dot(vv, n0) < 0] *= -1\n # arbitrary angle where vv is (anti)parallel to v0\n azi[np.isnan(azi)] = 0\n elif np.isnan(azi):\n return 0\n elif np.dot(vv, v0) < 1 and azi > 0:\n azi *= -1\n\n return azi",
"def GetSunriseSunset(latitude_deg, longitude_deg, utc_datetime, timezone):\n\n # Day of the year\n day = solar.GetDayOfYear(utc_datetime)\n\n # Solar hour angle\n SHA = ((timezone)* 15.0 - longitude_deg)\n\n # Time adjustment\n TT = (279.134+0.985647*day)*math.pi/180\n\n # Time adjustment in hours\n time_adst = ((5.0323 - 100.976*math.sin(TT)+595.275*math.sin(2*TT)+\n 3.6858*math.sin(3*TT) - 12.47*math.sin(4*TT) - 430.847*math.cos(TT)+\n 12.5024*math.cos(2*TT) + 18.25*math.cos(3*TT))/3600)\n \n # Time of noon\n TON = (12 + (SHA/15.0) - time_adst)\n \n sunn = (math.pi/2-(23.45*math.pi/180)*math.tan(latitude_deg*math.pi/180)*\n math.cos(2*math.pi*day/365.25))*(180/(math.pi*15))\n\n # Sunrise_time in hours\n sunrise_time = (TON - sunn + time_adst)\n \n # Sunset_time in hours\n sunset_time = (TON + sunn - time_adst) \n\n sunrise_time_dt = date_with_decimal_hour(utc_datetime, sunrise_time) \n sunset_time_dt = date_with_decimal_hour(utc_datetime, sunset_time) \n\n return sunrise_time_dt, sunset_time_dt",
"def zodiac(cls, tee):\n return quotient(float(cls.solar_longitude(tee)), 30) + 1",
"def test_az_za():\n Nside = 128\n obs = observatory.Observatory(latitude, longitude, fov=20, nside=Nside)\n center = [0, 0]\n lon, lat = [5, 0]\n ind0 = hp.ang2pix(Nside, lon, lat, lonlat=True)\n lon, lat = hp.pix2ang(Nside, ind0, lonlat=True)\n za, az, pix = obs.calc_azza(center, return_inds=True)\n ind = np.where(pix == ind0)\n # lon = longitude of the source, which is set to 5deg off zenith (hence, zenith angle)\n assert np.isclose(np.degrees(za[ind]), lon)\n assert np.isclose(np.degrees(az[ind]), 90.0)",
"def _orientation(location, time='now'):\n obstime = parse_time(time)\n\n # Define the frame where its Z axis is aligned with local zenith\n local_frame = AltAz(obstime=obstime, location=location)\n\n return _sun_north_angle_to_z(local_frame)",
"def calc_surface_azimuth(xdir, ydir, B):\n B = radians(B)\n teta_z = degrees(asin(xdir / sin(B)))\n # set the surface azimuth with on the sing convention (E,N)=(+,+)\n if xdir < 0:\n if ydir <0:\n surface_azimuth = 180 + teta_z # (xdir,ydir) = (-,-)\n else: surface_azimuth = 360 + teta_z # (xdir,ydir) = (-,+)\n elif ydir < 0:\n surface_azimuth = 180 + teta_z # (xdir,ydir) = (+,-)\n else: surface_azimuth = teta_z # (xdir,ydir) = (+,+)\n return surface_azimuth # degree"
] | [
"0.67131555",
"0.6504118",
"0.64523375",
"0.6439935",
"0.6436878",
"0.641925",
"0.6345609",
"0.6120051",
"0.6109093",
"0.6081307",
"0.6055947",
"0.6041341",
"0.6014488",
"0.5996403",
"0.5920246",
"0.5873155",
"0.586566",
"0.5822988",
"0.58073103",
"0.5778373",
"0.573385",
"0.5699704",
"0.5693492",
"0.5680611",
"0.5665804",
"0.5636087",
"0.56360525",
"0.5617519",
"0.56039816",
"0.5575141"
] | 0.6639568 | 1 |
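A minimal, self-contained sketch of the timezone handling at the top of the solar_azimuth snippet above: derive the west-positive zone offset in hours and the fractional UTC hour from a timezone-aware datetime. Only pytz and the standard library are needed; the city and date are illustrative assumptions, and total_seconds() is used instead of the snippet's .seconds, which is safer for zones west of Greenwich.

import datetime
import pytz

# Timezone-aware local time (illustrative values).
dateandtime = pytz.timezone("Europe/Paris").localize(
    datetime.datetime(2024, 6, 21, 14, 30, 0))

# West-positive zone offset in hours, matching the snippet's convention.
zone = -dateandtime.utcoffset().total_seconds() / 3600.0

# Fractional UTC hour, as used to build the Julian century argument.
utc_dt = dateandtime.astimezone(pytz.utc)
timenow = utc_dt.hour + utc_dt.minute / 60.0 + utc_dt.second / 3600.0

print(zone, round(timenow, 4))  # -2.0 12.5 for the values above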
Calculates the phase of the moon on the specified date. | def moon_phase(self, date):
jd = self._julianday(date.day, date.month, date.year)
DT = pow((jd - 2382148), 2) / (41048480*86400)
T = (jd + DT - 2451545.0) / 36525
T2 = pow(T,2)
T3 = pow(T,3)
D = 297.85 + (445267.1115*T) - (0.0016300*T2) + (T3/545868)
D = radians(self._proper_angle(D))
M = 357.53 + (35999.0503*T)
M = radians(self._proper_angle(M))
M1 = 134.96 + (477198.8676*T) + (0.0089970*T2) + (T3/69699)
M1 = radians(self._proper_angle(M1))
elong = degrees(D) + 6.29*sin(M1)
elong -= 2.10*sin(M)
elong += 1.27*sin(2*D - M1)
elong += 0.66*sin(2*D)
elong = self._proper_angle(elong)
moon = int(floor(((elong + 6.43) / 360) * 28))
if moon == 28:
moon = 0
return moon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def moon_phase(self, date=None):\n self._set_site_date(date)\n self.moon.compute(self.site)\n return self.moon.moon_phase",
"def equation_of_time(cls, date):\n offset = cls.sine(cls.mean_position(date, cls.ANOMALISTIC_YEAR))\n equation_sun = (offset * angle(57, 18, 0) * (14/360 - (abs(offset) / 1080)))\n return ((cls.daily_motion(date) / 360) * (equation_sun / 360) * cls.SIDEREAL_YEAR)",
"def phase_to_day(phase):\n if phase < 0:\n phase += 2*np.pi\n return phase*(365./(2*np.pi))",
"def phase_to_day(phase):\n if phase < 0:\n phase += 2*np.pi\n return phase*(365./(2*np.pi))",
"def MoonPhase(time):\n return PairLongitude(Body.Moon, Body.Sun, time)",
"def calc_phase(self, time):\n dur = self.get_duration()\n phase = time / dur\n\n if self.enable_loop():\n phase -= np.floor(phase)\n else:\n phase = np.clip(phase, 0.0, 1.0)\n\n return phase",
"def calc_phase(p, t):\n\n return (t % p)/p",
"def phase(self, hjd):\n # 2009-09-28 14:07 IJC: Implemented object-oriented version\n return getorbitalphase(self, hjd)",
"def moon_phase(\n datetime_index,\n epsilon=1e-6,\n epoch=2444237.905,\n ecliptic_longitude_epoch=278.833540,\n ecliptic_longitude_perigee=282.596403,\n eccentricity=0.016718,\n moon_mean_longitude_epoch=64.975464,\n moon_mean_perigee_epoch=349.383063,\n):\n # set time to Noon if not otherwise given, as midnight is confusingly close to previous day\n if np.sum(datetime_index.hour) == 0:\n datetime_index = datetime_index + pd.Timedelta(hours=12)\n days = datetime_index.to_julian_date() - epoch\n\n # Mean anomaly of the Sun\n a = (360 / 365.2422) * days\n N = a - 360.0 * np.floor(a / 360.0)\n N = N + ecliptic_longitude_epoch - ecliptic_longitude_perigee\n # Convert from perigee coordinates to epoch 1980\n M = a - 360.0 * np.floor(N / 360.0)\n\n m = torad(M)\n e = m.copy()\n while 1:\n delta = e - eccentricity * np.sin(e) - m\n e = e - delta / (1.0 - eccentricity * np.cos(e))\n if abs(delta).max() <= epsilon:\n break\n\n Ec = sqrt((1 + eccentricity) / (1 - eccentricity)) * np.tan(e / 2.0)\n # True anomaly\n Ec = 2 * todeg(np.arctan(Ec))\n # Suns's geometric ecliptic longuitude\n a = Ec + ecliptic_longitude_perigee\n lambda_sun = a - 360.0 * np.floor(a / 360.0)\n\n # Calculation of the Moon's position\n\n # Moon's mean longitude\n a = 13.1763966 * days + moon_mean_longitude_epoch\n moon_longitude = a - 360.0 * np.floor(a / 360.0)\n\n # Moon's mean anomaly\n a = moon_longitude - 0.1114041 * days - moon_mean_perigee_epoch\n MM = a - 360.0 * np.floor(a / 360.0)\n\n # Moon's ascending node mean longitude\n # MN = fixangle(c.node_mean_longitude_epoch - 0.0529539 * day)\n\n evection = 1.2739 * np.sin(torad(2 * (moon_longitude - lambda_sun) - MM))\n\n # Annual equation\n annual_eq = 0.1858 * np.sin(torad(M))\n\n # Correction term\n A3 = 0.37 * np.sin(torad(M))\n\n MmP = MM + evection - annual_eq - A3\n\n # Correction for the equation of the centre\n mEc = 6.2886 * np.sin(torad(MmP))\n\n # Another correction term\n A4 = 0.214 * np.sin(torad(2 * MmP))\n\n # Corrected longitude\n lP = moon_longitude + evection + mEc - annual_eq + A4\n\n # Variation\n variation = 0.6583 * np.sin(torad(2 * (lP - lambda_sun)))\n\n # True longitude\n lPP = lP + variation\n\n # Calculation of the phase of the Moon\n\n # Age of the Moon, in degrees\n moon_age = lPP - lambda_sun\n\n # Phase of the Moon\n moon_phase = (1 - np.cos(torad(moon_age))) / 2.0\n return moon_phase\n # return pd.Series(moon_phase, index=datetime_index)",
"def phases(self,dataset):\n start = '1984-1-1'\n if dataset == \"ISCCP_raw\":\n stop = '2007-12-31'\n else:\n stop = '2009-12-31'\n X = getattr(self,dataset)(time=(start,stop))\n R,P = sc.fast_annual_cycle(X)\n return MV.masked_where(np.isnan(P),P)",
"def phaseEstimator(phases,omegas,T_s,k):\n length = phases.shape[0]\n pis = np.tile(2*np.pi,length)\n a = phases - T_s*k*omegas\n phaseShifts = np.mod(a,pis)\n b = phases-phaseShifts\n omega_hat = np.mod(b,pis)\n n = omega_hat/omegas\n estimatedTime = np.sum(n)/length\n \n estimatedPhase = phaseShifts + estimatedTime*omegas\n \n return estimatedPhase",
"def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase",
"def phase(self):\n return np.arctan(np.sum(np.imag(self.values)) / np.sum(np.real(self.values)))",
"def phase(self):\n\n self.theta = np.arctan(np.sqrt(self.P / (1 - self.P)))\n return self",
"def phase_Saturn_2(alpha):\n phase = 10.**(-0.4*(- 3.7e-04*alpha +6.16e-04*alpha**2.))\n return phase",
"def getPhase(phase):",
"def jovian_year(cls, date):\n return amod(quotient(cls.hindu_day_count(date), cls.ARYA_JOVIAN_PERIOD / 12) + 27, 60)",
"def phase_Earth(alpha):\n phase = 10.**(-0.4*(- 1.060e-3*alpha + 2.054e-4*alpha**2.))\n return phase",
"def date_ym_value(date: dt.datetime) -> int:\n return (100 * date.year) + date.month",
"def declination_degree(day_date, TY):\n\n return 23.45 * np.vectorize(sin)((2 * pi / (TY)) * (day_date - 81))",
"def phase(state, i):\n particles = bin(state >> i + 1).count(\"1\")\n return 1 if particles % 2 == 0 else -1",
"def timestep_from_date(self, this_date):\n this_timestep = this_date.year - self._date_at_timestep0.year\n return this_timestep",
"def fringes_morlet_phase(m1,m2, quasi_pi=False):\n ### cross spectrum\n cross_spec = np.conj(m1.cwt)*m2.cwt\n phi = np.angle(cross_spec)\n if quasi_pi:\n phi = np.mod(phi + np.pi/2, 2*np.pi)\n weight = abs(m1.cwt)*abs(m2.cwt)\n phase = np.sum(phi*weight, axis=0)/np.sum(weight, axis=0)\n if quasi_pi:\n phase -= np.pi/2\n return phase",
"def phase_offset(frq,start,base):\r\n \r\n if type(start)==datetime:\r\n dx = start - base\r\n dx = dx.total_seconds()\r\n else:\r\n dx = start -base\r\n \r\n return np.mod(dx*np.array(frq),2*np.pi)",
"def compute_phase(self, dt, phase_speed):\n num_time_steps = int(self._traj_len / phase_speed)\n\n phase = Phase(dt=self._dt, phase_speed=phase_speed, time_steps=num_time_steps)\n\n return phase",
"def getorbitalphase(planet, hjd, **kw):\n\n hjd = array(hjd).copy()\n if bool(planet.transit)==True:\n thiseph = planet.tt\n else:\n thiseph = planet.rveph(hjd.max())\n\n orbphase = ((hjd - thiseph) ) / planet.per\n orbphase -= int(orbphase.mean())\n\n return orbphase",
"def yoga(date):\n return ifloor(mod((HinduSolarDate.longitude(date) + HinduLunarDate.longitude(date)) / angle(0, 800, 0), 27)) + 1",
"def kuramoto_ODE_jac(self, t, y, arg):\n\n w, k = arg\n yt = y[:,None]\n dy = y-yt\n\n phase = [m*k[m-1]*np.cos(m*dy) for m in range(1,1+self.m_order)]\n phase = np.sum(phase, axis=0)\n\n for i in range(self.n_osc):\n phase[i,i] = -np.sum(phase[:,i])\n\n return phase",
"def phase(self):\n pass",
"def kuramoto_ODE(self, t, y, arg):\n w, k = arg\n yt = y[:,None]\n dy = y-yt\n\n phase = w.astype(self.dtype)\n if self.noise != None:\n n = self.noise().astype(self.dtype)\n dy += n\n \n for m, _k in enumerate(k):\n phase += np.sum(_k*np.sin((m+1)*dy),axis=1)\n \n return phase"
] | [
"0.7753409",
"0.64832145",
"0.6266615",
"0.6266615",
"0.5898424",
"0.5834775",
"0.5758287",
"0.5736949",
"0.5586432",
"0.5580027",
"0.5443291",
"0.5376677",
"0.53231025",
"0.53126574",
"0.5306432",
"0.5282793",
"0.52764946",
"0.5266146",
"0.5248463",
"0.5236514",
"0.5230845",
"0.5217797",
"0.5201141",
"0.51913184",
"0.5187528",
"0.5175733",
"0.5162414",
"0.5157162",
"0.51302814",
"0.5118767"
] | 0.7782467 | 0 |
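The moon_phase snippet above returns an integer in [0, 27], with 0 at new moon and roughly 14 at full moon. A small self-contained sketch mapping that index to a coarse label; the bucket boundaries and names below are an illustrative convention, not taken from the source.

def phase_name(moon_index):
    # moon_index: integer 0..27 as produced by moon_phase above
    if moon_index == 0:
        return "new moon"
    if moon_index < 7:
        return "waxing crescent"
    if moon_index == 7:
        return "first quarter"
    if moon_index < 14:
        return "waxing gibbous"
    if moon_index == 14:
        return "full moon"
    if moon_index < 21:
        return "waning gibbous"
    if moon_index == 21:
        return "last quarter"
    return "waning crescent"

print(phase_name(0), "|", phase_name(14), "|", phase_name(22))
# new moon | full moon | waning crescent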
Reorder 'shape' according to the chosen data layout to optimize data distribution. | def _optimizeshape(shape):
shape.sort()
if ORDER == 'C':
shape[:] = shape[::-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unchanged_shape(input_shape):\n return input_shape",
"def restore_backup_shape(self):\n\n self.shape = self.shape_backup",
"def backup_shape(self):\n\n self.shape_backup = np.copy(self.shape)",
"def set_shape(self, shape):\n self._shape = self._shape.merge_with(shape)",
"def change_orientation(self):\n self.shape = self.shape.T",
"def changeInputShape(self,shape):\n self.input_shape = shape",
"def data_shape():\n return DATA_SHAPE",
"def _init_nd_shape_and_axes_sorted(x, shape, axes):\n noaxes = axes is None\n shape, axes = _init_nd_shape_and_axes(x, shape, axes)\n\n if not noaxes:\n shape = shape[axes.argsort()]\n axes.sort()\n\n return shape, axes",
"def _reorder(self, unordered_data: np.ndarray) -> np.ndarray:\n if unordered_data is None:\n return unordered_data\n\n if self._memory_allocation == ShotOrder.circuit_first:\n return unordered_data.T.flatten()\n else:\n return unordered_data.flatten()",
"def reshape(data, shape, symmetric=False, layout='row', **kwargs):\n return Component(\n \"Reshape\",\n arguments={\n 'data': Component.of(data)\n },\n options={\n 'symmetric': symmetric,\n 'layout': layout,\n 'shape': shape\n },\n constraints=kwargs)",
"def data_shapes(self):",
"def _settle_shape(self, shape):\n if shape:\n for block in shape.blocks:\n self.array[block.row_position][block.column_position] = block\n self.remove_completed_lines()",
"def _normalize_shape(shape):\n\n if isinstance(shape, (np.integer, int)):\n if shape < 1:\n raise ValueError(\"shape value must be greater than 0: %d\"\n % shape)\n shape = (shape,) # N is a shorthand for (N,)\n try:\n shape = tuple(shape)\n except TypeError:\n raise TypeError(\"shape must be an integer or sequence: %r\"\n % (shape,))\n\n # XXX Get from HDF5 library if possible.\n # HDF5 does not support ranks greater than 32\n if len(shape) > 32:\n raise ValueError(\n f\"shapes with rank > 32 are not supported: {shape!r}\")\n\n return tuple(SizeType(s) for s in shape)",
"def expand_to_shape(data, shape, dtype=None, background=None):\n if dtype is None:\n dtype = data.dtype\n if shape==data.shape:\n return data.astype(dtype)\n if background is None:\n background = data.min()\n expanded_data = numpy.zeros(shape, dtype=dtype) + background\n slices = []\n rhs_slices = []\n for s1, s2 in zip (shape, data.shape):\n a, b = (s1-s2+1)//2, (s1+s2+1)//2\n c, d = 0, s2\n while a<0:\n a += 1\n b -= 1\n c += 1\n d -= 1\n slices.append(slice(a, b))\n rhs_slices.append(slice(c, d))\n try:\n expanded_data[tuple(slices)] = data[tuple (rhs_slices)]\n except ValueError:\n print data.shape, shape\n raise\n return expanded_data",
"def processed_shape(self, shape):\n return shape",
"def reshape(x, shape):\n return Reshape(shape)(x)",
"def translate_shape(shape, x_shift, y_shift):",
"def contract_to_shape(data, shape, dtype=None):\n if dtype is None:\n dtype = data.dtype\n if shape==data.shape:\n return data.astype(dtype)\n slices = []\n for s1, s2 in zip (data.shape, shape):\n slices.append(slice((s1-s2)//2, (s1+s2)//2))\n return data[tuple(slices)].astype(dtype)",
"def _special_handle_reshape(cls, op, X, W):\n node_name = op.name + \":shape\"\n return [\n numpy_helper.from_array(np.array(op.shape, dtype=np.int64),\n node_name)\n ]",
"def set_shape(self, shape):\n self.rows = shape[0]\n self.cols = shape[1]",
"def _get_preprocess_shape(self, old_shape: Tuple[int, int], longest_edge: int):\n oldh, oldw = old_shape\n scale = longest_edge * 1.0 / max(oldh, oldw)\n newh, neww = oldh * scale, oldw * scale\n newh = int(newh + 0.5)\n neww = int(neww + 0.5)\n return (newh, neww)",
"def reshape_output_shape_0(input_shape): \n shape_1 = input_shape[0]\n shape_2 = input_shape[1]\n shape_3 = input_shape[2]\n return(shape_1, shape_2, shape_3, 1)",
"def reshape(tensor, newshape):\n raise NotImplementedError",
"def _keras_update_shape(self, prep):\n\n # Run preprocessing on the training data\n X_transform = prep.fit_transform(self.X_train)\n\n # If the input shape has not been specified, it is simply the number of features in X_transform\n if 'input_shape' not in self.model.first_layer_kwargs:\n self.model.first_layer_kwargs['input_shape'] = tuple([X_transform.shape[1]])\n # Else update the input shape based on the number of features after preprocessing\n else:\n # Transform to a list to make the input_shape mutable\n self.model.first_layer_kwargs['input_shape'] = list(self.model.first_layer_kwargs['input_shape'])\n # Update the number of features based on X_transform\n if self.model.lags:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//(self.model.lags + (1 if self.model.current_sample_as_input else 0))\n else:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//np.prod(self.model.first_layer_kwargs['input_shape'][:-1])\n # Transform back to a tuple as required by Keras\n self.model.first_layer_kwargs['input_shape'] = tuple(self.model.first_layer_kwargs['input_shape'])\n \n # Ensure the Architecture has been updated\n self.model.architecture.iloc[0, 2]['input_shape'] = self.model.first_layer_kwargs['input_shape']\n \n # 2D, 3D and 4D data is valid. \n # e.g. The input_shape can be a tuple of (subsequences, timesteps, features), with subsequences and timesteps as optional.\n # A 4D shape may be valid for e.g. a ConvLSTM with (timesteps, rows, columns, features) \n if len(self.model.first_layer_kwargs['input_shape']) > 5:\n err = \"Unsupported input_shape: {}\".format(self.model.first_layer_kwargs['input_shape'])\n raise Exception(err)",
"def reshape(self, *shape):\n return F.Reshape.apply(self, shape)",
"def adjust_shape(placeholder, data):\n if not isinstance(data, np.ndarray) and not isinstance(data, list):\n return data\n if isinstance(data, list):\n data = np.array(data)\n\n placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]\n\n assert _check_shape(placeholder_shape, data.shape), \\\n 'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)\n\n return np.reshape(data, placeholder_shape)",
"def predataShape(self):\n self._predatashape['name']=self._name\n self._predatashape['location'] = self._location\n self._predatashape['origin'] = self._origin\n self._predatashape['width'] = self._width\n self._predatashape['height'] = self._height\n return self._predatashape",
"def reorder_input_dimensions(self, train_x):\n\n source = [0, 1, 2, 3]\n destination = [3, 0, 2, 1]\n return np.moveaxis(train_x, source, destination)",
"def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, order_z=0):\n assert len(data.shape) == 4, \"data must be (c, x, y, z)\"\n if is_seg:\n resize_fn = resize_segmentation\n kwargs = OrderedDict()\n else:\n resize_fn = resize\n kwargs = {'mode': 'edge', 'anti_aliasing': False}\n dtype_data = data.dtype\n shape = np.array(data[0].shape)\n new_shape = np.array(new_shape)\n if np.any(shape != new_shape):\n data = data.astype(float)\n if do_separate_z:\n print(\"separate z, order in z is\",\n order_z, \"order inplane is\", order)\n assert len(axis) == 1, \"only one anisotropic axis supported\"\n axis = axis[0]\n if axis == 0:\n new_shape_2d = new_shape[1:]\n elif axis == 1:\n new_shape_2d = new_shape[[0, 2]]\n else:\n new_shape_2d = new_shape[:-1]\n\n reshaped_final_data = []\n for c in range(data.shape[0]):\n reshaped_data = []\n for slice_id in range(shape[axis]):\n if axis == 0:\n reshaped_data.append(\n resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs))\n elif axis == 1:\n reshaped_data.append(\n resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs))\n else:\n reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order,\n **kwargs))\n reshaped_data = np.stack(reshaped_data, axis)\n if shape[axis] != new_shape[axis]:\n\n # The following few lines are blatantly copied and modified from sklearn's resize()\n rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]\n orig_rows, orig_cols, orig_dim = reshaped_data.shape\n\n row_scale = float(orig_rows) / rows\n col_scale = float(orig_cols) / cols\n dim_scale = float(orig_dim) / dim\n\n map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]\n map_rows = row_scale * (map_rows + 0.5) - 0.5\n map_cols = col_scale * (map_cols + 0.5) - 0.5\n map_dims = dim_scale * (map_dims + 0.5) - 0.5\n\n coord_map = np.array([map_rows, map_cols, map_dims])\n if not is_seg or order_z == 0:\n reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z,\n mode='nearest')[None])\n else:\n unique_labels = np.unique(reshaped_data)\n reshaped = np.zeros(new_shape, dtype=dtype_data)\n\n for i, cl in enumerate(unique_labels):\n reshaped_multihot = np.round(\n map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z,\n mode='nearest'))\n reshaped[reshaped_multihot > 0.5] = cl\n reshaped_final_data.append(reshaped[None])\n else:\n reshaped_final_data.append(reshaped_data[None])\n reshaped_final_data = np.vstack(reshaped_final_data)\n else:\n print(\"no separate z, order\", order)\n reshaped = []\n for c in range(data.shape[0]):\n reshaped.append(\n resize_fn(data[c], new_shape, order, **kwargs)[None])\n reshaped_final_data = np.vstack(reshaped)\n return reshaped_final_data.astype(dtype_data)\n else:\n print(\"no resampling necessary\")\n return data",
"def data_shape(self):\n raise NotImplementedError"
] | [
"0.601357",
"0.6012796",
"0.59265906",
"0.59111005",
"0.58404654",
"0.5758127",
"0.57051116",
"0.5653655",
"0.5619018",
"0.5616662",
"0.5612885",
"0.56128573",
"0.561095",
"0.5574861",
"0.5568789",
"0.55197614",
"0.5517362",
"0.551245",
"0.54615873",
"0.54567415",
"0.54471827",
"0.54460835",
"0.5418479",
"0.5415293",
"0.54097",
"0.54044056",
"0.54023105",
"0.5379557",
"0.53665763",
"0.5350459"
] | 0.7083111 | 0 |
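The _optimizeshape snippet above sorts the extents in place and, when the module-level ORDER is 'C' (row-major), reverses them so the largest extent varies slowest. A standalone sketch with the global replaced by an explicit argument, an assumption made only for illustration:

def optimize_shape(shape, order="C"):
    # Sort extents ascending; for row-major layouts put the largest first.
    shape = sorted(shape)
    if order == "C":
        shape = shape[::-1]
    return shape

print(optimize_shape([16, 4, 256]))         # [256, 16, 4]
print(optimize_shape([16, 4, 256], "F"))    # [4, 16, 256]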
True if ghost layer length is not zero. | def has_ghosts(self):
return not np.all(self.mesh.discretization.ghosts == 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def empty(self):\n return len(self.layers) == 0",
"def is_empty(self):\n return ch.prod(ch.tensor(self.x.shape)).item() == 0",
"def is_ghost(self):\n\t\treturn False",
"def is_trivial(self):\n return self.dims == 0",
"def is_empty(self) -> bool:\n return self.num_grna() == 0",
"def _is_empty(self):\n if self.allocated_spaces == 0:\n return True\n else:\n return False",
"def is_ghost(self):\n return self._is_ghost",
"def is_empty(self):\n return self._connected and self._length == 1 and self._degree > 1",
"def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()",
"def _is_empty(shape):\n return F.shape_mul(shape) == 0",
"def is_empty (self):\n return len(self.network) == 0",
"def empty(self) -> bool:\n return self.sk1_len==0",
"def empty(self):\n return self.numba_rtree._bounds_tree.shape[0] == 0",
"def is_buffer_empty(self): \n if self.buffer.shape == (0, 5):\n return True\n else:\n return False",
"def _is_empty(self):\n return self.size == 0",
"def empty(self):\r\n return self.getSize() == 0",
"def is_full(self):\n return self.remaining_space_in_hold() == 0",
"def isEmpty(self):\n return len(self.mask) == 0",
"def is_empty(self):\n if numpy.any(numpy.logical_not(self.shape)):\n return True\n if len(self.__m__) == 0:\n return True\n return False",
"def is_empty(self):\n\t\treturn (self._size == 0)",
"def is_empty(self):\n return self.fodder == 0",
"def is_null(self):\n return self.length2 < pygonal.EPSILON2",
"def is_empty(self):\n\t\treturn self._size == 0",
"def is_empty(self):\n return self.size == 0",
"def is_empty(self):\n return self.size == 0",
"def is_empty(self):\n return self.size == 0",
"def is_empty(self):\n return self.size == 0",
"def is_empty(self):\n return self.size == 0",
"def is_zero(self):\n return self._x == 0 and self._y == 0",
"def is_empty(self):\n return len(self.__nodes) == 0"
] | [
"0.7561698",
"0.72112876",
"0.7065583",
"0.7046145",
"0.7030016",
"0.69214237",
"0.6830907",
"0.6792221",
"0.67822015",
"0.67809653",
"0.67568576",
"0.6733361",
"0.6685363",
"0.6656976",
"0.6638194",
"0.6637561",
"0.6615059",
"0.660577",
"0.65900713",
"0.6586594",
"0.658374",
"0.65624404",
"0.6556189",
"0.6555887",
"0.6555887",
"0.6555887",
"0.6555887",
"0.6555887",
"0.6554751",
"0.6553147"
] | 0.72264445 | 1 |
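has_ghosts above reduces the per-direction ghost widths with numpy: the result is True as soon as any width is non-zero. A two-case illustration with made-up ghost arrays:

import numpy as np

print(not np.all(np.array([0, 0, 0]) == 0))  # False: no ghost layer at all
print(not np.all(np.array([2, 2, 0]) == 0))  # True: at least one non-zero width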
True if target and current object are equal and have the same parent. Equal means same mesh, same shape and same domain. | def is_consistent_with(self, target):
same_parent = self.parent() == target.parent()
# Note FP. Is it really required to have the
# same parent? Inclusion of all proc may be enough?
return npw.equal(self.shape, target.shape).all() and same_parent | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n return self.mesh == other.mesh and \\\n npw.equal(self.shape, other.shape).all() and \\\n self.domain == other.domain",
"def __eq__(self, other):\n parent_same = self.parent1.rid == other.parent1.rid \\\n and self.parent2.rid == other.parent2.rid\n\n parents_opposite = self.parent2.rid == other.parent1.rid \\\n and self.parent1.rid == other.parent2.rid\n\n return parent_same or parents_opposite",
"def is_same_as(self, other) -> bool:\n return self.x == other.x and self.y == other.y",
"def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)",
"def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False",
"def __eq__(self, obj):\r\n return (self.position == obj.position and self.left_cont == obj.left_cont\r\n and self.line == obj.line and self.right_cont == obj.right_cont)",
"def __eq__(self, other):\n return type(self) == type(other) and self.node is other.node",
"def is_same(self: _R, other: _R) -> bool:\n children = [i.render() for i in self.children]\n other_children = [i.render() for i in other.children]\n return other_children == children",
"def __eq__(self, other):\n return (type(self) == type(other) and\n self.puzzle == other.puzzle and\n all([x in self.children for x in other.children]) and\n all([x in other.children for x in self.children]))",
"def __eq__(self, other):\n return (type(self) == type(other) and\n self.n == other.n and self.m == other.m and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)",
"def isSameKindAs(self, *args):\n return _osgAnimation.RigGeometry_isSameKindAs(self, *args)",
"def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node",
"def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node",
"def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node",
"def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node",
"def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node",
"def isSameKindAs(self, *args):\n return _osgAnimation.Bone_isSameKindAs(self, *args)",
"def eq(self, other: Any) -> bool:\n # TODO: Rasswanth: Fix later after the comparison operation\n # relative\n # from .... import Tensor\n\n # if (\n # isinstance(self.child, Tensor)\n # and isinstance(other.child, Tensor)\n # and (self.child != other.child).child.any() # type: ignore\n # ):\n # return False\n\n # if (\n # isinstance(self.child, np.ndarray)\n # and isinstance(other.child, np.ndarray)\n # and (self.child != other.child).any()\n # ):\n # return False\n\n # if self.rank != other.rank:\n # return False\n\n # if self.ring_size != other.ring_size:\n # return False\n\n # if self.nr_parties != other.nr_parties:\n # return False\n\n # return True\n\n # ATTENTION: Why are we getting here now when we never did before?\n if not hasattr(other, \"child\"):\n return self.child == other\n\n return self.child == other.child",
"def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node",
"def identical_to(self, elem):\n \n return (self.n == elem.n) and (math.fabs(self.dx - elem.dx) < 0.001) and (math.fabs(self.dy - elem.dy) < 0.001) and (math.fabs(self.dz - elem.dz) < 0.001)",
"def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)",
"def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid)",
"def equals(self, other: InputTransform) -> bool:\n if hasattr(self, \"indices\") == hasattr(other, \"indices\"):\n if hasattr(self, \"indices\"):\n return (\n super().equals(other=other)\n and (self._d == other._d)\n and (self.indices == other.indices).all()\n )\n else:\n return super().equals(other=other) and (self._d == other._d)\n return False",
"def __eq__(self, other):\n return (type(self) == type(other) and\n (self.from_grid == other.from_grid) and\n (self.to_grid == other.to_grid))",
"def __eq__(self, other):\n return (type(self) == type(other) and\n self.from_grid == other.from_grid and\n self.to_grid == other.to_grid and\n self.m == other.m and\n self.n == other.n)",
"def equal(self,other):\n if(self.x == other.x) and (self.y == other.y):\n return True\n else:\n return False",
"def _node_equal(self, other):\n # We're not equal if other isn't a Node, or if other is a different class.\n if not isinstance(other, Node) or not isinstance(other, self.__class__):\n return False\n # Loop through all children, checking whether they are equal\n for self_child, other_child in zip(self.getChildren(), other.getChildren()):\n if not self_child == other_child:\n return False\n # If we get here, our two nodes much be equal\n return True",
"def inside_itself(self):\n for i in range(2, len(self.nodes)):\n if self.nodes[0] == self.nodes[i]:\n return True\n return False",
"def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False",
"def identical_grid(self, other) -> bool:\n return (\n (\n self.crs is None\n or other.raster.crs is None\n or self.crs == other.raster.crs\n )\n and np.allclose(self.transform, other.raster.transform, atol=1e-06)\n and np.allclose(self.shape, other.raster.shape)\n )"
] | [
"0.70844996",
"0.66548616",
"0.66513604",
"0.6646044",
"0.64481336",
"0.6433953",
"0.63558435",
"0.63483995",
"0.6299958",
"0.6295003",
"0.62756413",
"0.6267289",
"0.6267289",
"0.6267289",
"0.6267289",
"0.6267289",
"0.626677",
"0.62649465",
"0.62575334",
"0.6204619",
"0.62005",
"0.62005",
"0.61947954",
"0.6190282",
"0.6183342",
"0.61703104",
"0.61490566",
"0.6131132",
"0.611621",
"0.61115634"
] | 0.7603044 | 0 |
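The shape test in is_consistent_with above (and in the __eq__ negative) reduces an element-wise numpy comparison with .all(). A self-contained illustration with made-up resolutions:

import numpy as np

shape_a = np.array([64, 64, 128])
shape_b = np.array([64, 64, 128])
shape_c = np.array([64, 32, 128])

print(np.equal(shape_a, shape_b).all())  # True: identical in every direction
print(np.equal(shape_a, shape_c).all())  # False: differs along one axis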
True if current topo is compliant with target. | def can_communicate_with(self, target):
if self == target:
return True
msg = 'You try to connect topologies belonging to'
msg += ' two different mpi tasks. Set taskids properly or use'
msg += ' InterBridge.'
assert self.task_id() == target.task_id(), msg
# Parent communicator
# Todo : define some proper conditions for compatibility
# between topo_from, topo_to and parent:
# - same size
# - same domain
# - common processus ...
# At the time we check that both topo have
# the same comm_origin.
return self.is_consistent_with(target) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True",
"def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)",
"def is_connected(self) -> bool:\n for node in self.nodes.values():\n if node.is_connected:\n return True\n return False",
"def is_strongly_connected(self):\n if self.order()==1:\n return True\n\n try:\n return self._backend.is_strongly_connected()\n\n except AttributeError:\n return len(self.strongly_connected_components()) == 1",
"def is_consistent_with(self, target):\n same_parent = self.parent() == target.parent()\n # Note FP. Is it really required to have the\n # same parent? Inclusion of all proc may be enough?\n return npw.equal(self.shape, target.shape).all() and same_parent",
"def is_gentarget(self, target):\r\n raise NotImplementedError",
"def is_connected(self):\n connected = False\n self.state = self.mesh.state()\n if self.state in (STATE_CHILD, STATE_ROUTER, STATE_LEADER, STATE_LEADER_SINGLE):\n connected = True\n return connected",
"def is_adjacent(self, remote_host_name):\n # Check if a topology is defined, otherwise use fully connected\n if self.topology is None:\n return True\n\n if self.name in self.topology:\n if remote_host_name in self.topology[self.name]:\n return True\n else:\n return False\n else:\n logging.warning(\n \"Node {} is not in the specified topology and is therefore \"\n \"assumed to have no neighbors\".format(self.name)\n )\n return False",
"def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False",
"def is_setup_connected(self):\n return bool(self.get_target_namespace())",
"def is_connected(self):\n return True",
"def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None",
"def failover_target(self) -> bool:\n return pulumi.get(self, \"failover_target\")",
"def is_active(self):\n if (\n '_transport' in self.__dict__ and\n self._transport.is_active()\n ):\n return True\n return False",
"def is_connected(self) -> bool:",
"def is_connected(self):\n return self._current_protocol is not None",
"def at_target(self):\n return self.location == self.target_location",
"def isConnectedTo(self, node):\n for arc in self._arcsFrom:\n if arc.getFinish() is node:\n return True\n return False",
"def is_local_client(self):\n return self.msg.is_local_client",
"def _quell_co2(self, flowable, context):\n if self._quell_biogenic is False:\n return False\n if flowable in self._bio_co2:\n if context.is_subcompartment(self._cm['from air']):\n return True\n if context.is_subcompartment(self._cm['Emissions']):\n return True\n return False",
"def is_connected(self):\n if self.V < 1:\n raise ValueError(\"empty graph\")\n if self.V < 2:\n return True\n if self.E == 0:\n return False\n cc = self.cc()\n return int(cc.max() == 0)",
"def has_target(self):\n return self._has_target",
"def bfs_is_connected(self):\n q = Queue.Queue()\n origins = [self.vertices()[0]]\n traveled = set(origins)\n while origins:\n for o in origins:\n for child in self.out_vertices(o):\n if child not in traveled:\n q.put(child)\n traveled.add(child)\n\n origins = []\n while not q.empty():\n origins.append(q.get())\n if len(traveled) == self.order():\n return True\n return False",
"def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False",
"def is_connected(self):\n vs = self.vertices()\n visited = self.bfs(vs[0])\n return len(visited) == len(vs)",
"def is_on(self) -> bool:\n val = bool(self._cluster_handler.cluster.get(self._zcl_attribute))\n return (not val) if self.inverted else val",
"def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected",
"def is_target(self):\n\t\treturn self.window and self.window.target is self",
"def is_community(self):\n context = aq_inner(self.context)\n for obj in aq_chain(context):\n if ICommunity.providedBy(obj):\n return True\n\n return False",
"def is_active(self):\n group_names = self.get_var(\"group_names\", default=[])\n master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names\n return super(OvsVersion, self).is_active() and master_or_node"
] | [
"0.63338524",
"0.61239785",
"0.6074758",
"0.5993989",
"0.59502983",
"0.5931595",
"0.59294623",
"0.59263664",
"0.5918986",
"0.58928543",
"0.5868036",
"0.5846863",
"0.58454454",
"0.5810941",
"0.5808112",
"0.58062047",
"0.57958764",
"0.5795005",
"0.5779186",
"0.5778966",
"0.577796",
"0.5774486",
"0.57744783",
"0.57719225",
"0.5763049",
"0.5757841",
"0.5755968",
"0.57556546",
"0.57448035",
"0.5744082"
] | 0.735502 | 0 |
Collect global indices of local meshes on each process of topo. | def gather_global_indices(topo, toslice=True, root=None, comm=None):
if comm is None:
comm = topo.parent()
size = comm.size
start = topo.mesh.start()
end = topo.mesh.stop() - 1
# communicator that owns the topology
rank = comm.Get_rank()
dimension = topo.domain.dimension
iglob = npw.int_zeros((dimension * 2, size))
iglob_res = npw.int_zeros((dimension * 2, size))
iglob[0::2, rank] = start
iglob[1::2, rank] = end
# iglob is saved as a numpy array and then transform into
# a dict of slices since mpi send operations are much
# more efficient with numpy arrays.
if root is None:
comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])
else:
comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],
root=root)
if toslice:
return utils.arrayToDict(iglob_res)
else:
return iglob_res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )",
"def _exchange_ghosts_local(self):\n for d in xrange(self._dim):\n self._exchange_ghosts_local_d(d)",
"def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result",
"def gather_global_indices_overlap(topo=None, comm=None, dom=None,\n toslice=True, root=None):\n if topo is None:\n assert comm is not None and dom is not None\n size = comm.Get_size()\n rank = comm.Get_rank()\n dimension = dom.dimension\n iglob = npw.int_zeros((dimension * 2, size))\n iglob_res = npw.int_zeros((dimension * 2, size))\n iglob[1::2, rank] = -1\n if root is None:\n comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])\n else:\n comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],\n root=root)\n if toslice:\n return utils.arrayToDict(iglob_res)\n else:\n return iglob_res\n\n else:\n return TopoTools.gather_global_indices(topo, toslice, root, comm)",
"def gather_dof_coordinates(V: FunctionSpace, dofs: np.ndarray):\n x = V.tabulate_dof_coordinates()\n local_dofs = dofs[dofs < V.dofmap.index_map.size_local * V.dofmap.index_map_bs]\n coords = x[local_dofs]\n num_nodes = len(coords)\n glob_num_nodes = MPI.COMM_WORLD.allreduce(num_nodes, op=MPI.SUM)\n recvbuf = None\n if MPI.COMM_WORLD.rank == 0:\n recvbuf = np.zeros(3 * glob_num_nodes, dtype=np.float64)\n sendbuf = coords.reshape(-1)\n sendcounts = np.array(MPI.COMM_WORLD.gather(len(sendbuf), 0))\n MPI.COMM_WORLD.Gatherv(sendbuf, (recvbuf, sendcounts), root=0)\n glob_coords = MPI.COMM_WORLD.bcast(recvbuf, root=0).reshape((-1, 3))\n return glob_coords",
"def global_index(self):\n raise NotImplementedError",
"def global_to_local_map(self):\n Ngrid = 62\n map_file_to_domain = lambda (x,y,z): (x-1) + (y-1)*Ngrid + (z-1)*Ngrid*Ngrid\n\n if self._global_to_local_map is None: \n m = {}\n for k,v in self.ids_map.iteritems(): \n m[map_file_to_domain(k)] = v\n self._global_to_local_map = m\n return self._global_to_local_map",
"def mpi_index_maps(loc_idx, shape, topology, coords, comm):\n\n nprocs = comm.Get_size()\n\n # Gather data structures from all ranks in order to produce the\n # relevant mappings.\n dat_len = np.zeros(topology, dtype=tuple)\n for j in range(nprocs):\n dat_len[coords[j]] = comm.bcast(shape, root=j)\n if any(k == 0 for k in dat_len[coords[j]]):\n dat_len[coords[j]] = as_tuple([0]*len(dat_len[coords[j]]))\n\n # If necessary, add the time index to the `topology` as this will\n # be required to correctly construct various maps.\n if len(np.amax(dat_len)) > len(topology):\n topology = as_list(topology)\n coords = [as_list(l) for l in coords]\n for _ in range(len(np.amax(dat_len)) - len(topology)):\n topology.insert(0, 1)\n for e in coords:\n e.insert(0, 0)\n topology = as_tuple(topology)\n coords = as_tuple([as_tuple(i) for i in coords])\n dat_len = dat_len.reshape(topology)\n dat_len_cum = distributed_data_size(dat_len, coords, topology)\n\n # This 'transform' will be required to produce the required maps\n transform = []\n for i in as_tuple(loc_idx):\n if isinstance(i, slice):\n if i.step is not None:\n transform.append(slice(None, None, np.sign(i.step)))\n else:\n transform.append(slice(None, None, None))\n else:\n transform.append(slice(0, 1, None))\n transform = as_tuple(transform)\n\n global_size = dat_len_cum[coords[-1]]\n\n indices = np.zeros(global_size, dtype=tuple)\n global_si = np.zeros(global_size, dtype=tuple)\n it = np.nditer(indices, flags=['refs_ok', 'multi_index'])\n while not it.finished:\n index = it.multi_index\n indices[index] = index\n it.iternext()\n global_si[:] = indices[transform]\n\n # Create the 'rank' slices\n rank_slice = []\n for j in coords:\n this_rank = []\n for k in dat_len[j]:\n this_rank.append(slice(0, k, 1))\n rank_slice.append(this_rank)\n # Normalize the slices:\n n_rank_slice = []\n for i in range(len(rank_slice)):\n my_coords = coords[i]\n if any([j.stop == j.start for j in rank_slice[i]]):\n n_rank_slice.append(as_tuple([None]*len(rank_slice[i])))\n continue\n if i == 0:\n n_rank_slice.append(as_tuple(rank_slice[i]))\n continue\n left_neighbours = []\n for j in range(len(my_coords)):\n left_coord = list(my_coords)\n left_coord[j] -= 1\n left_neighbours.append(as_tuple(left_coord))\n left_neighbours = as_tuple(left_neighbours)\n n_slice = []\n for j in range(len(my_coords)):\n if left_neighbours[j][j] < 0:\n start = 0\n stop = dat_len_cum[my_coords][j]\n else:\n start = dat_len_cum[left_neighbours[j]][j]\n stop = dat_len_cum[my_coords][j]\n n_slice.append(slice(start, stop, 1))\n n_rank_slice.append(as_tuple(n_slice))\n n_rank_slice = as_tuple(n_rank_slice)\n\n # Now fill each elements owner:\n owners = np.zeros(global_size, dtype=np.int32)\n send = np.zeros(global_size, dtype=np.int32)\n for i in range(len(n_rank_slice)):\n if any([j is None for j in n_rank_slice[i]]):\n continue\n else:\n owners[n_rank_slice[i]] = i\n send[:] = owners[transform]\n\n # Construct local_si\n local_si = np.zeros(global_size, dtype=tuple)\n it = np.nditer(local_si, flags=['refs_ok', 'multi_index'])\n while not it.finished:\n index = it.multi_index\n owner = owners[index]\n my_slice = n_rank_slice[owner]\n rnorm_index = []\n for j, k in zip(my_slice, index):\n rnorm_index.append(k-j.start)\n local_si[index] = as_tuple(rnorm_index)\n it.iternext()\n return owners, send, global_si, local_si",
"def write_global_local_maps(dest,global_local,local_global):",
"def get_locations(self):\n self.locations = {} # reset dictionary\n for node in self.extant_p:\n if node.host not in self.locations:\n self.locations.update({node.host: []})\n self.locations[node.host].append(node)",
"def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices",
"def get_all_local_clustering_coef(g):\n local_cc = {}\n\n for n in nx.nodes(g):\n local_cc[n] = get_local_clustering_coef(g, n)\n\n return local_cc",
"def get_prescribed_indexes(self):\n global_prescribed = []\n for node in self.preprocessor.nodes.values():\n if node.there_are_prescribed_dofs:\n starting_position = node.global_index * DOF_PER_NODE_STRUCTURAL\n dofs = np.array(node.get_prescribed_dofs_bc_indexes()) + starting_position\n global_prescribed.extend(dofs)\n return global_prescribed",
"def agent_locs_idx(self):\n return tuple(self.agent_locs.T)",
"def atlas_clusters():\n pass",
"def get_all_master_idx_paths(self):\n paths = utilities.get_all_master_index_paths(rootdir=constants.flow_data_dir)\n return paths",
"def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]",
"def set_hypercubes_parents_indices(self):\n for hypercube in self.hypercubes.flatten():\n coordinates = []\n for coord in hypercube.coords:\n coordinates.append([2 * coord, 2 * coord + 1])\n for indices in list(itertools.product(*coordinates)):\n hypercube.parent_hypercubes_indices.append(tuple(indices))",
"def generate_all_locations(grid, shape):",
"def all_env_ids(self) -> np.ndarray:",
"def _setup_global_base(self):\n self._setup_facet_orientations()\n\n self._init_econn()\n\n n_dof = 0\n all_dofs = {}\n remaps = {}\n for ig, ap in self.aps.iteritems():\n ii = self.region.get_cells(ig)\n nd = nm.prod(ap.econn.shape)\n\n group = self.domain.groups[ig]\n remaps[ig] = prepare_remap(ii, group.shape.n_el)\n\n aux = nm.arange(n_dof, n_dof + nd, dtype=nm.int32)\n aux.shape = ap.econn.shape\n\n ap.econn[:] = aux\n all_dofs[ig] = aux\n\n n_dof += nd\n\n self.n_nod = n_dof\n\n self.n_bubble_dof = n_dof\n self.bubble_dofs = all_dofs\n self.bubble_remaps = remaps\n\n self.n_vertex_dof = self.n_edge_dof = self.n_face_dof = 0\n\n self._setup_esurface()",
"def update_global_identifiers(self, universe_test):\n self.cellNum += 1\n self.surfaceNum += 1\n self.materialNum += 1\n if universe_test:\n self.universe += 1",
"def get_local_ids(self,\n np.ndarray[uint32, mode='c', ndim=1] entities not None,\n int32 dent,\n np.ndarray[uint32, mode='c', ndim=1] incident not None,\n np.ndarray[uint32, mode='c', ndim=1] offsets not None,\n int32 dim):\n cdef Indices[1] _entities, _local_ids\n cdef MeshConnectivity _incident[1]\n cdef np.ndarray[uint32, mode='c', ndim=1] out\n\n if not entities.shape[0] > 0:\n return np.empty(0, dtype=np.uint32)\n\n _entities.num = entities.shape[0]\n _entities.indices = &entities[0]\n\n _incident.num = _entities.num\n _incident.n_incident = incident.shape[0]\n _incident.indices = &incident[0]\n _incident.offsets = &offsets[0]\n\n out = np.empty(_incident.n_incident, dtype=np.uint32)\n _local_ids.num = _incident.n_incident\n _local_ids.indices = &out[0]\n mesh_get_local_ids(self.mesh, _local_ids, _entities, dent, _incident, dim)\n\n return out",
"def nsi_internal_local_clustering(self, node_list):\n return self.nsi_cross_local_clustering(node_list, node_list)",
"def _exchange_ghosts_mpi(self):\n for d in xrange(self._dim):\n if d in self._cutdir_list:\n self._exchange_ghosts_mpi_d(d)\n else:\n self._exchange_ghosts_local_d(d)",
"def cal_globalIndexH(self):\n h_local = self.cal_localIndexH()\n h_global = np.sum(h_local)\n\n return h_global",
"def internal_global_clustering(self, node_list):\n clustering = self.local_clustering()\n internal_clustering = clustering[node_list].mean()\n return internal_clustering",
"def map_to_local(self, idxs):\n return F.zerocopy_from_dgl_ndarray(\n _CAPI_DGLNDArrayPartitionMapToLocal(\n self._partition, F.zerocopy_to_dgl_ndarray(idxs)\n )\n )",
"def _prog_field_indices(self):\n\n if self._pfi is not None:\n return self._pfi\n\n self.arbor._grow_tree(self)\n self._pfi = np.array([node.tree_id for node in self._prog_nodes])\n return self._pfi",
"def iter_node_map(self):\n return self.d_inv.keys()"
] | [
"0.6674267",
"0.607246",
"0.59368324",
"0.58657676",
"0.5651199",
"0.56479245",
"0.56453234",
"0.5612734",
"0.5533967",
"0.5532873",
"0.5474459",
"0.5465615",
"0.5442727",
"0.5435518",
"0.54162186",
"0.5397795",
"0.535609",
"0.5344314",
"0.534363",
"0.5338216",
"0.5314248",
"0.52974224",
"0.52903426",
"0.52683043",
"0.5259715",
"0.5232956",
"0.52313954",
"0.5181487",
"0.51801115",
"0.51647544"
] | 0.70098007 | 0 |
This function does the same thing as gather_global_indices but may also work when topo is None. The function is useful if you need to collect global indices on a topo defined only on a subset of comm; for the procs not in this subset, topo will be equal to None. In such a case, comm and dom are required. This may happen when you want to build a bridge between two topologies that do not handle the same number of processes but whose groups of processes overlap. In that case, a call to gather_global_indices(topo, comm, dom) will work on all processes belonging to comm, whether topo is None or not. The values corresponding to ranks not in topo will be empty slices. | def gather_global_indices_overlap(topo=None, comm=None, dom=None,
toslice=True, root=None):
if topo is None:
assert comm is not None and dom is not None
size = comm.Get_size()
rank = comm.Get_rank()
dimension = dom.dimension
iglob = npw.int_zeros((dimension * 2, size))
iglob_res = npw.int_zeros((dimension * 2, size))
iglob[1::2, rank] = -1
if root is None:
comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])
else:
comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],
root=root)
if toslice:
return utils.arrayToDict(iglob_res)
else:
return iglob_res
else:
return TopoTools.gather_global_indices(topo, toslice, root, comm) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gather_global_indices(topo, toslice=True, root=None, comm=None):\n if comm is None:\n comm = topo.parent()\n size = comm.size\n start = topo.mesh.start()\n end = topo.mesh.stop() - 1\n # communicator that owns the topology\n rank = comm.Get_rank()\n dimension = topo.domain.dimension\n iglob = npw.int_zeros((dimension * 2, size))\n iglob_res = npw.int_zeros((dimension * 2, size))\n iglob[0::2, rank] = start\n iglob[1::2, rank] = end\n # iglob is saved as a numpy array and then transform into\n # a dict of slices since mpi send operations are much\n # more efficient with numpy arrays.\n if root is None:\n comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])\n else:\n comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],\n root=root)\n\n if toslice:\n return utils.arrayToDict(iglob_res)\n else:\n return iglob_res",
"def mpi_index_maps(loc_idx, shape, topology, coords, comm):\n\n nprocs = comm.Get_size()\n\n # Gather data structures from all ranks in order to produce the\n # relevant mappings.\n dat_len = np.zeros(topology, dtype=tuple)\n for j in range(nprocs):\n dat_len[coords[j]] = comm.bcast(shape, root=j)\n if any(k == 0 for k in dat_len[coords[j]]):\n dat_len[coords[j]] = as_tuple([0]*len(dat_len[coords[j]]))\n\n # If necessary, add the time index to the `topology` as this will\n # be required to correctly construct various maps.\n if len(np.amax(dat_len)) > len(topology):\n topology = as_list(topology)\n coords = [as_list(l) for l in coords]\n for _ in range(len(np.amax(dat_len)) - len(topology)):\n topology.insert(0, 1)\n for e in coords:\n e.insert(0, 0)\n topology = as_tuple(topology)\n coords = as_tuple([as_tuple(i) for i in coords])\n dat_len = dat_len.reshape(topology)\n dat_len_cum = distributed_data_size(dat_len, coords, topology)\n\n # This 'transform' will be required to produce the required maps\n transform = []\n for i in as_tuple(loc_idx):\n if isinstance(i, slice):\n if i.step is not None:\n transform.append(slice(None, None, np.sign(i.step)))\n else:\n transform.append(slice(None, None, None))\n else:\n transform.append(slice(0, 1, None))\n transform = as_tuple(transform)\n\n global_size = dat_len_cum[coords[-1]]\n\n indices = np.zeros(global_size, dtype=tuple)\n global_si = np.zeros(global_size, dtype=tuple)\n it = np.nditer(indices, flags=['refs_ok', 'multi_index'])\n while not it.finished:\n index = it.multi_index\n indices[index] = index\n it.iternext()\n global_si[:] = indices[transform]\n\n # Create the 'rank' slices\n rank_slice = []\n for j in coords:\n this_rank = []\n for k in dat_len[j]:\n this_rank.append(slice(0, k, 1))\n rank_slice.append(this_rank)\n # Normalize the slices:\n n_rank_slice = []\n for i in range(len(rank_slice)):\n my_coords = coords[i]\n if any([j.stop == j.start for j in rank_slice[i]]):\n n_rank_slice.append(as_tuple([None]*len(rank_slice[i])))\n continue\n if i == 0:\n n_rank_slice.append(as_tuple(rank_slice[i]))\n continue\n left_neighbours = []\n for j in range(len(my_coords)):\n left_coord = list(my_coords)\n left_coord[j] -= 1\n left_neighbours.append(as_tuple(left_coord))\n left_neighbours = as_tuple(left_neighbours)\n n_slice = []\n for j in range(len(my_coords)):\n if left_neighbours[j][j] < 0:\n start = 0\n stop = dat_len_cum[my_coords][j]\n else:\n start = dat_len_cum[left_neighbours[j]][j]\n stop = dat_len_cum[my_coords][j]\n n_slice.append(slice(start, stop, 1))\n n_rank_slice.append(as_tuple(n_slice))\n n_rank_slice = as_tuple(n_rank_slice)\n\n # Now fill each elements owner:\n owners = np.zeros(global_size, dtype=np.int32)\n send = np.zeros(global_size, dtype=np.int32)\n for i in range(len(n_rank_slice)):\n if any([j is None for j in n_rank_slice[i]]):\n continue\n else:\n owners[n_rank_slice[i]] = i\n send[:] = owners[transform]\n\n # Construct local_si\n local_si = np.zeros(global_size, dtype=tuple)\n it = np.nditer(local_si, flags=['refs_ok', 'multi_index'])\n while not it.finished:\n index = it.multi_index\n owner = owners[index]\n my_slice = n_rank_slice[owner]\n rnorm_index = []\n for j, k in zip(my_slice, index):\n rnorm_index.append(k-j.start)\n local_si[index] = as_tuple(rnorm_index)\n it.iternext()\n return owners, send, global_si, local_si",
"def gather_dof_coordinates(V: FunctionSpace, dofs: np.ndarray):\n x = V.tabulate_dof_coordinates()\n local_dofs = dofs[dofs < V.dofmap.index_map.size_local * V.dofmap.index_map_bs]\n coords = x[local_dofs]\n num_nodes = len(coords)\n glob_num_nodes = MPI.COMM_WORLD.allreduce(num_nodes, op=MPI.SUM)\n recvbuf = None\n if MPI.COMM_WORLD.rank == 0:\n recvbuf = np.zeros(3 * glob_num_nodes, dtype=np.float64)\n sendbuf = coords.reshape(-1)\n sendcounts = np.array(MPI.COMM_WORLD.gather(len(sendbuf), 0))\n MPI.COMM_WORLD.Gatherv(sendbuf, (recvbuf, sendcounts), root=0)\n glob_coords = MPI.COMM_WORLD.bcast(recvbuf, root=0).reshape((-1, 3))\n return glob_coords",
"def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )",
"def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result",
"def get_prescribed_indexes(self):\n global_prescribed = []\n for node in self.preprocessor.nodes.values():\n if node.there_are_prescribed_dofs:\n starting_position = node.global_index * DOF_PER_NODE_STRUCTURAL\n dofs = np.array(node.get_prescribed_dofs_bc_indexes()) + starting_position\n global_prescribed.extend(dofs)\n return global_prescribed",
"def __global_index( self , active_index = None , global_index = None , ijk = None):\n\n set_count = 0\n if not active_index is None:\n set_count += 1\n\n if not global_index is None:\n set_count += 1\n\n if ijk:\n set_count += 1\n \n if not set_count == 1:\n raise ValueError(\"Exactly one of the kewyord arguments active_index, global_index or ijk must be set\")\n \n if not active_index is None:\n global_index = self._get_global_index1A( active_index )\n elif ijk:\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n \n i,j,k = ijk\n\n if not 0 <= i < nx:\n raise IndexError(\"Invalid value i:%d Range: [%d,%d)\" % (i , 0 , nx)) \n\n if not 0 <= j < ny:\n raise IndexError(\"Invalid value j:%d Range: [%d,%d)\" % (j , 0 , ny)) \n \n if not 0 <= k < nz:\n raise IndexError(\"Invalid value k:%d Range: [%d,%d)\" % (k , 0 , nz)) \n\n global_index = self._get_global_index3( i,j,k)\n else:\n if not 0 <= global_index < self.getGlobalSize():\n raise IndexError(\"Invalid value global_index:%d Range: [%d,%d)\" % (global_index , 0 , self.getGlobalSize())) \n return global_index",
"def serendipity_indices(\n total: int, linear: int, dim: int, done: typing.Optional[typing.List[int]] = None\n) -> typing.List[typing.List[int]]:\n if done is None:\n done = []\n if len(done) == dim:\n if done.count(1) >= linear:\n return [done]\n return []\n if len(done) == dim - 1:\n return serendipity_indices(total, linear, dim, done=done + [total - sum(done)])\n out = []\n for i in range(total - sum(done) + 1):\n out += serendipity_indices(total, linear, dim, done + [i])\n return out",
"def get_sgrna_global_indices(sgrna_df, seq_start, seq_end, strand, sg_positions=None):\n indexed_sgrna_df = sgrna_df.copy()\n indexed_sgrna_df['sgrna_global_start'] = calculate_global_position(strand, seq_start, seq_end,\n indexed_sgrna_df['sgrna_relative_start'])\n if sg_positions is not None:\n for pos in sg_positions:\n indexed_sgrna_df['sgrna_global_' + str(pos)] = traverse_global_position(strand,\n indexed_sgrna_df['sgrna_global_start'],\n pos-1)\n indexed_sgrna_df = indexed_sgrna_df.drop('sgrna_relative_start', axis=1)\n return indexed_sgrna_df",
"def dist_init(\n local_rank: int,\n num_procs: int,\n *func_args: list[Any],\n **func_kwargs: dict[str, Any],\n ) -> None:\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29503'\n os.environ['LOCAL_RANK'] = str(local_rank)\n # NOTE: unit tests don't support multi-node so\n # local_rank == global rank\n os.environ['RANK'] = str(local_rank)\n os.environ['WORLD_SIZE'] = str(num_procs)\n\n dist.init_process_group('gloo')\n\n run_func(*func_args, **func_kwargs)\n\n # Keep faster ranks from exiting and breaking process group\n dist.barrier()",
"def index(self):\n # Check is multiple orders were given\n try:\n orders = list(iter(self.orders))\n except TypeError:\n orders = [self.orders]\n sites = self._epistasismap.sites\n x = [i for i in range(1, len(sites)) if len(sites[i]) in orders]\n # Add the zeroth element if included\n if 0 in orders:\n x = [0] + x\n return np.array(x)",
"def global_index(self):\n raise NotImplementedError",
"def cal_globalIndexH(self):\n h_local = self.cal_localIndexH()\n h_global = np.sum(h_local)\n\n return h_global",
"def toglobal(self, attr=()):\n if self.comm is None:\n raise RuntimeError('This array is not a local image.')\n counts = []\n offsets = [0]\n for rank in range(self.comm.Get_size()):\n s = split_work(self.shape_global[0], rank=rank, comm=self.comm)\n n = (s.stop - s.start) * np.product(self.shape_global[1:])\n counts.append(n)\n offsets.append(offsets[-1] + n)\n offsets.pop()\n s = split_work(self.shape_global[0], comm=self.comm)\n n = s.stop - s.start\n output = self.empty(self.shape_global, dtype=self.dtype,\n comm=MPI.COMM_SELF)\n output.__array_finalize__(self)\n t = MPI.BYTE.Create_contiguous(self.dtype.itemsize)\n t.Commit()\n self.comm.Allgatherv([self[0:n], t], [output.view(np.byte), (counts, offsets), t])\n\n for a in attr:\n i = getattr(self, a, None)\n if i is None:\n continue\n o = np.empty(self.shape_global, dtype=i.dtype)\n t = MPI.BYTE.Create_contiguous(i.dtype.itemsize)\n t.Commit()\n self.comm.Allgatherv([i[0:n], t], [o, (counts, offsets), t])\n setattr(output, a, o)\n \n output.comm = MPI.COMM_SELF\n\n return output",
"def init_retrieval(self, distributed_port):\n\n logger.info(\"initializing retrieval\")\n\n # initializing a separate process group for retrievel as the default\n # nccl backend doesn't support gather/scatter operations while gloo\n # is too slow to replace nccl for the core gpu communication\n if dist.is_initialized():\n logger.info(\"dist initialized\")\n # needs to be set manually\n os.environ[\"GLOO_SOCKET_IFNAME\"] = self._infer_socket_ifname()\n # avoid clash with the NCCL port\n os.environ[\"MASTER_PORT\"] = str(distributed_port + 1)\n self.process_group = dist.new_group(ranks=None, backend=\"gloo\")\n\n # initialize retriever only on the main worker\n if not dist.is_initialized() or self._is_main():\n logger.info(\"dist not initialized / main\")\n self.retriever.init_index()\n\n # all processes wait untill the retriever is initialized by the main process\n if dist.is_initialized():\n torch.distributed.barrier(group=self.process_group)",
"def get_unprescribed_indexes(self):\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n all_indexes = np.arange(total_dof)\n return np.delete(all_indexes, self.prescribed_indexes)",
"def gpus_for_process(process_idx: int, num_gpus_per_process: int, gpu_mask: Optional[List[int]] = None) -> List[int]:\n\n available_gpus = get_available_gpus()\n if gpu_mask is not None:\n assert len(available_gpus) >= len(\n gpu_mask\n ), f\"Number of available GPUs ({len(available_gpus)}) is less than number of GPUs in mask ({len(gpu_mask)})\"\n available_gpus = [available_gpus[g] for g in gpu_mask]\n num_gpus = len(available_gpus)\n\n gpus_to_use = []\n if num_gpus == 0:\n return gpus_to_use\n\n first_gpu_idx = process_idx * num_gpus_per_process\n for i in range(num_gpus_per_process):\n index_mod_num_gpus = (first_gpu_idx + i) % num_gpus\n gpus_to_use.append(index_mod_num_gpus)\n\n log.debug(\n f\"Using GPUs {gpus_to_use} for process {process_idx} (actually maps to GPUs {[available_gpus[g] for g in gpus_to_use]})\"\n )\n return gpus_to_use",
"def get_global_rank(backend) -> int:\n if backend != 'mpi':\n return int(os.environ.get('RANK', 0))\n else:\n return int(os.environ.get('OMPI_COMM_WORLD_RANK', 0))",
"def _get_local_rank_size(comm):\n this_node = platform.node()\n ranks_nodes = comm.allgather((comm.Get_rank(), this_node))\n node2rankssofar = collections.defaultdict(int)\n local_rank = None\n for (rank, node) in ranks_nodes:\n if rank == comm.Get_rank():\n local_rank = node2rankssofar[node]\n node2rankssofar[node] += 1\n assert local_rank is not None\n return local_rank, node2rankssofar[this_node]",
"def testUnknownIndices(self):\n params = constant_op.constant(((0, 1, 2),))\n indices = array_ops.placeholder(dtypes.int32)\n gather_nd_t = array_ops.gather_nd(params, indices, batch_dims=1)\n shape = gather_nd_t.get_shape()\n self.assertIsNone(shape.ndims)\n self.assertIsNone(tensor_shape.dimension_value(shape[0]))",
"def get_local_params(self, par_global):\n return [\n par_global[a] if a is not None else b\n for a, b in zip(self._p_global_indices, self.p_local)\n ]",
"def get_sobol_indices(self, order):\n self._set_statistics()\n return self.statistics_object.get_sobol(order)",
"def indices(dimensions, dtype=int, sparse=False):\n\n if not isinstance(dimensions, (tuple, list)):\n pass\n elif len(dimensions) > 2 or len(dimensions) == 0:\n pass\n elif dtype != int:\n pass\n elif sparse:\n pass\n else:\n return dpnp_indices(dimensions)\n\n return call_origin(numpy.indices, dimensions, dtype, sparse)",
"def init_distributed(args: dict):\n\n if is_distributed(args):\n dist.init_process_group(backend=\"nccl\")\n torch.cuda.set_device(args.local_rank)",
"def _get_indices(self, indices: VecEnvIndices) -> Iterable[int]:\n if indices is None:\n indices = range(self.num_envs)\n elif isinstance(indices, int):\n indices = [indices]\n return indices",
"def FindDistributedPoints(self, p_int, , vtkIdList, p_int_1):\n ...",
"def getGlobalIdxVals( self, i : int ):\n return range(self._layout.starts[i],self._layout.ends[i])",
"def GetNodeCommonality(self):\n\n self.__do_essential_memebers_exist__()\n\n elements = self.elements.ravel()\n idx_sort = np.argsort(elements)\n sorted_elements = elements[idx_sort]\n vals, idx_start = np.unique(sorted_elements, return_index=True)\n\n # Sets of indices\n flat_pos = np.split(idx_sort, idx_start[1:])\n els = np.split(idx_sort // int(self.elements.shape[1]), idx_start[1:])\n pos = np.split(idx_sort % int(self.elements.shape[1]), idx_start[1:])\n\n # In case one wants to return only the duplicates i.e. filter keeping only items occurring more than once\n # vals, idx_start, count = np.unique(sorted_elements, return_counts=True, return_index=True)\n # vals = vals[count > 1]\n # res = filter(lambda x: x.size > 1, res)\n\n return els, pos, flat_pos",
"def get_unused_indices(program):\n used = get_used_indices(program)\n all_indices = set(range(len(program.var_types) - 1))\n return all_indices - used",
"def compute_variable_indexes(path, overwrite=True, multiproc=False):\n if multiproc is True:\n tf.keras.backend.clear_session()\n set_cpu_option()\n\n gin_bindings = [\n \"evaluation.evaluation_fn = @variables_idx\",\n \"variables_idx.num_train = 10000\", \"evaluation.random_seed = 2051556033\",\n \"dataset.name='auto'\", \"evaluation.name = 'variables index'\"\n ]\n path = pathlib.Path(path)\n result_path = path.parent.parent / \"metrics\" / \"variance\" / \"filtered_variables\"\n logger.info(\"Computing variable indexes of {}\".format(path.parent.parent))\n gin_evaluation(path, result_path, overwrite, gin_bindings)"
] | [
"0.78130454",
"0.55268973",
"0.5226445",
"0.51116353",
"0.50954676",
"0.48668343",
"0.4836725",
"0.47469142",
"0.4703786",
"0.4642443",
"0.45836034",
"0.45445976",
"0.45384085",
"0.45181572",
"0.44909367",
"0.4485813",
"0.4471921",
"0.44507834",
"0.4396532",
"0.4384738",
"0.43786052",
"0.43531397",
"0.43459877",
"0.43445668",
"0.43338853",
"0.43320948",
"0.43223488",
"0.43058595",
"0.43007943",
"0.428299"
] | 0.7679277 | 1 |
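A minimal sketch of the overlap scenario described by gather_global_indices_overlap above, assuming mpi4py is installed and the script is launched under mpirun; the even-rank sub-communicator is purely illustrative and stands in for the subset of comm that actually owns a topology (the remaining ranks would pass topo=None):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Only even ranks join the sub-communicator that would carry the topology.
color = 0 if rank % 2 == 0 else MPI.UNDEFINED
sub_comm = comm.Split(color, key=rank)
# Ranks outside the subset get MPI.COMM_NULL; in the function above they
# would call it with topo=None but still take part in the gather on comm.
owns_topo = sub_comm != MPI.COMM_NULL
print(rank, "owns a topology:", owns_topo)
if owns_topo:
    sub_comm.Free()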
Return True if all MPI processes of child belong to parent | def is_parent(child, parent):
# Get the list of processes
assert child is not None
assert parent is not None
#child_ranks = [i for i in xrange(child.Get_size())]
child_group = child.Get_group()
parent_group = parent.Get_group()
inter_group = MPI.Group.Intersect(child_group, parent_group)
return child_group.Get_size() == inter_group.Get_size() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1",
"def contains_parent(self, pid):\n return pid in self._parent_ids",
"def check_parent_processes_alive():\n cur_process = psutil.Process()\n parent = cur_process.parent()\n while True:\n time.sleep(1)\n if not parent.is_running():\n break\n\n logger.warning(\"Parent process is terminated abnormally. Process exits.\")\n cur_process.kill()",
"def is_multigpu_child_process():\n return (dist.is_initialized() or \"TORCHELASTIC_RUN_ID\" in os.environ) and os.environ[\"LOCAL_RANK\"] != \"0\"",
"def i_am_root():\n try:\n return True if mpi_rank() == 0 else False\n except AttributeError:\n # not running MPI\n return True",
"def is_known(self, child):\r\n return child in self._parents",
"def __contains__(self, pid):\n return self.contains_child(pid) or self.contains_parent(pid)",
"def contains_child(self, pid):\n return pid in self._children_ids",
"def pure_mpi(self):\n return self.has_mpi and not self.has_omp",
"def is_child_of(self, *args):\n return _ida_hexrays.cexpr_t_is_child_of(self, *args)",
"def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False",
"def am_i_root():\n if WITH_MPI:\n return not ME\n else:\n return os.getpid() == MASTER_PID",
"def has_mpi(self):\n return bool(self.mpi_runner)",
"def hasChildren():",
"def true_partition(self):\n if 'NA' in self.mothers or 'NA' in self.fathers:\n warn('Warning: one or more individuals has at least one parent of unkown identity.')\n warn('All such individuals will be assigned to the same sibship group.')\n\n # concatenate mother and father names to create a vector of parent pairs.\n #parentage = np.array([str(self.mothers[o]) + '/' + str(self.fathers[o]) for o in range(noffs)])\n possible_families = np.unique(self.parents) # get a list of all unique parent pairs\n\n partitions = np.zeros(self.size).astype('int') # empty vector of zeros.\n for o in range(self.nfamilies):\n # for every possible family label individuals with an identical integer.\n partitions[self.parents == possible_families[o]] += o\n\n return partitions",
"def is_state_a_child(child: State, parent: State) -> bool:\n if child.x >= parent.x and child.y >= parent.y and child.x <= parent.x + parent.width and child.y<=parent.y+parent.height:\n return True\n return False",
"def is_mpi_env():\n try:\n import mpi4py\n except ImportError:\n return False\n\n try:\n import mpi4py.MPI\n except ImportError:\n return False\n\n if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:\n return False\n return True",
"def _isSubProcessRunning(self): \n # Check if child process has terminated. Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False",
"def can_add_child(self, child):\n if not self.is_valid_child(child):\n return False\n if child.isa == u'PBXGroup':\n return len(func.take(\\\n lambda c: c.pbx_name == child.pbx_name and c.realpath() == child.realpath(),\\\n self.pbx_children)) == 0\n else:\n return len(func.take(lambda c:c.realpath() == child.realpath(), self.pbx_children)) == 0",
"def has_parents(self):\n return len(self._parents) > 0",
"def can_use_mpi_pool():\n return ALLOW_SPAWN or ALREADY_RUNNING_AS_MPI",
"def _refers_to_parent_table(self) -> bool:\n pt = self.parent_persist_selectable\n mt = self.child_persist_selectable\n result = False\n\n def visit_binary(binary: BinaryExpression[Any]) -> None:\n nonlocal result\n c, f = binary.left, binary.right\n if (\n isinstance(c, expression.ColumnClause)\n and isinstance(f, expression.ColumnClause)\n and pt.is_derived_from(c.table)\n and pt.is_derived_from(f.table)\n and mt.is_derived_from(c.table)\n and mt.is_derived_from(f.table)\n ):\n result = True\n\n visitors.traverse(self.primaryjoin, {}, {\"binary\": visit_binary})\n return result",
"def any_parent_has_power(self, member_name):\n for parent in self.parents_of(member_name):\n if parent.has_power:\n return True\n \n return False",
"def is_parent(self):\n return not self.children",
"def is_consistent_with(self, target):\n same_parent = self.parent() == target.parent()\n # Note FP. Is it really required to have the\n # same parent? Inclusion of all proc may be enough?\n return npw.equal(self.shape, target.shape).all() and same_parent",
"def is_parent(self, item):\n if len(self.df.loc[self.df['parent_code']==item, :]): \n return True\n return False",
"def is_parent(self):\n if self.parent is not None:\n return False\n return True",
"def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi",
"def has_parent(self, index):\n return self.get_parent_index(index) < len(self.heap)",
"def hasparents(self):\n return bool(self.parents)"
] | [
"0.6905076",
"0.6545807",
"0.6406559",
"0.63527286",
"0.6351458",
"0.6243226",
"0.62221795",
"0.6163074",
"0.6159543",
"0.615525",
"0.6027681",
"0.5970372",
"0.5956478",
"0.5952117",
"0.5878544",
"0.58695066",
"0.5852495",
"0.579879",
"0.57776415",
"0.5767349",
"0.57645994",
"0.5742941",
"0.5737655",
"0.57161695",
"0.57145613",
"0.5667657",
"0.5655679",
"0.56465036",
"0.5643793",
"0.5592586"
] | 0.82558614 | 0 |
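A small self-contained check of the relation tested by is_parent above, assuming mpi4py under mpirun; the even-rank child communicator is only an example:

from mpi4py import MPI

parent = MPI.COMM_WORLD
rank = parent.Get_rank()
color = 0 if rank % 2 == 0 else MPI.UNDEFINED
child = parent.Split(color, key=rank)
if child != MPI.COMM_NULL:
    child_group = child.Get_group()
    parent_group = parent.Get_group()
    inter_group = MPI.Group.Intersect(child_group, parent_group)
    # Every process of child also belongs to parent, so the sizes match.
    assert inter_group.Get_size() == child_group.Get_size()
    child.Free()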
Number of processes common to comm_1 and comm_2 | def intersection_size(comm_1, comm_2):
if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:
return None
group_1 = comm_1.Get_group()
group_2 = comm_2.Get_group()
inter_group = MPI.Group.Intersect(group_1, group_2)
return inter_group.Get_size() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def common_count(self, node_1, node_2):\n return int(len(set(nx.neighbors(self.graph, node_1)).intersection(set(nx.neighbors(self.graph, node_2)))))",
"def communities_with_protesters(partition, active_nodes):\n return len(set([partition[node] for node in active_nodes]))",
"def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)",
"def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)",
"def calc_process_cohesion(partitions, graph):\n ch = 0\n for part in partitions:\n crc = calc_community_relation_cohesion(part, graph)\n cic = calc_community_information_cohesion(part, graph)\n ch = ch + (crc * cic)\n ch = ch / len(partitions)\n return ch",
"def protesting_communities(partition, active_nodes):\n communities = defaultdict(int)\n for node in active_nodes:\n communities[partition[node]] += 1\n return communities",
"def count_common_connections(network, user_A, user_B):\n count = 0\n if user_A not in network or user_B not in network:\n return False\n for person in network[user_A][0]:\n if person in network[user_B][0]:\n count += 1\n return count",
"def calc_community_information_cohesion(partition, graph):\n pre_suc = list()\n for vertex in partition:\n pre_suc.extend(get_unique_predecessors_successors(vertex, graph))\n pre_suc = get_duplicates(pre_suc)\n if len(pre_suc) == 0:\n cic = 0\n else:\n cic = len(pre_suc) / len(partition)\n return cic",
"def compare_comm(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result == res[0]",
"def neigh_comm(n):\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc",
"def count_common_subgraphs(graph1, graph2, n1, n2,\n node_attrib='label', edge_attrib='label'):\n for graph in (graph1, graph2):\n assert nx.is_directed_acyclic_graph(graph)\n \n if graph1.node[n1][node_attrib] != graph2.node[n2][node_attrib]:\n return 0\n\n n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)\n n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)\n\n if not n1_children or not n2_children:\n return 0\n else:\n result = 1 # neutral element of multiplication\n for n1_target, n2_target in common_dependency_targets(graph1, graph2, n1, n2,\n node_attrib=node_attrib):\n result *= (count_common_subgraphs(graph1, graph2,\n n1_target, n2_target,\n node_attrib='label',\n edge_attrib='label') + 2)\n return result - 1",
"def part1(programs):\n count = 0\n for program in programs:\n if program.connected(0)[0]:\n count += 1\n\n return count",
"def commonCharacterCount(s1, s2):\n return sum(min(s1.count(x),s2.count(x)) for x in set(s1))",
"def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)",
"def count_common_connections(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n common_connections = 0\n for conn in network[user_A]['connections']:\n if conn in network[user_B]['connections']:\n common_connections += 1\n return common_connections",
"def countMatches(g1, g2):\n if g1 is None or g2 is None or len(g1) == 0 or len(g1[0]) == 0: # sanity check\n return 0\n count = 0\n for i in range(len(g1)):\n for j in range(len(g1[0])):\n if g1[i][j] == g2[i][j] == 1 and search_grid(g1, g2, i, j):\n count = count + 1\n return count",
"def commonality(left_struc, right_struc):\n assert type(left_struc) is type(right_struc), (left_struc, right_struc)\n assert left_struc and right_struc, (left_struc, right_struc)\n if type(left_struc) is dict:\n (overlap, left, right) = compute_keysets(left_struc, right_struc)\n com = float(len(overlap))\n tot = len(overlap.union(left, right))\n else:\n assert type(left_struc) in (list, tuple), left_struc\n com = 0.0\n for elem in left_struc:\n if elem in right_struc:\n com += 1\n tot = max(len(left_struc), len(right_struc))\n\n return com / tot",
"def _common_prefix(sequence1, sequence2):\n i = 0\n for elem1, elem2 in zip(sequence1, sequence2):\n if elem1 != elem2:\n return i\n i += 1\n\n # Return length of sequence if sequences are identical\n return min(len(sequence1), len(sequence2))",
"def common_token_len(self, w1, d1, w2, d2):\n w1_tk = set(self.__stem_Tokens(w1))\n w2_tk = set(self.__stem_Tokens(w2))\n common_len = len(w1_tk.intersection(w2_tk))\n return common_len",
"def compare_groups(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result in res[:-1]",
"def get_common_words_count(arr1, arr2):\n return len(list(set(arr1).intersection(arr2)))",
"def n_components(self):\n return 1",
"def shared_nb(self):\n return self.bbsitting_set.count() + self.booked.count()",
"def get_relatedness(theme1,theme2):\r\n nAB=0\r\n nAB_plus=0\r\n for sentence_1 in theme1.sentences:\r\n for sentence_2 in theme2.sentences:\r\n if cmp(sentence_1[2],sentence_2[2])==0:\r\n nAB=nAB+1\r\n if sentence_1[3]==sentence_2[3]:\r\n nAB_plus=nAB_plus+1\r\n if nAB==0:\r\n return 0\r\n else:\r\n return float(nAB_plus)/float(nAB)",
"def position_counter(strains):\n with database.make_connection() as connection:\n pos = []\n for strain in strains:\n # Get every variant position\n cursor = r.table(TABLE).filter({'StrainID': strain}).pluck(\n 'Position').run(connection)\n cur = [strain['Position'] for strain in cursor]\n pos = pos+cur\n common = filter_counts(pos, len(strains))\n return common",
"def calc_community_relation_cohesion(partition, graph):\n n_overlaps = get_number_of_overlaps(partition, graph)\n if n_overlaps > 0 and len(partition) > 1:\n avg_ov = ((n_overlaps * 2) / len(partition))\n crc = avg_ov / (len(partition) * len(partition) - 1)\n elif n_overlaps == 0:\n crc = 0\n elif len(partition) == 1:\n crc = n_overlaps\n else:\n crc = 0\n return crc",
"def getDimensions(self, spSys1, spSys2):\n\n proton1, proton2 = spSys1.getAtoms()[0], spSys2.getAtoms()[0]\n\n assignments1 = [atom for assi in self.getProton1Assignments()\n for atom in assi.getAtoms()]\n\n assignments2 = [atom for assi in self.getProton2Assignments()\n for atom in assi.getAtoms()]\n\n dimensions1 = [proton1 in assignments1, proton1 in assignments2]\n dimensions2 = [proton2 in assignments1, proton2 in assignments2] \n\n dims = [None, None]\n\n if dimensions1.count(1) == 1 and dimensions2.count(1) == 1:\n dims[0] = dimensions1.index(1) + 1\n dims[1] = dimensions2.index(1) + 1\n \n\n elif dimensions1.count(1) == 1 and dimensions2.count(1) == 2:\n\n dims[0] = dimensions1.index(1) + 1\n dims[1] = 1\n if dims[0] == 1:\n dims[1] = 2\n\n elif dimensions1.count(1) == 2 and dimensions2.count(1) == 1:\n dims[1] = dimensions2.index(1) + 1\n dims[0] = 1\n if dims[1] == 1:\n dims[0] = 2\n\n elif dimensions1.count(1) == 2 and dimensions2.count(1) == 2:\n dims = [1,2]\n h1 = proton1.getHeteroAtom()\n h2 = proton2.getHeteroAtom()\n \n h1assi = [atom for assi in self.getHetero1Assignments()\n for atom in assi.getAtoms()]\n \n h2assi = [atom for assi in self.getHetero2Assignments()\n for atom in assi.getAtoms()]\n\n if h1assi:\n if h2 in h1assi:\n dims = [2, 1]\n \n if h2assi:\n if h1 in h2assi:\n dims = [2, 1]\n \n if None in dims:\n raise ValueError, 'spin pair could not be assigned to dimensions'\n\n else:\n return tuple(dims)",
"def getNumPoints(self, l1, l2):\n n1 = self.pointcounts[l1]\n n2 = self.pointcounts[l2]\n self.pointcounts[('Cl_%d' % self.labelcount, l1, l2)] = n1 + n2\n return (n1, n2)",
"def get_co_occurrences(self, word1, word2):\n raise NotImplementedError(\"Word2Vec model does not support co-occurrence counting\")",
"def numcheck(list1, list2):\r\n set1 = set(list1)\r\n set2 = set(list2)\r\n #set3 contains all items common to set1 and set2\r\n set3 = set1.intersection(set2)\r\n # return number of matching items\r\n return len(set3)"
] | [
"0.71106726",
"0.64436406",
"0.6228776",
"0.6228776",
"0.62096745",
"0.6076676",
"0.60679585",
"0.59844345",
"0.59690386",
"0.5953923",
"0.5925899",
"0.5922579",
"0.59189546",
"0.5907016",
"0.58420223",
"0.5753761",
"0.57528996",
"0.5745631",
"0.574225",
"0.5684263",
"0.56555855",
"0.56460875",
"0.56446713",
"0.5644042",
"0.5632925",
"0.562713",
"0.5591475",
"0.55852157",
"0.55772763",
"0.5530146"
] | 0.71126 | 0 |
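An illustrative call pattern matching intersection_size above, mpi4py assumed; splitting COMM_WORLD by rank parity is only an example setup:

from mpi4py import MPI

world = MPI.COMM_WORLD
rank = world.Get_rank()
half = world.Split(color=rank % 2, key=rank)
# Every process of the half-communicator is also in COMM_WORLD, so the
# intersection size equals the size of the half-communicator itself.
inter_group = MPI.Group.Intersect(world.Get_group(), half.Get_group())
print(rank, inter_group.Get_size() == half.Get_size())
half.Free()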
Compare two MPI communicators. Returns True if the two communicators are handles for the same group of processes and for the same communication context. | def compare_comm(comm_1, comm_2):
assert comm_1 != MPI.COMM_NULL
assert comm_2 != MPI.COMM_NULL
result = MPI.Comm.Compare(comm_1, comm_2)
res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]
return result == res[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compare_groups(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result in res[:-1]",
"def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1",
"def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()",
"def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)",
"def __eq__(self, other):\n return self.conn == other.conn and self.p1 == other.p1 and self.p2 == other.p2",
"def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi",
"def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)",
"def Mirrorprocs(p1, p2):\n return False",
"def _fake_message_compare(m1, m2):\r\n m1 = m1.serialize()\r\n m2 = m2.serialize()\r\n diff = False\r\n for i in range(len(m1)):\r\n if m1[i] is None:\r\n continue\r\n if m1[i] != m2[i]:\r\n diff = True\r\n break\r\n return not diff",
"def isHandle(self):\n return self.type in mpi_handle_types",
"def mutexPropositions(prop1, prop2, mutexActions):\n for a1 in prop1.getProducers():\n for a2 in prop2.getProducers():\n if Pair(a1, a2) not in mutexActions:\n return False\n return True",
"def _on_same_device(self, other: \"PArray\") -> bool:\n this_device = self._current_device_index\n return this_device in other._array",
"def object_communicator():\n comm = MPI.COMM_WORLD",
"def basic_compare(self, other: \"Molecule\") -> bool:\n return self.inchi_key[:14] == other.inchi_key[:14]",
"def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)",
"def __eq__(self, other):\n for ls, lo in zip(self.leaderboard_names, other.leaderboard_names):\n if ls != lo:\n return False\n for ls, lo in zip(self.leaderboard_groups, other.leaderboard_groups):\n if ls != lo:\n return False\n if self.top_left != other.top_left:\n return False\n if self.bottom_right != other.bottom_right:\n return False\n return True",
"def __eq__(self, other):\n if not isinstance(other, MessageGroup):\n return False\n\n return self.__dict__ == other.__dict__",
"def e_paralelo(self, other):\n if (self == other) or (self.normaliza() == other.normaliza()):\n return True\n else:\n return False",
"def pure_mpi(self):\n return self.has_mpi and not self.has_omp",
"def compare(self, other_group):\n x_bounds = self.bounding_box_x_len == other_group.bounding_box_x_len\n y_bounds = self.bounding_box_y_len == other_group.bounding_box_y_len\n same_num_cells = self.num_colored_cells == other_group.num_colored_cells\n if not (x_bounds and y_bounds and same_num_cells):\n return False\n for row_ind in range(len(other_group.cells)):\n for col_ind in range(len(other_group.cells[0])):\n if other_group.cells[row_ind][col_ind] != self.cells[row_ind][col_ind]:\n return False\n return True",
"def is_mpi_env():\n try:\n import mpi4py\n except ImportError:\n return False\n\n try:\n import mpi4py.MPI\n except ImportError:\n return False\n\n if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:\n return False\n return True",
"def __eq__(self, other):\n if not isinstance(other, WrappedChannel):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return isinstance(other, type(self)) and set(self.channels) == set(other.channels)",
"def __eq__(self, other):\n return isinstance(other, Procedure) and self.__uuid == other.uuid",
"def are_equal(self, sp1, sp2):\n return True",
"def are_equal(self, sp1, sp2):\n return sp1 == sp2",
"def are_connected(self, name1, name2):",
"def intersection_size(comm_1, comm_2):\n if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:\n return None\n group_1 = comm_1.Get_group()\n group_2 = comm_2.Get_group()\n inter_group = MPI.Group.Intersect(group_1, group_2)\n return inter_group.Get_size()",
"def __eq__(self, other):\r\n return (type(self) == type(other) and\r\n other.broadcastable == self.broadcastable)",
"def check_comm(instance):\n\n comm = instance.__dict__.get(\"commodity\")\n\n accounted_comm = set()\n\n for c in comm:\n\n for r in instance.reactions:\n r_dict = r.__dict__\n\n for label, species in r_dict.items():\n\n if instance.__dict__.get(\"recombination\") == Recomb_1:\n product = r_dict.get(\"left2\")\n\n else:\n product = r_dict.get(\"right2\")\n\n if product == c:\n accounted_comm.add(c)\n\n if set(comm) == accounted_comm:\n return True\n else:\n print(\"Commodity:\", set(comm))\n print(\"Commodity products made:\", accounted_comm)\n return False"
] | [
"0.74933475",
"0.61300033",
"0.6039855",
"0.59951395",
"0.5934048",
"0.58377117",
"0.5620288",
"0.55401844",
"0.5411162",
"0.5404919",
"0.53369236",
"0.5305763",
"0.53046554",
"0.52937305",
"0.52503824",
"0.52165604",
"0.52162445",
"0.51967853",
"0.51824045",
"0.5172295",
"0.516735",
"0.51657116",
"0.51644564",
"0.51506144",
"0.5148348",
"0.5147662",
"0.5116553",
"0.50939196",
"0.5057151",
"0.5055618"
] | 0.76038 | 0 |
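A sketch of the distinction compare_comm above relies on, assuming mpi4py: Dup keeps the same group but creates a new communication context, so only the communicator compared with itself yields MPI.IDENT:

from mpi4py import MPI

comm = MPI.COMM_WORLD
dup = comm.Dup()
# Same handle -> IDENT; duplicate -> CONGRUENT (same group, different context).
print(MPI.Comm.Compare(comm, comm) == MPI.IDENT)
print(MPI.Comm.Compare(comm, dup) == MPI.CONGRUENT)
dup.Free()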
Compare the groups of two MPI communicators. Returns True if each comm handles the same group of MPI processes. | def compare_groups(comm_1, comm_2):
assert comm_1 != MPI.COMM_NULL
assert comm_2 != MPI.COMM_NULL
result = MPI.Comm.Compare(comm_1, comm_2)
res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]
return result in res[:-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compare_comm(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result == res[0]",
"def same_group(self,i,j):\n if self.group_number(i) == self.group_number(j):\n return True\n else:\n return False",
"def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()",
"def compare(self, other_group):\n x_bounds = self.bounding_box_x_len == other_group.bounding_box_x_len\n y_bounds = self.bounding_box_y_len == other_group.bounding_box_y_len\n same_num_cells = self.num_colored_cells == other_group.num_colored_cells\n if not (x_bounds and y_bounds and same_num_cells):\n return False\n for row_ind in range(len(other_group.cells)):\n for col_ind in range(len(other_group.cells[0])):\n if other_group.cells[row_ind][col_ind] != self.cells[row_ind][col_ind]:\n return False\n return True",
"def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1",
"def equals(self, other):\n if not isinstance(other, PermutationGroup):\n return False\n\n set_self_gens = set(self.generators)\n set_other_gens = set(other.generators)\n\n # before reaching the general case there are also certain\n # optimisation and obvious cases requiring less or no actual\n # computation.\n if set_self_gens == set_other_gens:\n return True\n\n # in the most general case it will check that each generator of\n # one group belongs to the other PermutationGroup and vice-versa\n for gen1 in set_self_gens:\n if not other.contains(gen1):\n return False\n for gen2 in set_other_gens:\n if not self.contains(gen2):\n return False\n return True",
"def intersection_size(comm_1, comm_2):\n if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:\n return None\n group_1 = comm_1.Get_group()\n group_2 = comm_2.Get_group()\n inter_group = MPI.Group.Intersect(group_1, group_2)\n return inter_group.Get_size()",
"def comm_group(self):\n return self._gcomm",
"def _check_group(self):\n if len(self.groups) != 2:\n raise ValueError(\"There have to be two groups!\")\n\n # Check the number of atoms in each group is the same\n n_group1 = 0\n for key, value in self.groups[0].items():\n n_group1 += value\n\n n_group2 = 0\n for key, value in self.groups[1].items():\n n_group2 += value\n\n if n_group1 != n_group2:\n f1 = self._group2formula(self.groups[0])\n f2 = self._group2formula(self.groups[1])\n msg = \"The two groups have to have the same number of atoms.\\n\"\n msg += \"Group 1: {} Group 2: {}\".format(f1, f2)\n raise ValueError(msg)",
"def check_for_group():\r\n if first_list[0][0] == second_list[0][0]:\r\n try:\r\n result = first_list[0][0], str(int(first_list[0][1]) + int(second_list[0][1]))\r\n except ValueError:\r\n result = first_list[0][0], str(float(first_list[0][1]) + float(second_list[0][1]))\r\n result_list.append(result)\r\n first_list.remove(first_list[0])\r\n second_list.remove(second_list[0])\r\n return True\r\n return False",
"def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)",
"def __eq__(self, other):\n if not isinstance(other, MessageGroup):\n return False\n\n return self.__dict__ == other.__dict__",
"def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi",
"def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)",
"def is_potential_group(self, player: int, row: int, col: int, row_diff: int, col_diff: int):\n opponent = 1 - player\n for _ in range(4):\n square = Square(row, col)\n if not self.is_valid(square):\n return False\n if self.state[opponent][row][col]:\n # If there is a token that belongs to the opponent in this group,\n # then this group is not a potential group that belongs to the given player.\n return False\n row, col = row + row_diff, col + col_diff\n return True",
"def _compare_groups_and_labels(self, groups, labels):\n # function that compares two lists without taking into account the order\n def comp_lists(l1, l2):\n len_match = len(l1) == len(l2)\n return len_match and np.all([g1 == g2 for g1, g2 in zip(l1, l2)])\n\n # comparison of the given groups\n groups_same = comp_lists(groups, self.selected_values['group_keys'])\n\n # if groups are the same, then compare the labels\n if groups_same:\n len_match = len(labels) == len(self.selected_values['labels_keys'])\n tmp = [comp_lists(g1, g2)\n for g1, g2 in zip(labels,\n self.selected_values['labels_keys'])]\n return len_match and np.all(tmp)\n else:\n return False",
"def group_diff(options, db):\n nested_rvals = []\n for ip in options.gmp:\n nested_rvals.append(get_ip_parents(ip, db))\n # get just the list of groups, stripping out the networks.\n group1 = [x[0] for x in nested_rvals[0]]\n group2 = [x[0] for x in nested_rvals[1]]\n common = sorted(list(set(group1) & set(group2)))\n diff1 = sorted(list(set(group1) - set(group2)))\n diff2 = sorted(list(set(group2) - set(group1)))\n return common, diff1, diff2",
"def check_comm(instance):\n\n comm = instance.__dict__.get(\"commodity\")\n\n accounted_comm = set()\n\n for c in comm:\n\n for r in instance.reactions:\n r_dict = r.__dict__\n\n for label, species in r_dict.items():\n\n if instance.__dict__.get(\"recombination\") == Recomb_1:\n product = r_dict.get(\"left2\")\n\n else:\n product = r_dict.get(\"right2\")\n\n if product == c:\n accounted_comm.add(c)\n\n if set(comm) == accounted_comm:\n return True\n else:\n print(\"Commodity:\", set(comm))\n print(\"Commodity products made:\", accounted_comm)\n return False",
"def basic_compare(self, other: \"Molecule\") -> bool:\n return self.inchi_key[:14] == other.inchi_key[:14]",
"def __eq__(self, other):\n if not isinstance(other, ShowServerGroupResult):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n group = self.group\n if not isinstance(other, group.dtype):\n return False\n return tuple.__eq__(self, other)",
"def __gt__(self, other):\n if other.groupnumber > self.groupnumber:\n return True\n else:\n return False",
"def is_group(self):\n return self._is_group",
"def comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True",
"def is_converged(clusters1, clusters2, k, num_of_cords):\r\n for i in range(k):\r\n for j in range(num_of_cords):\r\n if clusters1[i][j] != clusters2[i][j]:\r\n return False\r\n return True",
"def mpi_procs(self):\n return self._mpi_procs",
"def __eq__(self, other):\n for ls, lo in zip(self.leaderboard_names, other.leaderboard_names):\n if ls != lo:\n return False\n for ls, lo in zip(self.leaderboard_groups, other.leaderboard_groups):\n if ls != lo:\n return False\n if self.top_left != other.top_left:\n return False\n if self.bottom_right != other.bottom_right:\n return False\n return True",
"def __eq__(self, other):\n return self.conn == other.conn and self.p1 == other.p1 and self.p2 == other.p2",
"def is_mpi_env():\n try:\n import mpi4py\n except ImportError:\n return False\n\n try:\n import mpi4py.MPI\n except ImportError:\n return False\n\n if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:\n return False\n return True",
"def __eq__(self, other):\n if not isinstance(other, IamDomainGroupAllOf):\n return False\n\n return self.to_dict() == other.to_dict()"
] | [
"0.7372509",
"0.61725867",
"0.60191995",
"0.597187",
"0.5756517",
"0.56709945",
"0.5612317",
"0.5581606",
"0.5556505",
"0.5444479",
"0.5426828",
"0.541855",
"0.5415436",
"0.54028773",
"0.53394014",
"0.52934015",
"0.52109265",
"0.519534",
"0.5183995",
"0.51758873",
"0.506649",
"0.50646776",
"0.50601214",
"0.5037635",
"0.5022083",
"0.50203854",
"0.5006094",
"0.4997834",
"0.49949437",
"0.4984624"
] | 0.8524684 | 0 |
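A companion sketch for compare_groups above (mpi4py assumed): a communicator built from the same processes, even with ranks reordered, still compares as IDENT, CONGRUENT or SIMILAR, which is exactly the set accepted above:

from mpi4py import MPI

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Same processes, reversed rank order: SIMILAR rather than CONGRUENT
# when more than one process is involved.
reordered = comm.Split(color=0, key=size - rank)
result = MPI.Comm.Compare(comm, reordered)
print(result in (MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR))
reordered.Free()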
Find the values of ranks in target from ranks in source. | def convert_ranks(source, target):
assert source != MPI.COMM_NULL and target != MPI.COMM_NULL
g_source = source.Get_group()
g_target = target.Get_group()
size_source = g_source.Get_size()
r_source = [i for i in xrange(size_source)]
res = MPI.Group.Translate_ranks(g_source, r_source, g_target)
return {r_source[i]: res[i] for i in xrange(size_source)} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))",
"def rank_results(result_index, source2target):\n result2rank = defaultdict(lambda: [])\n for term, targets in result_index.items():\n ranked = sorted(targets, key=lambda tup: tup[1], reverse=True)\n ranks = rankdata([t[1] for t in ranked], method='min').tolist()\n ranks.reverse()\n for index, target in enumerate(ranked):\n if target[0] in source2target[term]:\n result2rank[term].append(ranks[index])\n return result2rank",
"def find_dst_value2(target: int, targets: list):\n targets.sort()\n i, j = 0, len(targets)-1\n while i < j:\n left_value = targets[i]\n right_value = targets[j]\n if left_value + right_value == target:\n return left_value, right_value\n if left_value + right_value > target:\n j -= 1\n elif left_value + right_value < target:\n i += 1",
"def findClosestNodes(self, target: hash.hash.Hash):\n # TODO: make more efficient\n # See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table\n \n nodes = []\n \n for bucket in self.buckets:\n nodes = nodes + bucket.nodes\n\n nodes.sort(key=lambda x: nodes.distanceToHash(targetHash))\n\n return nodes[:config.K]",
"def findSimilarityScore(self, source, destination):\n\n\n if self.similarityScores is not None:\n return self.similarityScores[source][destination]\n\n # Project graph (if a meta path was provided)\n if self.metaPath is None:\n projectedGraph = self.graph\n else:\n if self.metaPath[0] == self.metaPath[-1]: # Homogeneous projection?\n projectedGraph = self.metaPathUtility.createHomogeneousProjection(self.graph, self.metaPath)\n else:\n projectedGraph = self.metaPathUtility.createHeterogeneousProjection(self.graph, self.metaPath)\n\n # Build initial similarity scores\n self.similarityScores = defaultdict(dict)\n nodes = self.graph.getNodes()\n for a, b in itertools.product(nodes, nodes):\n self.similarityScores[a][b] = 1 if a is b else 0\n\n self.similarityScores = self.__simRank(projectedGraph, self.similarityScores, SimRankStrategy.k)\n\n return self.similarityScores[source][destination]",
"def _map_dims_(\n cls,\n source_array: List[int],\n target_array: List[int],\n source_idx: int,\n start_target_idx: int,\n source_to_target_map: DIMENSION_MAP,\n target_to_source_map: DIMENSION_MAP,\n ) -> Tuple[bool, int]:\n res, last_target_index = cls._can_reach_number_by_multiply(\n number_to_reach=source_array[source_idx], array=target_array, start_idx=start_target_idx\n )\n if not res:\n return (res, last_target_index)\n source_to_target_map[source_idx] = list(range(start_target_idx, last_target_index + 1))\n for idx in range(start_target_idx, last_target_index + 1):\n target_to_source_map[idx] = [source_idx]\n return (res, last_target_index)",
"def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores",
"def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)",
"def sources(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if self.rank == self.midpoint - 1 and partner == self.right:\n partners = set()\n elif self.rank == self.midpoint - 1 and partner == self.right - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n else:\n partner = self.left + (self.rank - self.midpoint)\n if self.rank == self.right - 1 and partner == self.midpoint:\n partners = set()\n elif self.rank == self.right - 1 and partner == self.midpoint - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n\n return partners",
"def two_sum(self, nums, target):\n\n # idea: for each num, check if it complements a previously seen one\n # (keeping track of them in a dictionary)\n seek = {}\n\n for ind, element in enumerate(nums):\n if element in seek:\n return [seek[element], ind]\n else:\n seek[target - element] = ind\n\n return []",
"def twoSum(self, nums: List[int], target: int) -> List[int]:\n # Use a dict to record visited numbers\n d = {}\n for i, n in enumerate(nums):\n m = target - n\n if m in d:\n return [d[m], i]\n else:\n d[n] = i",
"def sinks(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if partner == self.right:\n partner -= 1\n else:\n partner = self.left + (self.rank - self.midpoint)\n if partner == self.midpoint:\n partner -= 1\n\n return {partner}",
"def as_paired_ranks(x, y):\n n = len(x)\n paired = zip(x,y)\n x = list(x)\n y = list(y)\n x.sort()\n y.sort()\n rank_val_map_x = dict(zip(x, range(n)))\n rank_val_map_y = dict(zip(y, range(n)))\n ranked = []\n for i in range(n):\n ranked += [[rank_val_map_x[paired[i][0]], rank_val_map_y[paired[i][1]]]]\n return ranked",
"def similarity(self, source, target):\n results = { m.name: m.similarity(source, target) for m in self.metrics }\n return results",
"def get_targets(\n self, source: Tuple[str, str], relation: Optional[str] = None\n ) -> List[Node]:\n return self.get_common_targets([source], relation)",
"def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = two_way_skar(d, [source, target], others)\n return uniques",
"def searchRange4(self, nums: List[int], target: int) -> List[int]:\n def bisearch_l() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] >= target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n def bisearch_r() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] > target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n return [bisearch_l(), bisearch_r()]",
"def find_targetnodes(self):\n\n self.connect_backwards()\n\n targetnodes = []\n for n in self.find_datanodes():\n if len(n.receives_from) > 0:\n targetnodes.append(n)\n return targetnodes",
"def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes",
"def getResult(targets, i=None):",
"def ranking_loss(scores, targets):\n costs = targets[1]\n true_ants = targets[2]\n weights = targets[4] if len(targets) == 5 else None\n true_ant_score = torch.gather(scores, 1, true_ants)\n top_true, _ = true_ant_score.max(dim=1)\n tmp_loss = scores.add(1).add(\n top_true.unsqueeze(1).neg()\n ) # 1 + scores - top_true\n if weights is not None:\n tmp_loss = tmp_loss.mul(weights)\n tmp_loss = tmp_loss.mul(costs)\n loss, _ = tmp_loss.max(dim=1)\n out_score = torch.sum(loss)\n return out_score / n",
"def twoSum(self, nums: List[int], target: int) -> List[int]:\n d = {}\n for i, n in enumerate(nums):\n d[n]=i\n \n for i, n in enumerate(nums):\n m = target - n\n if m in d and d[m] != i:\n return [i,d[m]]\n return []",
"def match_chunk_permuted(src, target, indices, match_bounds=False):\n\n ds = src.datashape.copy()\n ds.dim_low = list(ds.dim_low)\n ds.dim_high = list(ds.dim_high)\n ds_target = target.datashape.copy()\n ds_target.dim_low = list(ds_target.dim_low)\n ds_target.dim_high = list(ds_target.dim_high)\n\n hi1 = ds.dim_high\n hi2 = ds_target.dim_high\n\n # lookup array dounds if schema is unbound\n if match_bounds:\n if any(l is None for l in hi1):\n tops = src.unpack('_').max().toarray()\n hi1 = [int(tops['%s_max' % l][0]) for l in src.dim_names]\n if any(l is None for l in hi2):\n tops = target.unpack('_').max().toarray()\n hi2 = [int(tops['%s_max' % l][0]) for l in target.dim_names]\n\n for i, j in indices:\n if not isinstance(i, int):\n i = target.dim_names.index(i)\n if not isinstance(j, int):\n j = src.dim_names.index(j)\n ds.chunk_size[j] = target.datashape.chunk_size[i]\n ds.chunk_overlap[j] = target.datashape.chunk_overlap[i]\n if match_bounds:\n l = min(ds.dim_low[j], ds_target.dim_low[i])\n h = max(hi1[j], hi2[i])\n\n ds.dim_low[j] = l\n ds.dim_high[j] = h\n ds_target.dim_low[i] = l\n ds_target.dim_high[i] = h\n\n if ds.schema != src.datashape.schema:\n src = src.redimension(ds.schema)\n if ds_target.schema != target.datashape.schema:\n target = target.redimension(ds_target.schema)\n\n return src, target",
"def twoSum(self, nums, target):\n\n seen = {}\n for position, num in enumerate(nums):\n remaining = target - num\n if remaining in seen:\n return [seen[remaining], position]\n seen[num] = position\n return []",
"def solutionByOthers(self, nums, target):\n nums.sort()\n results = []\n\n self._findNSum( nums, target, 4, [], results )\n return results",
"def _target(self, data):\n relative_values = abs(data - data.mean())\n index = relative_values.idxmax()\n value = relative_values[index]\n return index, value",
"def match_nodes(source_node, target_node):\n\n node_position = cmds.xform(source_node, q=True, ws=True, t=True)\n node_rotation = cmds.xform(source_node, q=True, ws=True, ro=True)\n cmds.xform(target_node, ws=True, t=node_position)\n cmds.xform(target_node, ws=True, ro=node_rotation)",
"def find_mutual_nn(self):\n best_match_src = self.scores.argmax(1) # Best match for each source word\n best_match_trg = self.scores.argmax(0) # Best match for each source word\n\n # ONELIENER\n # paired_idx = [(i,best_match_src[i]) for i in range(self.ns) if best_match_trg[best_match_src[i]] == i]\n # paired_words = [(self.src_words[i],self.trg_words[j]) for (i,j) in paired_idx]\n paired = []\n for i in range(self.ns):\n m = best_match_src[i]\n if best_match_trg[m] == i:\n paired.append((i,m))\n\n paired_toks = []\n if self.src_words and self.trg_words:\n paired_toks = [(self.src_words[i],self.trg_words[j]) for (i,j) in paired]\n else:\n paired_toks = paired\n return paired_toks",
"def get_matches(self, first, second):\n matches = self._match_table.dropna(0)[\n [first.position.id, second.position.id]].astype(int).values\n return matches",
"def match_scatter_curves(target_data, source_data):\n\n # Create list of calculated I values matched to the nearest experimental q\n # Remember that the arrays in python start at 0, those in Fortran at 1\n last_source = len(source_data)\n last_target = len(target_data)\n\n # Initialize array to hold the calculated I values matched to\n # experimental Q values\n matched_I = np.zeros(last_target, dtype=float)\n\n # Use the old fortran routine to match the data sets by q value\n # matched_no is the number of datapoints which contain matched data\n matched_no = sjp_util.qrange_match(target_data[:, 0], source_data[:, 0],\n source_data[:, 1], last_target,\n last_source, matched_I)\n\n matched_I.resize(matched_no)\n\n return matched_I"
] | [
"0.6195965",
"0.61417454",
"0.57947445",
"0.554299",
"0.5502083",
"0.53975844",
"0.533245",
"0.528168",
"0.5239981",
"0.5233992",
"0.5226889",
"0.5212047",
"0.5168843",
"0.5151811",
"0.5149276",
"0.5147061",
"0.5105847",
"0.5105718",
"0.50940347",
"0.5074896",
"0.505985",
"0.505242",
"0.5049568",
"0.50463647",
"0.5042757",
"0.5032336",
"0.50239444",
"0.5018989",
"0.5016964",
"0.50137633"
] | 0.64136374 | 0 |
Compute drift score as the percentage of overlapping probabilities | def compute_drift_score(ref_col_prob, col_prob):
return sum(abs(np.asarray(ref_col_prob) - np.array(col_prob)) * 100) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate(self):\n\n gt = self.ground_truth.flatten().astype(np.int8)\n seg = self.segmentation.flatten().astype(np.int8)\n\n probability_difference = np.absolute(gt - seg).sum()\n probability_joint = (gt * seg).sum()\n\n if probability_joint != 0:\n return probability_difference / (2. * probability_joint)\n else:\n return -1",
"def calc_match_probability(obs, pred1):\n \n # Throw away any non-atom columns\n obs_reduced = obs.loc[:, self.pars[\"atom_set\"].\n intersection(obs.columns)]\n pred1_reduced = pred1.loc[self.pars[\"atom_set\"].\n intersection(pred1.index)]\n \n # Calculate shift differences for each observed spin system\n delta = obs_reduced - pred1_reduced\n \n # Make a note of NA positions in delta, and set them to zero \n # (this avoids warnings when using norm.cdf later)\n na_mask = delta.isna()\n delta[na_mask] = 0\n \n if self.pars[\"prob_method\"] == \"delta_correlation\":\n overall_prob = pd.Series(index=delta.index)\n overall_prob[:] = 1\n \n d_mean = pd.read_csv(\"../data/d_mean.csv\", header=None, \n index_col=0).loc[delta.columns,1]\n d_cov = (pd.read_csv(\"../data/d_cov.csv\", index_col=0).\n loc[delta.columns,delta.columns])\n \n mvn = multivariate_normal(d_mean, d_cov)\n \n overall_prob = mvn.logpdf(delta)\n \n # Penalise missing shifts, unless also missing in predictions\n overall_prob = (overall_prob + log10(default_prob) * \n (na_mask.sum(axis=1) - pred1_reduced.isna().sum()))\n \n else:\n prob = delta.copy()\n prob.iloc[:,:] = 1\n \n for c in delta.columns:\n if self.pars[\"prob_method\"] == \"cdf\":\n # Use the cdf to calculate the probability of a \n # delta *at least* as great as the actual one\n prob[c] = log10(2) + norm.logcdf(-1*abs(\n pd.to_numeric(delta[c])), scale=atom_sd[c]*sf)\n elif self.pars[\"prob_method\"] == \"pdf\":\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf) \n elif shift_correlation:\n print(\"shift_correlation not yet implemented. Defaulting to pdf.\")\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf)\n else:\n print(\"Method for calculating probability not recognised. Defaulting to pdf.\")\n prob[c] = norm.logpdf(pd.to_numeric(delta[c]), \n scale=atom_sd[c]*sf)\n \n # In positions where data was missing, use default probability\n prob[na_mask] = log10(default_prob)\n \n # Calculate penalty for a HADAMAC mismatch\n if use_hadamac:\n # If the i-1 aa type of the predicted residue matches the \n # HADAMAC group of the observation, probability is 1.\n # Otherwise, probability defaults to 0.01\n prob[\"SS_classm1\"] = 0.01\n if type(pred1[\"Res_typem1\"])==str: # dummies have NaN\n prob.loc[obs[\"SS_classm1\"].str.find(\n pred1[\"Res_typem1\"])>=0, \"SS_classm1\"] = 1\n \n # Calculate overall probability of each row\n overall_prob = prob.sum(skipna=False, axis=1)\n \n return(overall_prob)",
"def scoring_function(times):\n sorted_times = sorted(times)\n \n diffs = []\n for i in range(len(sorted_times)-1):\n diff = sorted_times[i+1]- sorted_times[i]\n \n if diff == 0.0: # overlaps cannot happen score with a large penalty\n diffs.append(-100)\n elif diff <= 1.0: # punish small differences\n diffs.append(-2)\n elif diff > 4.0: # Gaps greater than 4 are large enough and considered OK\n diffs.append(4.0)\n else:\n diffs.append(diff)\n \n return sum(diffs)",
"def overlap_score(labels, labels_pred):\n raw_overlap = 1-fraction_mislabeled_nodes(labels, labels_pred)\n partition_true = np.array(labels).astype(int)\n partition_pred = np.array(labels_pred).astype(int)\n num_nodes = partition_pred.size\n num_groups = partition_true.max() + 1\n\n chance_level = 0.\n for i in range(num_groups):\n temp = np.sum(i == partition_true) / num_nodes\n if temp > chance_level:\n chance_level = temp\n\n score = (raw_overlap - chance_level) / (1 - chance_level)\n if score <= 0:\n score = 0\n\n return score",
"def drift_score(self):\n if self.measured_val is None:\n return 0.0\n\n if self.rebalance_type == self.REBALANCE_TYPE_ABSOLUTE:\n return (self.measured_val - self.configured_val) / self.rebalance_thr\n else:\n return ((self.measured_val - self.configured_val) / self.configured_val) / self.rebalance_thr",
"def calculate_precinct_score(pt, dstrct):\n return pt.F(dstrct)",
"def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )",
"def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore",
"def dice_score(ground_truth, prediction):\r\n\r\n # Normalize\r\n prediction /= np.amax(prediction)\r\n ground_truth /= np.amax(ground_truth)\r\n\r\n true_positive_mask = np.logical_and(ground_truth==1, prediction==1)\r\n false_positive_mask = np.logical_and(ground_truth==0, prediction==1)\r\n false_negative_mask = np.logical_and(ground_truth==1, prediction==0)\r\n\r\n TP = np.count_nonzero(true_positive_mask)\r\n FP = np.count_nonzero(false_positive_mask)\r\n FN = np.count_nonzero(false_negative_mask)\r\n\r\n DSC = 2*TP / (2*TP + FP + FN)\r\n\r\n return DSC",
"def pct_match(self, s1, s2, comp_length):\n\n matches = self.max_freq[s1:s1+comp_length] \\\n == self.max_freq[s2:s2+comp_length]\n return np.ma.sum(matches) / np.ma.count(matches)",
"def RPS(y_true, y_pred) -> float:\n output = 0.\n data_num = len(y_true)\n for i in range(data_num):\n times = len(y_true[i]) - 1 \n cumulative_sum = 0.\n score = 0.\n for time in range(times):\n cumulative_sum += y_true[i,time] - y_pred[i,time]\n score += cumulative_sum ** 2\n score /= times\n output += score\n \n output /= data_num\n return output",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)",
"def calculate_probability(self):\n return 0",
"def getScore(self, seq, start):\n sum = 0.0\n seqdata = seq.getSequence()[start : start+self.cols]\n for pos in range(len(seqdata)):\n q = self.counts[pos].getFreq(seqdata[pos])\n if q == 0:\n q = 0.0001 # to avoid log(0) == -Infinity\n logodds = math.log(q / self.background.getFreq(seqdata[pos]))\n sum += logodds\n return sum",
"def delta(tval, tp_confidences, fp_confidences, num_samples):\n tp_percentage = \\\n np.sum([1 for x in tp_confidences if x > tval]) / num_samples\n if fp_confidences:\n fp_percentage = np.sum([1 for x in fp_confidences if x > tval]) / \\\n len(fp_confidences)\n else:\n fp_percentage = 0\n optimal_tp = len(tp_confidences) / num_samples\n delta_value = (tp_percentage - optimal_tp) ** 2 + fp_percentage ** 2\n return delta_value, tp_percentage, fp_percentage",
"def percentOverlap(x1, x2):\n nonZeroX1 = np.count_nonzero(x1)\n nonZeroX2 = np.count_nonzero(x2)\n minX1X2 = min(nonZeroX1, nonZeroX2)\n percentOverlap = 0\n if minX1X2 > 0:\n percentOverlap = float(np.dot(x1.T, x2)) / float(minX1X2)\n return percentOverlap",
"def mape(true, predictions):\n true = np.array(true)\n predictions = np.array(predictions) \n return np.mean(np.abs((true - predictions)) / true) * 100",
"def calculate_percent_match(primers,\n seq_count,\n exclude_seq_count=1):\n # Calculate percent of sequences that are 'hit' by each primer\n for n in range(len(primers)):\n # Calculate percent perfect match\n primers[n].percent_match=float(primers[n].match_count/seq_count)\n primers[n].non_specific_percent=\\\n float(primers[n].non_specific_hits/exclude_seq_count)\n \n return primers",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = [number+1 for number in range(num_die_sides)]\n die_seqs = list(gen_all_sequences(outcomes, num_free_dice))\n for idx in range(len(die_seqs)):\n seq = list(die_seqs[idx])\n seq.extend(list(held_dice))\n die_seqs[idx] = tuple(seq)\n scr = 0.0\n for seq in die_seqs:\n scr += score(seq) \n return scr / len(die_seqs)",
"def scoreRsrc( self, rr ):\r\n result = 0.0\r\n for tt in self.getSched( )[rr.getid( )]:\r\n for se in tt:\r\n result += 1\r\n print( \"INFO: Value for %s: %s \" % ( rr, result ) )\r\n return( result )",
"def p(party, vote_count, s):\n return t(party, vote_count) / d(s)",
"def contains_percentage_of(self, other: 'Interval') -> float:\n if other.length == 0:\n return other.a in self\n intersection = Interval.intersection([self, other])\n return intersection.length / other.length if intersection else 0.0",
"def _calc_multiple_alignment_score(wrapped_data : tuple) -> int: \n (start, finish) = wrapped_data \n score_sum = 0.\n for dna_record in tqdm(dna_sequences[start : finish + 1], total=(finish + 1 - start), desc=\"Training process\"):\n score_sum += self.aligner.score(seq, dna_record.seq)\n return score_sum",
"def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))",
"def _calculate_score(predictions: np.ndarray, correct: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(np.log(predictions + 1) - np.log(correct + 1))) / len(correct))",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)",
"def tpr(positive, negative, fpr):\n threshold = np.percentile(np.asarray(negative), 100 - fpr)\n total_true_positives = sum(positive > threshold)\n\n return total_true_positives / len(positive)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result",
"def detection_score(self, y_true, y_pred):\n ospa_score = ospa(y_true, y_pred, self.minipatch)\n return 1 - ospa_score",
"def discrepancy_score(self, t, s):\n left = np.mean(self.dists[(t, s)])\n right = np.mean(self.dists[(s, t)])\n return 0.5 * (left + right)"
] | [
"0.6732613",
"0.6518283",
"0.6426524",
"0.6424136",
"0.6334499",
"0.6254541",
"0.6188233",
"0.61744064",
"0.6095827",
"0.6077343",
"0.6045876",
"0.5990594",
"0.5986692",
"0.59864265",
"0.59614843",
"0.5960128",
"0.5936167",
"0.5935803",
"0.59078395",
"0.590509",
"0.58760846",
"0.58743113",
"0.58386606",
"0.5834036",
"0.5828479",
"0.5812159",
"0.580872",
"0.5802769",
"0.5790495",
"0.5785718"
] | 0.7712985 | 0 |
Combine training and inference datasets as one data frame | def combine_train_infer(train_file, infer_dir):
train_df = pd.read_feather(train_file)
time_range = range(len([f for f in os.listdir(infer_dir) if 'feather' in f]))
infer_df_list = [pd.read_feather(f'{infer_dir}/{t}.feather') for t in time_range]
comb_df_list = []
train_df.index = [-1] * len(train_df)
comb_df_list.append(train_df)
for t in time_range:
df = infer_df_list[t]
df.index = [t] * len(df)
comb_df_list.append(df)
return pd.concat(comb_df_list), train_df, infer_df_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_training_data():\n \n X = pd.read_csv('../data/train_values.csv').set_index('sequence_id')\n y = pd.read_csv('../data/train_labels.csv').set_index('sequence_id')\n return X, y",
"def triples(self):\n return pd.concat((self._load_train(), self._load_valid(), self._load_test()))",
"def _prepare_inference_data(self, df: pd.DataFrame) -> pd.DataFrame:\n # TODO Is the target encoding necessary?\n if len(set(self.target) - set(df.columns)) > 0:\n if self.config.task == \"classification\":\n df.loc[:, self.target] = np.array([self.label_encoder.classes_[0]] * len(df)).reshape(-1, 1)\n else:\n df.loc[:, self.target] = np.zeros((len(df), len(self.target)))\n df, _ = self.preprocess_data(df, stage=\"inference\")\n return df",
"def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y",
"def get_training_and_validation_df():\n df = get_cleaned_processed_df()\n val_df = pd.DataFrame.from_csv(VALIDATION_DATA_PATH)\n y_train = df.pop(\"label\")\n y_val = val_df.pop(\"label\")\n\n df, val_df = complete_columns(df, val_df)\n df.fillna(0, inplace=True)\n val_df.fillna(0, inplace=True)\n df = fill_text_features(df)\n val_df = fill_text_features(val_df)\n\n df = drop_text_features(df)\n val_df = drop_text_features(val_df)\n return df.values, y_train, val_df.values, y_val",
"def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr",
"def load_data():\n train = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}",
"def build_all_datasets(\n cfg, tokenizer, train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset",
"def load_data_wrapper():\n\ttr_d, te_d = load_data()\n\ttraining_inputs = [np.reshape(x, (4, 1)) for x in tr_d[0]]\n\ttraining_results = [vectorized_result(y) for y in tr_d[1]]\n\ttraining_data = zip(training_inputs, training_results)\n\ttest_inputs = [np.reshape(x, (4, 1)) for x in te_d[0]]\n\ttest_data = zip(test_inputs, te_d[1])\n\treturn (training_data, test_data)",
"def create_pandas_dataframes():\n train, test = Email.load_emails_from_data()\n\n train_y = [int(t.is_spam) for t in train]\n test_y = [int(t.is_spam) for t in test]\n\n vocab = get_vocabulary_vector(train)\n print(\"[ INF ] Vocab Size:\", len(vocab))\n\n train = [t.vectorize_tokens(vocab) for t in train]\n test = [t.vectorize_tokens(vocab) for t in test]\n\n train = pd.DataFrame.from_records(train, columns=vocab)\n test = pd.DataFrame.from_records(test, columns=vocab)\n\n train['is_spam'] = train_y\n test['is_spam'] = test_y\n\n return train, test",
"def train_and_eval_all_models():\n\n clfShape, accShape = shape_symmetry_train_classifier()\n clfTexture, accTexture = texture_symmetry_train_classifier()\n clfFinal, accFinal = combined_symmetry_train_classifier()\n\n return accShape, accTexture, accFinal",
"def phase_two_data():\n from pathlib import Path\n try:\n import cPickle as pickle\n except ImportError:\n import pickle\n \n from annotation import parse_fulltext\n from features import ALL_FEATURES\n \n from feature_template import apply_templates\n from feature_selection import filter_by_frequency\n from feature_encoding import encode\n\n # Feature templates considered if heading by 1:\n # ----------------------------\n # Position + Voice\n # Path length + Clause layer\n # 1 Predicate + Path\n # Path + Position + Voice\n # Path + Position + Voice + Predicate\n # 1 Head word stem + Predicate\n # 1 Head word stem + Predicate + Path\n # 1 Head word stem + Phrase\n # Clause layer + Position + Predicate\n templates = [tuple([f.name]) for f in ALL_FEATURES] + \\\n [('path_to_frame', 'frame'), ('head_stem', 'frame'), ('head_stem', 'frame', 'path_to_frame'), ('head_stem', 'phrase_type')]\n \n size = 40\n instances = []\n for i, p in enumerate(Path(\"/cs/fs2/home/hxiao/Downloads/fndata-1.5/fulltext/\").glob(\"*.xml\")):\n if i == size:\n break\n sys.stderr.write(\"Processing file: '%s'\\n\" %p.absolute())\n annotations = parse_fulltext(str(p.absolute()))\n instances += make_training_data(ALL_FEATURES, annotations)\n\n sys.stderr.write(\"Feature selection...\\n\")\n x, y = zip(*instances)\n x = apply_templates(x, templates)\n features = filter_by_frequency(x, 5)\n sys.stderr.write(\"Feature encoding...\\n\")\n x, feature_map = encode(x, features)\n \n sys.stderr.write(\"Dumping data...\\n\") \n pickle.dump((x, y, ALL_FEATURES, templates, feature_map), open('dump/test_data.pkl', 'w'))\n import pdb\n pdb.set_trace()\n print len(instances)",
"def load_data_wrapper():\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs, training_results)\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = zip(validation_inputs, va_d[1])\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n return (training_data, validation_data, test_data)",
"def load_data_wrapper():\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs, training_results)\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = zip(validation_inputs, va_d[1])\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n return (training_data, validation_data, test_data)",
"def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count",
"def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values",
"def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)",
"def generate_data(self):\n\n column_num = 1\n src_path = self.src_paths_after_pre_process\n target_path = self.tgt_paths_after_pre_process\n\n src_ds = load_textline_dataset([src_path], column_num)\n\n src_ds = src_ds[0]\n\n input_pipeline_func = self.get_input_pipeline(for_export=False)\n\n src_ds = src_ds.map(\n input_pipeline_func, num_parallel_calls=self.num_parallel_calls)\n\n src_size_ds = src_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n src_ds = src_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n if self.infer_without_label:\n data_set = tf.data.Dataset.zip((src_ds, src_size_ds))\n\n else:\n tgt = load_textline_dataset([target_path], column_num)\n tgt = tgt[0]\n tgt_out_ds = tgt.map(lambda x: x + ' ' + self.END_TOKEN)\n tgt_in_ds = tgt.map(lambda x: self.START_TOKEN + ' ' + x)\n\n tgt_in_ds = tgt_in_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len, self.\n text_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_size_ds = tgt_in_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_ds = tgt_in_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n inp_ds = tf.data.Dataset.zip(\n (src_ds, src_size_ds, tgt_in_ds, tgt_in_size_ds))\n\n if self.use_label_vocab:\n target_vocab_file_path = self.label_vocab_file_paths[0]\n else:\n target_vocab_file_path = self.text_vocab_file_path\n tgt_out_ds = tgt_out_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len,\n target_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_out_ds = tgt_out_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n data_set = tf.data.Dataset.zip((inp_ds, tgt_out_ds))\n\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n data_size = get_file_len(self.src_paths_after_pre_process)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n self.config['data']['{}_data_size'.format(self.mode)] = data_size\n\n return data_set",
"def load_data_wrapper():\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = list(zip(training_inputs, training_results))\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = list(zip(validation_inputs, va_d[1]))\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = list(zip(test_inputs, te_d[1]))\n return (training_data, validation_data, test_data)",
"def load_data_wrapper():\n\n tr_d, va_d, te_d = load_data()\n\n training_inputs = [np.reshape(a=x, newshape=(784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs, training_results)\n # Need to do list(zip(...)) instead of just zip(...) in Python 3\n # training_data = list(zip(training_inputs, training_results))\n\n validation_inputs = [np.reshape(a=x, newshape=(784, 1)) for x in va_d[0]]\n validation_data = zip(validation_inputs, va_d[1])\n # validation_data = list(zip(validation_inputs, va_d[1]))\n\n test_inputs = [np.reshape(a=x, newshape=(784, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n # test_data = list(zip(test_inputs, te_d[1]))\n\n return training_data, validation_data, test_data",
"def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)",
"def load_data(params):\n train_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_train_' + params['stemming'] + '.csv']))\n dev_df = pd.read_csv(os.path.join(*[dataset_path, params['tokenizer'] + '_dev_' + params['stemming'] + '.csv']))\n train_data, label_encode = data_prep(train_df, params, if_resample=True)\n dev_data, _ = data_prep(dev_df, params)\n return train_data, dev_data, label_encode",
"def ConcatDF(train_set, test_set):\n return pd.concat([train_set, test_set], sort=True).reset_index(drop=True)",
"def load_dataset(data_dir, model_params, inference_mode=False):\n\n # normalizes the x and y columns using the training set.\n # applies same scaling factor to valid and test set.\n\n if isinstance(model_params.data_set, list):\n datasets = model_params.data_set\n else:\n datasets = [model_params.data_set]\n\n train_strokes = None\n valid_strokes = None\n test_strokes = None\n\n for dataset in datasets:\n if data_dir.startswith('http://') or data_dir.startswith('https://'):\n data_filepath = '/'.join([data_dir, dataset])\n tf.logging.info('Downloading %s', data_filepath)\n response = requests.get(data_filepath)\n data = np.load(six.BytesIO(response.content), encoding='latin1')\n else:\n data_filepath = os.path.join(data_dir, dataset)\n data = np.load(data_filepath, encoding='latin1', allow_pickle=True)\n tf.logging.info('Loaded {}/{}/{} from {}'.format(\n len(data['train']), len(data['valid']), len(data['test']),\n dataset))\n if train_strokes is None:\n train_strokes = data['train']\n valid_strokes = data['valid']\n test_strokes = data['test']\n else:\n train_strokes = np.concatenate((train_strokes, data['train']))\n valid_strokes = np.concatenate((valid_strokes, data['valid']))\n test_strokes = np.concatenate((test_strokes, data['test']))\n\n all_strokes = np.concatenate((train_strokes, valid_strokes, test_strokes))\n num_points = 0\n for stroke in all_strokes:\n num_points += len(stroke)\n avg_len = num_points / len(all_strokes)\n tf.logging.info('Dataset combined: {} ({}/{}/{}), avg len {}'.format(\n len(all_strokes), len(train_strokes), len(valid_strokes),\n len(test_strokes), int(avg_len)))\n\n # calculate the max strokes we need.\n max_seq_len = utils.get_max_len(all_strokes)\n # overwrite the hps with this calculation.\n model_params.max_seq_len = max_seq_len\n\n tf.logging.info('model_params.max_seq_len %i.', model_params.max_seq_len)\n\n eval_model_params = sketch_rnn_model.copy_hparams(model_params)\n\n eval_model_params.use_input_dropout = 0\n eval_model_params.use_recurrent_dropout = 0\n eval_model_params.use_output_dropout = 0\n eval_model_params.is_training = 1\n\n if inference_mode:\n eval_model_params.batch_size = 1\n eval_model_params.is_training = 0\n\n sample_model_params = sketch_rnn_model.copy_hparams(eval_model_params)\n sample_model_params.batch_size = 1 # only sample one at a time\n sample_model_params.max_seq_len = 1 # sample one point at a time\n\n train_set = utils.DataLoader(\n train_strokes,\n model_params.batch_size,\n max_seq_length=model_params.max_seq_len,\n random_scale_factor=model_params.random_scale_factor,\n augment_stroke_prob=model_params.augment_stroke_prob)\n\n normalizing_scale_factor = train_set.calculate_normalizing_scale_factor()\n train_set.normalize(normalizing_scale_factor)\n\n valid_set = utils.DataLoader(\n valid_strokes,\n eval_model_params.batch_size,\n max_seq_length=eval_model_params.max_seq_len,\n random_scale_factor=0.0,\n augment_stroke_prob=0.0)\n valid_set.normalize(normalizing_scale_factor)\n\n test_set = utils.DataLoader(\n test_strokes,\n eval_model_params.batch_size,\n max_seq_length=eval_model_params.max_seq_len,\n random_scale_factor=0.0,\n augment_stroke_prob=0.0)\n test_set.normalize(normalizing_scale_factor)\n\n tf.logging.info('normalizing_scale_factor %4.4f.', normalizing_scale_factor)\n\n result = [\n train_set, valid_set, test_set, model_params, eval_model_params,\n sample_model_params\n ]\n return result",
"def train_test_data_df(train_data_file, test_data_file):\n dtype_dict = {\n \"age\": np.int32,\n \"education-num\": np.int32,\n \"capital-gain\": np.int32,\n \"capital-loss\": np.int32,\n \"hours-per-week\": np.int32\n }\n cols = [i for i in range(15) if i != 2]\n train_data = pd.read_csv(train_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n train_data = train_data.dropna(axis=0, how=\"any\")\n test_data = pd.read_csv(test_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n test_data = test_data.dropna(axis=0, how=\"any\")\n return train_data, test_data",
"def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader",
"def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()",
"def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):",
"def create_train_feats():\n features = read_process_labelled(AUDIO_DIR, debug=True)\n df = pd.DataFrame(features)\n p = './Features/dataset_features/data_features.csv'\n df.to_csv(p, index=False)\n return p",
"def _prepare_ml_data(X, y, to_optimize=False):\n size_test = 1\n y_test = None\n if to_optimize:\n size_test = CONFIG.OPTIMIZE_PARAMS['size'] + 1\n y_test = y.iloc[-size_test:]\n X_train = X.iloc[:-size_test]\n y_train = y.iloc[:-size_test]\n X_test = X.iloc[-size_test:]\n return X_train, y_train, X_test, y_test"
] | [
"0.6369666",
"0.63546294",
"0.63196236",
"0.62988764",
"0.6283459",
"0.62768555",
"0.6249582",
"0.6194825",
"0.6187834",
"0.61582404",
"0.6153339",
"0.6152186",
"0.6096578",
"0.6096578",
"0.6080581",
"0.60757023",
"0.6061717",
"0.6044782",
"0.60430914",
"0.6011937",
"0.5976109",
"0.5970796",
"0.5957939",
"0.5935491",
"0.59323853",
"0.5929659",
"0.59218836",
"0.5921361",
"0.59130055",
"0.59090245"
] | 0.66650474 | 0 |
Call the shell script that handles BLAST database formatting. | def format_blast(makeblastdb_path, fname):
# The script is written in shell, so this function just calls it and
# checks the output
# Build the shell command
cmd = ['bash', DBFORMAT_SCRIPT, makeblastdb_path, fname]
# Execute the script
# shell=False to ensure that we aren't executing commands from untrusted
# sources
p = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return (out, err) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def command_dbtool(self):\n dbtool.main(*self.args())",
"def makeblastdb(files, db_name, db_type):\n with open(db_name + \".pin\", \"w\") as f:\n f.write(\"\\n\".join(db_name))\n return subprocess.run([\"makeblastdb\", \"-in\", db_name + \".pin\", \"-dbtype\", db_type)",
"def blastn_commandline(cls):\n command = generate_path(\"../../blast/ncbi-blast*/bin/blastn\")\n fasta = generate_path(\"tmp/validate.fasta\")\n db = generate_path(\"data/blast/ValidationDB\")\n results = generate_path(\"tmp/validate.xml\")\n\n subprocess.call(\n '%s -query %s -db %s -outfmt 5 -out %s -best_hit_score_edge 0.05 '\n '-best_hit_overhang 0.1' % (\n command, fasta, db, results\n ), shell=True\n )",
"def blastp(database, query, output_to_file = False, output_file = None,\n overwrite = False, outfmt = 7):\n if output_to_file:\n if os.path.exists(output_file) and not overwrite:\n return output_file\n cmd = 'blastp -db {} -query {} -outfmt {} -out {} -num_alignments 1'.\\\n format(database, query, outfmt, output_file)\n else:\n cmd = 'blastp -db {} -query {} -outfmt {} -num_alignments 1'.format(\n database, query, outfmt)\n\n printed_output = subprocess.check_output(cmd, shell=True)\n if output_to_file:\n return output_file\n return printed_output",
"def blast_database(target, dbtype, output_to_file = False, output_file = None,\n overwrite = False):\n if output_to_file:\n if os.path.exists(output_file) and not overwrite:\n return output_file\n cmd = 'makeblastdb -in {} -dbtype {} -out {}'.format(target, dbtype, output_file)\n else:\n cmd = 'makeblastdb -in {} -dbtype {}'.format(target, dbtype)\n printed_output = subprocess.check_output(cmd, shell=True)\n\n if output_to_file:\n return output_file\n\n return printed_output",
"def main():\n count = 0\n\n # Read in the required files and filenames.\n predicted_proteins, protein_db, output_file_aug_to_fasta, \\\n output_file_proteins_to_db, blastp_output, output_to_file, \\\n overwrite = call_files()\n\n # Write all entries in the AUGUSTUS output to a FASTA file\n for record in split_records_aug(predicted_proteins):\n if count == 0:\n mode = 'w'\n else:\n mode = 'a'\n write_fasta(record, output_file_aug_to_fasta, mode)\n count += 1\n\n # Create a blast database and carry out a blastp search\n blast_db = blast_database(protein_db, 'prot', True,\n output_file_proteins_to_db, overwrite)\n\n blastp_file = blastp(output_file_proteins_to_db, output_file_aug_to_fasta,\n True, blastp_output, overwrite, 7)\n\n # Parse the blastp results for the desired information\n blast_results = parse_blastp_output(blastp_output)\n\n # Print the results\n print_output(blast_results)",
"def main():\n task_init(authorization_action='runbibformat',\n authorization_msg=\"BibReformat Task Submission\",\n description=\"\"\"\nBibReformat formats the records and saves the produced outputs for\nlater retrieval.\n\nBibReformat is usually run periodically via BibSched in order to (1)\nformat new records in the database and to (2) reformat records for\nwhich the meta data has been modified.\n\nBibReformat has to be run manually when (3) format config files have\nbeen modified, in order to see the changes in the web interface.\n\nAlthough it is not necessary to run BibReformat to display formatted\nrecords in the web interface, BibReformat allows to improve serving\nspeed by precreating the outputs. It is suggested to run\nBibReformat for 'HB' output.\n\nOption -m cannot be used at the same time as option -c.\nOption -c prevents from finding records in private collections.\n\nExamples:\n bibreformat Format all new or modified records (in HB).\n bibreformat -o HD Format all new or modified records in HD.\n bibreformat -o HD,HB Format all new or modified records in HD and HB.\n\n bibreformat -a Force reformatting all records (in HB).\n bibreformat -c 'Photos' Force reformatting all records in 'Photos' collection (in HB).\n bibreformat -c 'Photos' -o HD Force reformatting all records in 'Photos' collection in HD.\n\n bibreformat -i 15 Force reformatting record 15 (in HB).\n bibreformat -i 15:20 Force reformatting records 15 to 20 (in HB).\n bibreformat -i 15,16,17 Force reformatting records 15, 16 and 17 (in HB).\n\n bibreformat -n Show how many records are to be (re)formatted.\n bibreformat -n -c 'Articles' Show how many records are to be (re)formatted in 'Articles' collection.\n\n bibreformat -oHB -s1h Format all new and modified records every hour, in HB.\n\"\"\", help_specific_usage=\"\"\" -o, --formats \\t Specify output format/s (default HB)\n -n, --noprocess \\t Count records to be formatted (no processing done)\nReformatting options:\n -a, --all \\t Force reformatting all records\n -c, --collection \\t Force reformatting records by collection\n -f, --field \\t Force reformatting records by field\n -p, --pattern \\t Force reformatting records by pattern\n -i, --id \\t Force reformatting records by record id(s)\nPattern options:\n -m, --matching \\t Specify if pattern is exact (e), regular expression (r),\n \\t partial (p), any of the words (o) or all of the words (a)\n\"\"\",\n version=__revision__,\n specific_params=(\"ac:f:p:lo:nm:i:\",\n [\"all\",\n \"collection=\",\n \"matching=\",\n \"field=\",\n \"pattern=\",\n \"format=\",\n \"noprocess\",\n \"id=\"]),\n task_submit_check_options_fnc=task_submit_check_options,\n task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,\n task_run_fnc=task_run_core)",
"def main():\n cur, conn = connect('dwh.cfg')\n \n set_schema = schema_queries[1]\n cur.execute(set_schema)\n \n print('Loading Staging Tables.')\n load_staging_tables(cur, conn)\n \n print('Inserting Rows.')\n insert_tables(cur, conn)\n\n \n conn.close()",
"def build_db(db_fasta, out_db, input_type='fasta'):\n subprocess.run(\n 'makeblastdb -dbtype nucl -in %s -input_type %s -parse_seqids -out %s'\n % (db_fasta, input_type, out_db),\n shell=True,\n env={'PATH': BLAST_PATH}\n )",
"def make_user_database():\n createblast_out, createblast_error = Popen([\"makeblastdb\", \"-in\", args.blast_database, \"-dbtype\", \"nucl\"], stdout=PIPE, stderr=PIPE).communicate()\n admin_log(createblast_out, createblast_error, \"create database:\")",
"def db_shell(ctx, db_key=None):\n ctx.run('pgcli -h {db_host} -d {db_name} -U {db_user}'.format(**get_database_settings(db_key)), pty=True)",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['STAGE'].values()))\n cur = conn.cursor()\n \n #remove the existing tables\n drop_tables(cur, conn)\n \n #replace the tables with new ones\n create_tables(cur, conn)\n \n #add missing postcode value into table\n default_missing_values(cur, conn)\n \n conn.close()",
"def _create_execute_blastdbcmd(execute_command):\n\n def execute_blastdbcmd(input_file: str, sequence_file: str, database: str):\n cmd = \"{} -db {} -entry_batch {} > {}\".format(\n BLASTDBCMD_CMD, database, input_file, sequence_file)\n execute_command(cmd)\n\n return execute_blastdbcmd",
"def formatdb(fastadata={},fname=\"\"):\n if not fastadata and fname:\n OSsystem(\"%s -i %s\" % (FORMATDB_PATH,fname))\n elif fastadata and fname:\n pass\n else:\n raise \"inproper input\"\n return fname",
"def format():\n isort = 'isort -rc *.py app/'\n yapf = 'yapf -r -i *.py app/'\n\n print('Running {}'.format(isort))\n subprocess.call(isort, shell=True)\n\n print('Running {}'.format(yapf))\n subprocess.call(yapf, shell=True)",
"def run_blast(inputfile, input_type, outputfile, database, args=None, verbose=True):\n\n assert (input_type in ['protein', 'dna']), \"Input type must be either 'protein' or 'dna'\"\n\n cmd = ['diamond']\n\n if input_type == 'protein':\n cmd += ['blastp']\n elif input_type == 'dna':\n cmd += ['blastx']\n\n cmd += ['-d', database]\n cmd += ['-q', inputfile]\n cmd += ['-o', outputfile]\n\n if not args:\n args = \"--more-sensitive --top 10 --quiet\"\n\n cmd += args.split()\n\n if verbose:\n print(' '.join(cmd))\n\n with open(os.devnull, 'w') as devnull:\n try:\n exit_code = call(cmd, stdout=devnull)\n except OSError:\n exit_code = None\n\n return exit_code",
"def populate_db_command():\n print(\"Populating DB with sample data.\")\n populate_db()\n print \"Done\"",
"def format_bash(self,query_results):\n data=query_results.data\n \n name=\"ddb\"\n\n print (\"{0}_row_length={1}\".format(name,len(data)))\n print (\"{0}_column_length={1}\".format(name,len(query_results.columns)))\n print (\"\")\n\n column_index=0\n for column in query_results.columns:\n print(\"{0}_columns['{1}']='{2}'\".format(name,column_index,column))\n column_index+=1\n\n\n row_index=0\n for row in data:\n for column_index in range(0,len(query_results.columns)):\n print('{0}_data[{1}][{2}]=\"{3}\"'.format(name,row_index,column_index,row['data'][column_index]))\n row_index+=1\n # TODO return output for this\n return \"\"",
"def main():\r\n db = connect_database()\r\n with db:\r\n if sys.argv[1] == \"-s\":\r\n select_all(db, sys.argv[2])\r\n elif sys.argv[1] == \"-i\":\r\n cus_data = []\r\n for i in range(2, len(sys.argv)):\r\n cus_data.append(sys.argv[i])\r\n insert_customer(db, cus_data)\r\n elif sys.argv[1] == \"-c\":\r\n create_tables()\r\n elif sys.argv[1] == \"-pw\":\r\n pop_waiting(db, sys.argv[2])\r\n elif sys.argv[1] == \"-ph\":\r\n pop_help(db, sys.argv[2])\r\n elif sys.argv[1] == \"-r\":\r\n refresh_tables(db)\r\n elif sys.argv[1] == \"-e\":\r\n export_helped_table(db)\r\n else:\r\n print errorArgument\r\n db.close()",
"def init_db_command():\n init_db()\n # click.command() defines a command line command called init-db that calls the init_db function and shows a success message to the user. \n click.echo('Initialized the database.')",
"def createdb(dbname):\n os.system(\"createdb -w %s\" % dbname)",
"def run_db(args):\n # print(\"running chronqc_db\")\n chronqc_db.main(args)",
"def create_blast_db(self):\n print(\"Creating blast db\")\n if self.mask:\n command = 'dustmasker -in ' + self.seq_file + ' -infmt fasta '\n command += '-outfmt maskinfo_asn1_bin -out ' + self.seq_file + '_dust.asnb'\n subprocess.check_output(command, shell=True) # identifying low-complexity regions.\n\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-mask_data ' + self.seq_file + '_dust.asnb '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome without low-complexity regions\"'\n subprocess.check_output(command, shell=True) # Overwriting the genome file.\n else:\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome unmasked\"'\n subprocess.check_output(command, shell=True)",
"def initdb_command():\n init_db()",
"def initdb_command():\n init_db()",
"def structure_and_repopulate_db() -> None:\n with open('db.sql', encoding=\"utf-8\") as f:\n commands = f.read().strip().split(';')\n commands = [command.strip() for command in commands]\n for command in commands:\n my_cursor.execute(command)\n my_db.commit()\n print('Source structure created, data repopulated')",
"def initdb_command():\r\n init_db()\r\n print('Initialized the database.')",
"def initdb_command():\n init_db()\n print('Initialized the database.')",
"def initdb_command():\n init_db()\n print('Initialized the database.')",
"def initdb_command():\n init_db()\n print('Initialized the database.')"
] | [
"0.615836",
"0.6075764",
"0.59140235",
"0.58454984",
"0.58075655",
"0.5771629",
"0.5752413",
"0.57355833",
"0.5695151",
"0.5693321",
"0.5688143",
"0.5677002",
"0.5629271",
"0.56139135",
"0.56128824",
"0.55926937",
"0.55580354",
"0.5531508",
"0.55243224",
"0.55176175",
"0.55169505",
"0.55017024",
"0.5488147",
"0.5454912",
"0.5454912",
"0.5454865",
"0.5450833",
"0.5420355",
"0.5420355",
"0.5420355"
] | 0.7314453 | 0 |
Returns tag of the first matched ListofValues. For each element in ``series`` returned is the tag of the listofvalues in the dictionary of LoVs ``taglov`` which first matches the element with one of its values OR value from donor with the same index OR ``na``. | def which_tag(series: pd.Series,
taglov: Union[TagLoV, Any],
na: Any,
donor: pd.Series = None,
method: Optional[Union[Callable, str]] = None,
**kwargs):
if series.empty:
return series
if not isinstance(taglov, TagLoV):
taglov = TagLoV(taglov)
lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs)
tags_plus = np.array((na, *taglov.tags))
result = pd.Series(tags_plus[lov_idx_plus], index=series.index)
if isinstance(donor, pd.Series): # take unmatched values from donor
unmatched_idx = series.index[~lov_idx_plus.astype(bool)]
if not unmatched_idx.empty:
take_idx = unmatched_idx.intersection(donor.index)
if not take_idx.empty:
result[take_idx] = donor[take_idx]
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tag_one(self, tokens, index, history):\n tag = None\n for tagger in self._taggers:\n tag = tagger.choose_tag(tokens, index, history)\n if tag is not None:\n break\n return tag",
"def find_usefull_tags(tags, tagmodel, tag_count_vect):\n\n final_tags = []\n for tag in tags:\n if tag == None:\n continue\n else:\n tagpd = pd.Series(tag)\n tag_feature = tag_count_vect.transform(tagpd)\n result = tagmodel.predict(tag_feature)\n\n result = result.tolist() \n result = str(result)\n if result == '[1]':\n final_tags.append(tag)\n final_tags = list(dict.fromkeys(final_tags))\n return(final_tags)",
"def find_first_tag(self, tag):\n for lm, _ in self.search(tag=tag):\n return lm",
"def __aggregate(self, series):\n if series.name in self.__non_redundant_entity_attributes or series.name in self.__redundant_entity_attributes: # Textual entities\n merged_sensitive_terms = list()\n for sensitive_terms in series.dropna():\n merged_sensitive_terms = merged_sensitive_terms + sensitive_terms\n return merged_sensitive_terms if len(merged_sensitive_terms) > 0 else None # Return merged result, or None\n else:\n if series.nunique() > 1: # Since there are more values, pack them into a list / frozenset\n if series.name in self.__textual_attributes or series.name in self.__config.get_insensitive_attributes():\n return list(series.array)\n else:\n return frozenset(series.array)\n else:\n return series.unique()[0] # Else return just this single value",
"def useSeriesAbove(requestContext, seriesList, value, search, replace):\n newSeries = []\n\n for series in seriesList:\n newname = re.sub(search, replace, series.name)\n if max(series) > value:\n n = evaluateTarget(requestContext, newname)\n if n is not None and len(n) > 0:\n newSeries.append(n[0])\n\n return newSeries",
"def which_lov(series: pd.Series,\n patterns: Sequence[Sequence[Any]],\n method: Optional[Union[Callable, str]] = None,\n **kwargs) -> np.ndarray:\n elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]\n if not elov:\n return np.zeros(series.size, int)\n num, value = zip(*elov)\n lov_idx_plus = np.concatenate(([0], num))\n if method is None:\n mm = series.to_numpy() == np.array(value)[:, np.newaxis]\n elif not callable(method): # assume name of pd.Series.str method\n ptns = pd.Series(value)\n kwargs['na'] = False\n do_match = getattr(series.str, method)\n mm = ptns.apply(do_match, **kwargs).values\n else:\n mm = method(series, value, **kwargs)\n return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]",
"def tag(self, tokens):\n if overridden(self.tag_sents):\n return self.tag_sents([tokens])[0]",
"def get_tag_value(\n service: str,\n tags: List[Any],\n tag_key: str,\n) -> str:\n capitalize = capitalize_tag_kv(service)\n matches = [\n t[f\"{'V' if capitalize else 'v'}alue\"]\n for t in tags\n if t[f\"{'K' if capitalize else 'k'}ey\"] == tag_key\n ]\n if len(matches) != 1:\n log_error(\n f\"Oops it looks like we're unable to find a match for tag {tag_key}.\"\n \"Please open an issue to help us get this fixed!\",\n )\n raise Abort()\n\n return matches[0]",
"def _get_tagged_value(self, key):\n return self._tagged_values_dict[key]",
"def return_tag_tokens(self, tags_indexes, observations):\n tag_pred = []\n for tag_index in tags_indexes:\n tag_pred.append(observations.T.index[tag_index])\n return tag_pred",
"def get_tag_options(label_matches):\r\n\ttag_options = []\r\n\tfor key in label_matches.keys():\r\n\t\tif key[1] not in tag_options:\r\n\t\t\ttag_options.append(key[1])\r\n\treturn tag_options",
"def best_sequence(self, T, pos, psi, phi, fix_tags=[]):\n for idx, m in fix_tags:\n phi[idx - 1, m] = 100\n # if fix_idx:\n # phi[fix_idx - 1, fix_m] = 100\n msgs, pointers = max_product(T, pos, psi, phi, True)\n tags_dict = get_best_tags(T, msgs, pointers)\n tags = []\n for i in range(1, len(T) + 1):\n tags.append(self.get_tag(tags_dict[str(i)]))\n return tags",
"def predict(self, tokens: TokenSeq) -> PosSeq:\n _, pos_tags = self.predict_greedy(tokens)\n # _, _, pos_tags = self.predict_viterbi(tokens)\n return pos_tags",
"def find_series(self, key):\n # TODO: this could be more efficient if we pushed it down into Java\n return self.filter(lambda x: x[0] == key).first()[1]",
"def _series_handler(self, values, style, caller, *args):\n\n behaviors = {\"over\": values.ge,\n \"under\": values.lt}\n\n evaluated = values[behaviors.get(caller)(self.margin)]\n\n if style == \"values\":\n return evaluated\n else:\n return list(evaluated.index)",
"def get_default_tag(self, tags):\n tags_counter = Counter()\n for tag in tags:\n tags_counter[tag] += 1\n\n if len(tags_counter) == 2 and list(tags_counter.values())[0] == list(tags_counter.values())[1]:\n return ut.find_positive_tag(tags_counter.keys())\n\n return tags_counter.most_common(1)[0][0]",
"def get_latest_tag(self, repo: git.Repo) -> Tuple[Optional[\n git.refs.tag.TagReference], Optional[semantic_version.Version]]:\n raw_tag = self._search_strategy(\n repo=repo, branch=self._branch)\n if raw_tag is None:\n return None, None\n sem_tag = semantic_version.Version(\n tag_search_strategy.clean_tag_name(str(raw_tag)))\n return raw_tag, sem_tag",
"def get_tag(self, xaf, name, not_found_value=None,\n counter_str_value='latest', force_step_name=None,\n force_plugin_name=None):\n tag_name = self.__get_tag_name(name, counter_str_value,\n force_step_name, force_plugin_name)\n return xaf.tags.get(tag_name, not_found_value)",
"def _tag_tokens(self, targets: list, tokens: list, tags: dict=BIO_TAGS, bert_tokenizer=None, verbose: bool=False):\n if bert_tokenizer is not None:\n tokenizer = bert_tokenizer\n\n if len(targets) > 0:\n tags_list = []\n for tgt in targets:\n t_list = []\n inside = False\n found = False\n if bert_tokenizer is not None:\n tgt_terms = tokenizer.tokenize(tgt[1]) \n else:\n tgt_terms = self._tokenize_line(tgt[1])\n\n if verbose:\n print(tgt_terms)\n\n for i in range(len(tokens)):\n if tokens[i] == tgt_terms[0] and not found: \n # token is the beginning (B) of target terms sequence\n t_list.append(tags[\"B\"])\n if len(tgt_terms) > 1 and tokens[i:i+len(tgt_terms)] == tgt_terms:\n # check if the matching token is not a repetition of the term\n # and is the actual target term, if so the correct sequence is found \n inside = True\n found = True\n\n elif inside == True:\n # multi words terms\n if tokens[i] in tgt_terms[1:-1] and len(tgt_terms) > 2:\n # token is inside (I) the target terms sequence\n t_list.append(tags[\"I\"])\n\n elif tokens[i] == tgt_terms[-1]:\n # token is the last (L) target term\n t_list.append(tags[\"I\"]) # tags[\"L\"] \n inside = False\n\n # when the last tgt_word is repeated inside the tgt_terms \n inside = False\n\n else:\n # token is outside (O) the target terms sequence\n t_list.append(tags[\"O\"])\n\n tags_list.append(torch.Tensor(t_list))\n\n # merge tags\n tags_tensor = torch.stack(tags_list)\n res = torch.min(tags_tensor, dim=0)\n if verbose:\n print(\"targets:\", targets)\n print(\"tokens:\", tokens, \"-- len:\", len(tokens))\n print(\"tags:\", tags_list)\n #print(\"tags:\", tags_tensor.size())\n #print(\"res:\", res.values.size())\n \n return res.values\n\n else:\n return [tags[\"O\"] for t in tokens]",
"def best_sequence_brute(self, T, pos, psi, phi):\n ms = self.get_all_tag_seq(len(T))\n log_scores = tr.zeros(len(ms), dtype=tr.float64)\n for i in range(len(ms)):\n log_scores[i] = self.log_score(T, pos, ms[i], psi, phi)\n best = ms[tr.argmax(log_scores)]\n tags = []\n for i in range(len(T)):\n tags.append(self.get_tag_index(best[i]))\n return tags",
"def get_tag_values(self, event):\n raise NotImplementedError",
"def _analyze_values(self,i,j,tags):\n ti = tags[i]\n tj = tags[j]\n si = get_simplified_pos(ti)\n sj = get_simplified_pos(tj)\n\n dt = abs(i-j)-1\n if dt >= 5: dt = 5\n if dt >= 10: dt = 10\n if dt >= 15: dt = 15\n dt = str(dt)\n\n if i == 0: \n tpi,spi = '-S-','-S-'\n else:\n tpi = tags[i-1]\n spi = get_simplified_pos(tpi)\n\n if j == len(tags)-1: \n tnj,snj = '-E-','-E-'\n else:\n tnj = tags[j+1]\n snj = get_simplified_pos(tnj)\n\n if i != j-1:\n tni = tags[i+1]\n sni = get_simplified_pos(tni)\n tpj = tags[j-1]\n spj = get_simplified_pos(tpj)\n else:\n tni,sni = '-M-','-M-'\n tpj,spj = '-M-','-M-'\n\n return si,sj,dt,tpi,tni,tpj,tnj,spi,sni,spj,snj",
"def get_or_create_specific_tag_without_bilateral(**kwargs):\n ix = kwargs.pop('ix')\n channel = kwargs.pop('channel')\n tag_number = kwargs.pop('tag_number')\n\n if ix.tags_policy == 'ix_managed':\n used_tags = Tag.objects.filter(\n ix=ix).exclude(status='AVAILABLE').order_by('tag')\n\n else:\n pe_channel = get_pe_channel_by_channel(channel=channel, ix=ix)\n tag_domain = pe_channel.channel_port if pe_channel else None\n used_tags = Tag.objects.filter(\n ix=ix,\n tag_domain=tag_domain).exclude(\n status='AVAILABLE').order_by('tag')\n\n if used_tags.filter(tag=tag_number):\n return 0\n else:\n free_tags = get_tag_without_bilateral(\n ix=ix, channel=channel)\n if free_tags.filter(tag=tag_number):\n return free_tags.get(tag=tag_number)\n else:\n tag = instantiate_tag(\n channel=channel, ix=ix, tag_number=tag_number)\n return tag",
"def get(self, label_sn):\n tags = self.list()\n return [\n tag\n for tag\n in tags\n if str(label_sn) in tag.get('args', {}).values()\n ]",
"def tag(self, tokens):\n (yyhat, _) = self.tag_with_features(tokens)\n return yyhat",
"def choose_ltv(self, label):\n tids = self.node_tids[label]\n vals = self.node_vals[label]\n losses = [self.tid_losses_dct[tid] for tid in tids]\n\n # -- try to return the value corresponding to one of the\n # trials that was previously chosen\n tid_set = set(tids)\n for tid in self.best_tids:\n if tid in tid_set:\n idx = tids.index(tid)\n rval = losses[idx], tid, vals[idx]\n break\n else:\n # -- choose a new best idx\n ltvs = sorted(zip(losses, tids, vals))\n best_idx = int(self.rng.geometric(1.0 / self.avg_best_idx)) - 1\n best_idx = min(best_idx, len(ltvs) - 1)\n assert best_idx >= 0\n best_loss, best_tid, best_val = ltvs[best_idx]\n self.best_tids.append(best_tid)\n rval = best_loss, best_tid, best_val\n return rval",
"def _get_head_tags(\n self,\n head_tag: torch.Tensor,\n child_tag: torch.Tensor,\n head_indices: torch.Tensor,\n ) -> torch.Tensor:\n batch_size = head_tag.size(0)\n # shape (batch_size, 1)\n range_vector = torch.arange(batch_size, device=head_tag.device).unsqueeze(1)\n\n # This next statement is quite a complex piece of indexing, which you really need\n # to read the docs to understand. See here:\n # https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#advanced-indexing\n # In effect, we are selecting the indices corresponding to the heads of each word\n # from the sequence length dimension for each element in the batch.\n\n # shape (batch_size, sequence_length, tag_dim)\n selected_head_tag = head_tag[range_vector, head_indices]\n selected_head_tag = selected_head_tag.contiguous()\n # shape (batch_size, sequence_length, num_head_tags)\n head_tag_logits = self.tag_bilinear(selected_head_tag, child_tag)\n return head_tag_logits",
"def FirstTrue(values, default=None):\n for value in values:\n if value:\n return value\n return default",
"def tag(self, tokens):\n numRows = len(self.tags)\n numCols = len(tokens)\n # initialize tables for dynamic programming\n table = array([[0] * numCols] * numRows, dtype=float32)\n trace = array([[None] * numCols] * numRows)\n \n # fill in the base cases, i.e. the first column\n for row in range(numRows):\n currentTag = self.tags[row]\n currentWord = tokens[0] if tokens[0] in self.vocab else '<OOV>'\n table[row][0] = self.tag_tag_probs['<START>'].prob(currentTag) * self.tag_word_probs[currentTag].prob(currentWord)\n trace[row][0] = '<START>'\n \n # fill the rest of the table\n # iterate through by columns\n for col in range(1, numCols):\n for row in range(numRows):\n currentTag = self.tags[row]\n currentWord = tokens[col] if tokens[col] in self.vocab else '<OOV>'\n maxProbability = 0.0;\n maxPrevRow = 0\n \n # iterate through the previous column and find the maximum probability\n # as well as the previous tag that led to the maximum probability\n for prevRow in range(numRows):\n prevTag = self.tags[prevRow]\n probability = table[prevRow][col-1] * self.tag_tag_probs[prevTag].prob(currentTag) * self.tag_word_probs[currentTag].prob(currentWord)\n if probability > maxProbability:\n maxProbability = probability\n maxPrevRow = prevRow\n \n table[row][col] = maxProbability\n trace[row][col] = maxPrevRow\n \n returnList = []\n # retrace and construct the tag list\n maxIndex = argmax(table, axis=0)[-1]\n # insert the last (token, tag) pair\n returnList.insert(0, (tokens[-1], self.tags[maxIndex]))\n # loop through the trace table and prepend each (token, tag) pair\n i = numCols - 1\n index = trace[maxIndex][numCols-1]\n while i > 0:\n returnList.insert(0, (tokens[i-1], self.tags[index]))\n i -= 1\n index = trace[index][i]\n \n return returnList",
"def values(self):\n store = getMainStore()\n tagIDs = self._getTagIDs()\n where = self._getWhereClause(tagIDs)\n return store.find((Tag, TagValue), *where)"
] | [
"0.61236465",
"0.5222297",
"0.52140766",
"0.49421668",
"0.49218923",
"0.49208298",
"0.47892955",
"0.47874418",
"0.47012714",
"0.46522093",
"0.4625487",
"0.46219954",
"0.45822042",
"0.45629826",
"0.45510745",
"0.45039132",
"0.44938043",
"0.44818074",
"0.44803494",
"0.44793394",
"0.44693768",
"0.4417272",
"0.44152057",
"0.44027513",
"0.43906337",
"0.43906015",
"0.4380242",
"0.4373906",
"0.43715787",
"0.43593135"
] | 0.7129057 | 0 |
prepro 200x235x3 uint8 frame into 8300 (83x100) 1D float vector | def prepro(I):
# """ prepro 200x235x3 uint8 frame into 10000 (100x100) 1D float vector """
I = I[35:200] # crop - remove 35px from start & 35px from end of image in x, to reduce redundant parts of image (i.e. after ball passes paddle)
I = I[::2,::2,0] # downsample by factor of 2
I[I == 43] = 0 # erase background (background type 1)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1
return I.astype(np.float).ravel() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def preprocess(self, frame: np.ndarray) -> torch.TensorType:\n tensor = cv.resize(frame, (self.IMGSZ, self.IMGSZ)) \n tensor = tensor.transpose(2, 0, 1)\n tensor = torch.from_numpy(tensor)\n tensor = torch.unsqueeze(tensor, 0)\n tensor = tensor.half() if self.half else tensor.float()\n tensor = tensor / 255.0\n tensor = tensor.to(self.device)\n\n return tensor",
"def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data",
"def _decode_35708(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29974:\n start_byte += n_bytes\n n_bytes = 4\n var_size = struct.unpack('<I', data[start_byte:\n start_byte + n_bytes])[0]\n start_byte += n_bytes\n n_bytes = var_size\n\n return np.frombuffer(data[start_byte:start_byte + n_bytes],\n dtype=np.float64)",
"def normalize_frames(frames):\n new_frames = frames.astype(np.float32)\n new_frames /= (255 / 2)\n new_frames -= 1\n\n return new_frames",
"def vid2tensor( self, current_frame):",
"def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im",
"def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im",
"def denormalize_frames(frames):\n new_frames = frames + 1\n new_frames *= (255 / 2)\n # noinspection PyUnresolvedReferences\n new_frames = new_frames.astype(np.uint8)\n\n return new_frames",
"def pre_handler(frame):\n img_data, _im0 = preprocess(frame, IMAGE_HEIGHT, IMAGE_WIDTH, False)\n return kdp_wrapper.convert_float_to_rgba(img_data, 8, 520, True)",
"def convert_to_vector(img_arr):\n img = img_arr[0:248, 0:248, 0]\n img = img.flatten()\n return img",
"def to_uint8(f):\n from numpy import array, clip, uint8\n\n img = array(clip(f,0,255),uint8)\n return img",
"def _image_to_vector(image):\n return image.flatten().astype(float)",
"def _process_data(data: np.ndarray) -> np.ndarray:\r\n result: np.ndarray = np.empty(shape=(0, 0))\r\n i = 0\r\n while i < (len(data) - 1):\r\n # Found beginning of frame\r\n if data[i] > 127:\r\n # Extract one sample from 2 bytes\r\n intout = (np.bitwise_and(data[i], 127)) * 128\r\n i += 1\r\n intout = intout + data[i]\r\n result = np.append(result, intout)\r\n i += 1\r\n return result",
"def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))",
"def preprocess(image):\n image = rgb2yuv(image)\n return image",
"def decode_frame(self, buf):\n import numpy as np\n from cv2 import cvtColor\n\n w, h = self._resolution\n arr = np.fromstring(buf, 'uint8').reshape((h + h / 2, w))\n arr = cvtColor(arr, 93) # NV21 -> BGR\n return arr",
"def _convert_frame_data(jpeg_data):\n decoded_frames = tf.image.decode_jpeg(jpeg_data)\n return tf.image.convert_image_dtype(decoded_frames, dtype=tf.float32)",
"def Motion_estimate_reverse_1frame(ref0_frame,ref1_frame,P_frame,block_size):\n \n nb_blocks = width//block_size*height//block_size\n \n vect_field = np.array(P_frame[:nb_blocks*3])\n vect_field = vect_field.reshape((height//block_size,width//block_size,3))\n \n frame_error = DCT_inverse(np.array(P_frame[nb_blocks*3:]),offset=0)\n tar_Y = frame_error[ :sep1].reshape(height,width)\n tar_U = frame_error[sep1:sep2].reshape(height//2,width//2)\n tar_V = frame_error[sep2: ].reshape(height//2,width//2)\n \n ref_frame = [ref0_frame,ref1_frame]\n \n for X in range(0,height//block_size):\n for Y in range(0,width//block_size):\n xa, xz = X*block_size,(X+1)*block_size\n ya, yz = Y*block_size,(Y+1)*block_size\n \n ref,vx,vy = vect_field[X,Y,:]\n \n pxa, pxz = xa+vx,xz+vx\n pya, pyz = ya+vy,yz+vy\n \n patch_Y = ref_Y[ref][pxa:pxz,pya:pyz]\n patch_U = ref_U[ref][pxa//2:pxz//2,pya//2:pyz//2]\n patch_V = ref_V[ref][pxa//2:pxz//2,pya//2:pyz//2]\n \n tar_Y[xa:xz,ya:yz] += patch_Y\n tar_U[xa//2:xz//2,ya//2:yz//2] += patch_U\n tar_V[xa//2:xz//2,ya//2:yz//2] += patch_V\n\n target_frame = np.concatenate((tar_Y.flatten(),\n tar_U.flatten(),\n tar_V.flatten()))\n return target_frame",
"def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)",
"def to_data(x):\n if torch.cuda.is_available():\n x = x.cpu()\n x = x.data.numpy()\n x = ((x +1)*255 / (2)).astype(np.uint8) # rescale to 0-255\n return x",
"def get_signal_gwgds1072au(a_signal_packed: bytes, a_scale : float ) -> list:\n the_return = None\n the_signal_packed=a_signal_packed\n the_scale=a_scale\n the_signal_sequence=[]\n the_signal=0.0 #TODO reminder check this before allowing it\n the_info=[]\n n=4\n bla=0\n blb=bla+n\n print(the_signal_packed)\n JX=unpack('>%sh' % 2 ,the_signal_packed[bla:blb])\n for ii in range(0,2003):\n the_info.append(unpack('>%sh' % 2 ,the_signal_packed[bla:blb])[0])\n bla=bla+n\n blb=blb+n\n #TODO get the potential scale\n #TODO get the offset\n #TODO get the time scale\n\n return the_info",
"def preprocess_frame(self, frame):\n state = torch.Tensor(frame)\n return gpuify(state, self.gpu_id)",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent",
"def vector_convert(self, pos) :\n delta = AUTO_width2//3\n bright_list = []\n for i in range(-1,2):\n for j in range(-1,2) :\n b = 0\n count = 0\n for x in range(max(0, pos[0] + i*delta), min(self.m_x, pos[0] + (i+1)*delta)):\n for y in range(max(0, pos[1] + j*delta), min(self.m_y, pos[1] + (j+1)*delta)):\n b += self.current_array[x][y]\n count += 1\n if count == 0 :\n bright_list.append(0)\n else :\n if b == 0 : #prevent 0 divde\n b = 1\n bright_list.append(b/count)\n bright_list = np.array(bright_list)\n m = np.max(bright_list)/self.current_total_avg\n bright_list = bright_list/np.min(bright_list) -1\n bright_list = np.append(bright_list, m)\n return bright_list",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent",
"def snd_rcv(self, u):\n self._soc.send(struct.pack(self._u_fmt, *u))\n data = self._soc.recv(self._buf_size)\n return np.array(struct.unpack(self._x_fmt, data), dtype=np.float32)",
"def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1",
"def preprocess(x):\n if x.shape[-1] < 16000 * 8:\n raise ValueError(\n \"Cannot preprocess tensor less than 8 seconds in duration.\"\n )\n vad = VadChunk(*get_vad(\"both\"))\n return vad(x)",
"def float32_to_uint8(inputs):\n return np.uint8(np.clip(np.round(inputs * 255), 0, 255))",
"def convert_to_continuos_f0(f0):\n # get uv information as binary\n uv = np.float32(f0 != 0)\n\n # get start and end of f0\n if (f0 == 0).all():\n logging.warning(\"all of the f0 values are 0.\")\n return uv, f0\n start_f0 = f0[f0 != 0][0]\n end_f0 = f0[f0 != 0][-1]\n\n # padding start and end of f0 sequence\n start_idx = np.where(f0 == start_f0)[0][0]\n end_idx = np.where(f0 == end_f0)[0][-1]\n f0[:start_idx] = start_f0\n f0[end_idx:] = end_f0\n\n # get non-zero frame index\n nz_frames = np.where(f0 != 0)[0]\n\n # perform linear interpolation\n f = interp1d(nz_frames, f0[nz_frames])\n cont_f0 = f(np.arange(0, f0.shape[0]))\n\n return uv, cont_f0"
] | [
"0.6424506",
"0.6271997",
"0.6116359",
"0.6106592",
"0.6086693",
"0.6025813",
"0.6025813",
"0.59201217",
"0.586013",
"0.5815289",
"0.5804463",
"0.564739",
"0.5637826",
"0.56362927",
"0.56270623",
"0.5616849",
"0.5541839",
"0.5488203",
"0.54803914",
"0.54658365",
"0.54447323",
"0.5426965",
"0.5415887",
"0.5402738",
"0.5395356",
"0.5375945",
"0.5370641",
"0.53697014",
"0.53526384",
"0.53460836"
] | 0.6721163 | 0 |
Test _arrange_test_result method with only one module. | def test_arrange_test_result_one_module(self):
pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)
fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)
ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)
reporter_1 = result_reporter.ResultReporter()
reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])
reporter_2 = result_reporter.ResultReporter()
reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])
info_dict = {}
aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])
expect_summary = {aei._STATUS_IGNORED_KEY : 1,
aei._STATUS_FAILED_KEY : 2,
aei._STATUS_PASSED_KEY : 3}
self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_arrange_test_result_multi_module(self):\n group_a_pass_1 = self._create_test_result(group_name='grpup_a',\n status=test_runner_base.PASSED_STATUS)\n group_b_pass_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.PASSED_STATUS)\n group_c_pass_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.PASSED_STATUS)\n group_b_fail_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.FAILED_STATUS)\n group_c_fail_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.FAILED_STATUS)\n group_c_ignore_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1])\n\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY])\n\n expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_c_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])",
"def test_arrange_test_result_multi_runner(self):\n runner_a_pass_1 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_2 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_3 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_b_fail_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_fail_2 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_ignore_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.IGNORED_STATUS)\n\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 0}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])",
"def test_module(self):\n pass",
"def test_get_results(self):\n pass",
"def test_basic_execution(self):",
"def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]",
"def test_package(self):\n pass",
"def test_get_scenarios_expanded(self):\n pass",
"def test_get_scenarios(self):\n pass",
"def after_test(self, test_results):\n pass",
"def test_2():",
"def test_our_add(self):\n\n # arrange\n x = 2\n y = 3\n expected_result = 5\n\n # act; assert\n self.assertEqual(self.our_module.add(x, y), expected_result)",
"def test_composition(self):",
"def test_3():",
"def pytest_can_run_together(item1, item2):",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)",
"def runTest(self):\n self.setUp()\n self.test_modul1()",
"def run_case(self, **kwargs):\n module_name = kwargs.get('module_name', None)\n if self.result:\n self.success_msg.append('>>>%s PASSED' % module_name or sys.modules[__name__])\n else:\n self.fail_msg.insert(0, '>>>%s FAILED' % module_name or sys.modules[__name__])",
"def suite_ended(self, module):",
"def test_main(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder,\n full_path=True,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=False,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=True,\n only_files=True,\n )\n need_result = ['antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n self.assertEqual(sorted(os.listdir('.')), sorted(listdir(path='.', full_path=False)))",
"def test_require():",
"def getTestResults():",
"def test_one():\n run_mergesort([1], [1])",
"def test_1():",
"def runtest(self):",
"def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise",
"def _test(self):",
"def _test(self):",
"def _test(self):"
] | [
"0.7826515",
"0.6627111",
"0.6378898",
"0.6037766",
"0.58032465",
"0.5796728",
"0.57169634",
"0.5695606",
"0.5693893",
"0.56901085",
"0.5688211",
"0.5681043",
"0.5648296",
"0.56379336",
"0.5626959",
"0.56251144",
"0.56239635",
"0.5623645",
"0.5619237",
"0.5617106",
"0.56157196",
"0.5597624",
"0.55765915",
"0.55617803",
"0.55575514",
"0.5538702",
"0.5538075",
"0.5525763",
"0.5525763",
"0.5525763"
] | 0.8463349 | 0 |
Test _arrange_test_result method with multi module. | def test_arrange_test_result_multi_module(self):
group_a_pass_1 = self._create_test_result(group_name='grpup_a',
status=test_runner_base.PASSED_STATUS)
group_b_pass_1 = self._create_test_result(group_name='grpup_b',
status=test_runner_base.PASSED_STATUS)
group_c_pass_1 = self._create_test_result(group_name='grpup_c',
status=test_runner_base.PASSED_STATUS)
group_b_fail_1 = self._create_test_result(group_name='grpup_b',
status=test_runner_base.FAILED_STATUS)
group_c_fail_1 = self._create_test_result(group_name='grpup_c',
status=test_runner_base.FAILED_STATUS)
group_c_ignore_1 = self._create_test_result(group_name='grpup_c',
status=test_runner_base.IGNORED_STATUS)
reporter_1 = result_reporter.ResultReporter()
reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1])
reporter_2 = result_reporter.ResultReporter()
reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1])
info_dict = {}
aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])
expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,
aei._STATUS_FAILED_KEY : 0,
aei._STATUS_PASSED_KEY : 1}
self.assertEqual(
expect_group_a_summary,
info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY])
expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0,
aei._STATUS_FAILED_KEY : 1,
aei._STATUS_PASSED_KEY : 1}
self.assertEqual(
expect_group_b_summary,
info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY])
expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1,
aei._STATUS_FAILED_KEY : 1,
aei._STATUS_PASSED_KEY : 1}
self.assertEqual(
expect_group_c_summary,
info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY])
expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,
aei._STATUS_FAILED_KEY : 2,
aei._STATUS_PASSED_KEY : 3}
self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])",
"def test_arrange_test_result_multi_runner(self):\n runner_a_pass_1 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_2 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_3 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_b_fail_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_fail_2 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_ignore_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.IGNORED_STATUS)\n\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 0}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])",
"def test_get_results(self):\n pass",
"def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)",
"def test_module(self):\n pass",
"def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]",
"def test_composition(self):",
"def test_basic_execution(self):",
"def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results",
"def test_3():",
"def test_list_group(self):\n pass",
"def main():\n test_merge_quick_sort()\n test_compare()",
"def runTest(self):\n self.setUp()\n self.test_modul1()",
"def test_get_scenarios(self):\n pass",
"def getTestResults():",
"def test_get_scenarios_expanded(self):\n pass",
"def test_one():\n run_mergesort([1], [1])",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def runtest(self):",
"def after_test(self, test_results):\n pass",
"def process_module_list(self, modules):",
"def test_subworkflows_info_in_modules_repo(self):\n self.subworkflow_install.install(\"bam_sort_stats_samtools\")\n mods_info = nf_core.subworkflows.SubworkflowInfo(self.nfcore_modules, \"bam_sort_stats_samtools\")\n mods_info.local = True\n mods_info_output = mods_info.get_component_info()\n console = Console(record=True)\n console.print(mods_info_output)\n output = console.export_text()\n\n assert \"Subworkflow: bam_sort_stats_samtools\" in output\n assert \"Inputs\" in output\n assert \"Outputs\" in output",
"def test_2():",
"def runTests(self):\n \n pass",
"def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]",
"def test_test_group_parameters(self):\n pass",
"def master_test_suite( pkg_mod_iter ):\n master_suite= unittest.TestSuite()\n for package, module_iter in pkg_mod_iter:\n for filename, module in module_iter:\n print( package+\".\"+module )\n suite= doctest.DocTestSuite( package+\".\"+module )\n print( \" \", suite )\n master_suite.addTests( suite )\n runner= unittest.TextTestRunner( verbosity=1 )\n runner.run( master_suite )",
"def test_modules_in_function_return_type_hint_multiple(dependency_testing_model) -> None:\n func: Callable = dependency_testing_model.nested_multiple_returns_hint\n expected_modules = {'urllib3', 'PIL'}\n extracted_modules: Set[str] = md.modules_in_function_signature(func)\n assert extracted_modules == expected_modules",
"def pytest_after_group_items(session, config, items):",
"def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x"
] | [
"0.8273559",
"0.71171457",
"0.59634614",
"0.59479153",
"0.58932537",
"0.5843216",
"0.57562256",
"0.57490045",
"0.57445383",
"0.5740733",
"0.57388484",
"0.5723813",
"0.57204384",
"0.5707784",
"0.5704445",
"0.57039195",
"0.56746626",
"0.56736004",
"0.5648056",
"0.56323606",
"0.5616576",
"0.5611849",
"0.56104773",
"0.5606904",
"0.560013",
"0.55976397",
"0.55842614",
"0.55831844",
"0.555388",
"0.5548459"
] | 0.8312768 | 0 |
Test _arrange_test_result method with multi runner. | def test_arrange_test_result_multi_runner(self):
runner_a_pass_1 = self._create_test_result(runner_name='runner_a',
status=test_runner_base.PASSED_STATUS)
runner_a_pass_2 = self._create_test_result(runner_name='runner_a',
status=test_runner_base.PASSED_STATUS)
runner_a_pass_3 = self._create_test_result(runner_name='runner_a',
status=test_runner_base.PASSED_STATUS)
runner_b_fail_1 = self._create_test_result(runner_name='runner_b',
status=test_runner_base.FAILED_STATUS)
runner_b_fail_2 = self._create_test_result(runner_name='runner_b',
status=test_runner_base.FAILED_STATUS)
runner_b_ignore_1 = self._create_test_result(runner_name='runner_b',
status=test_runner_base.IGNORED_STATUS)
reporter_1 = result_reporter.ResultReporter()
reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3])
reporter_2 = result_reporter.ResultReporter()
reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1])
info_dict = {}
aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])
expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,
aei._STATUS_FAILED_KEY : 0,
aei._STATUS_PASSED_KEY : 3}
self.assertEqual(
expect_group_a_summary,
info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY])
expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1,
aei._STATUS_FAILED_KEY : 2,
aei._STATUS_PASSED_KEY : 0}
self.assertEqual(
expect_group_b_summary,
info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY])
expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,
aei._STATUS_FAILED_KEY : 2,
aei._STATUS_PASSED_KEY : 3}
self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_arrange_test_result_multi_module(self):\n group_a_pass_1 = self._create_test_result(group_name='grpup_a',\n status=test_runner_base.PASSED_STATUS)\n group_b_pass_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.PASSED_STATUS)\n group_c_pass_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.PASSED_STATUS)\n group_b_fail_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.FAILED_STATUS)\n group_c_fail_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.FAILED_STATUS)\n group_c_ignore_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1])\n\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY])\n\n expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_c_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])",
"def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])",
"def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results",
"def execute_testsets(testsets):\n group_results = dict() #results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n\n #Make sure we actually have tests to execute\n if not mytests and not mybenchmarks:\n # no tests in this test set, probably just imports.. skip to next test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n #Run tests, collecting statistics as needed\n for test in mytests:\n #Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = run_test(test, test_config = myconfig)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: #Print failure, increase failure counts for that test group\n logging.error('Test Failed: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group+\" HTTP Status Code: \"+str(result.response_code))\n\n if test.validators is not None:\n for validator in test.validators:\n if validator.passed == False:\n logging.warning(\" Validation Failed: \" + str(validator))\n\n #Increment test failure counts for that group (adding an entry if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: #Test passed, print results\n logging.info('Test Succeeded: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group)\n\n #Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logging.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logging.info(\"Benchmark Starting: \"+benchmark.name+\" Group: \"+benchmark.group)\n curl = configure_curl(benchmark, myconfig)\n benchmark_result = run_benchmark(curl, benchmark, myconfig)\n print benchmark_result\n logging.info(\"Benchmark Done: \"+benchmark.name+\" Group: \"+benchmark.group)\n\n if benchmark.output_file: # Write file\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logging.debug(\"Benchmark writing to file: \" + benchmark.output_file)\n write_method(my_file, benchmark_result, benchmark, test_config = myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print \"===================================\"\n\n #Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n if (failures > 0):\n print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n else:\n print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n\n return total_failures",
"def after_test(self, test_results):\n pass",
"def main():\n test_merge_quick_sort()\n test_compare()",
"def run_combined(self):\n self.runtest_autokey()\n self.runtest_mediaresource()\n self.runtest_composite_slug()\n self.runtest_all_types()\n self.runtest_complex_types()\n self.runtest_only_key()\n self.runtest_compound_key()\n self.runtest_simple_select()\n self.runtest_paging()\n self.runtest_nav_o2o()\n self.runtest_nav_o2o_1()\n self.runtest_nav_zo2o()\n self.runtest_nav_zo2o_f()\n self.runtest_nav_zo2o_b()\n self.runtest_nav_many2o()\n self.runtest_nav_many2o_f()\n self.runtest_nav_many2o_b()\n self.runtest_nav_many2zo()\n self.runtest_nav_many2zo_f()\n self.runtest_nav_many2zo_b()\n self.runtest_nav_many2zo_r()\n self.runtest_nav_many2zo_rf()\n self.runtest_nav_many2zo_rb()\n self.runtest_nav_many2many()\n self.runtest_nav_many2many_1()\n self.runtest_nav_many2many_r()\n self.runtest_nav_many2many_r1()",
"def test_get_results(self):\n pass",
"def getTestResults():",
"def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]",
"def runTests(self):\n \n pass",
"def test_list_runs(self):\n pass",
"def _handler_test_run_tests(self, *args, **kwargs):\n next_state = None\n result = None\n\n tc_pass = False\n tt_pass = False\n tp_pass = False\n tc_result = None\n tt_result = None\n tp_result = None\n\n test_result = {}\n\n try:\n tc_pass, tc_result = self._do_cmd_resp('tc', timeout=200)\n tt_pass, tt_result = self._do_cmd_resp('tt', timeout=200)\n tp_pass, tp_result = self._do_cmd_resp('tp', timeout=200)\n \n except Exception as e:\n test_result['exception'] = e\n test_result['message'] = 'Error running instrument tests.'\n \n finally:\n test_result['cond_test'] = 'Passed' if tc_pass else 'Failed'\n test_result['cond_data'] = tc_result\n test_result['temp_test'] = 'Passed' if tt_pass else 'Failed'\n test_result['temp_data'] = tt_result\n test_result['pres_test'] = 'Passed' if tp_pass else 'Failed'\n test_result['pres_data'] = tp_result\n test_result['success'] = 'Passed' if (tc_pass and tt_pass and tp_pass) else 'Failed'\n \n self._driver_event(DriverAsyncEvent.TEST_RESULT, test_result)\n next_state = SBE37ProtocolState.COMMAND\n \n return (next_state, result)",
"def run_stage_loop(cls, _opts, tests_results, put_next_stage):\n for _, result in tests_results:\n put_next_stage(result)",
"def runtest(self):",
"def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x",
"def test_one():\n run_mergesort([1], [1])",
"def test_batch(self):\n pass",
"def test_multiple_commands_at_same_time(self):",
"def test_basic_execution(self):",
"def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)",
"def test_by_order(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_order(addon, execute_order, stop_order))\n self.run_mgr.by_order(self.cli_inst, ['execute', 'start'], ['stop'])\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Execute'))\n self.assertTrue(output[1].startswith('Start'))\n self.assertTrue(output[2].startswith('Stop'))",
"def test_result_order(env):\n timeouts = list(reversed([env.timeout(delay) for delay in range(3)]))\n\n def p(env, timeouts):\n results = yield env.all_of(timeouts)\n assert list(results.keys()) == timeouts\n\n env.process(p(env, timeouts))\n env.run()",
"def __execute_tests(self, lst_tests):\n tests_pass = tests_fail = 0\n queue_of_result = multiprocessing.Queue()\n for test in lst_tests:\n process = multiprocessing.Process(\n target=TestRunner.__helper_execute_test,\n kwargs={\"test_cls\": test,\n \"time_out\": self.__args.timeout,\n \"channel\": queue_of_result})\n process.start()\n process.join()\n temp_result = {}\n if not queue_of_result.empty():\n temp_result = queue_of_result.get_nowait()\n\n if \"status\" in temp_result:\n if temp_result[\"status\"] == result.Status.PASSED:\n tests_pass += 1\n else:\n tests_fail += 1\n\n if \"json_path\" in temp_result:\n self.__lst_json_files.append(temp_result[\"json_path\"])\n\n if \"log_path\" in temp_result:\n self.__lst_log_files.append(temp_result[\"log_path\"])\n\n return tests_pass, tests_fail",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def test_pyt_multitask(self):\n\n def run_display_test(defaults, ep_and_ex_counts):\n with testing_utils.capture_output() as f:\n parser = display_setup_args()\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n display_data(opt)\n str_output = f.getvalue()\n self.assertTrue(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n ep_and_ex_counts[0], ep_and_ex_counts[1]\n ) in str_output,\n 'PytorchDataTeacher multitasking failed with '\n 'following args: {}'.format(opt)\n )\n\n task1 = 'babi:task1k:1'\n task2 = 'babi:task1k:2'\n dataset1 = 'flickr30k'\n dataset2 = 'vqa_v1'\n\n # Expected example and episode counts\n eps_and_exs_counts = [\n (1800, 1800),\n (1080, 1800),\n (29900, 29900),\n (29180, 29900),\n (277349, 277349)\n ]\n defaults = parser_defaults.copy()\n\n # 1.\n defaults['pytorch_teacher_task'] = '{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n run_display_test(defaults, eps_and_exs_counts[4])",
"def run(cls): \n tests_to_run = cls.config.TESTS # A list of 5-tuple elements specifying the tests to run. See the\n # 'Test Setup' section in config.py.template for more info.\n test_group_name = \"Alchemist Tests\" # A short string identifier for this test run.\n output_dir = cls.config.OUTPUT_DIR # The output file where we write results.\n \n try:\n os.makedirs(output_dir,0o777)\n except:\n pass\n num_tests_to_run = len(tests_to_run)\n\n print(OUTPUT_DIVIDER_STRING)\n if num_tests_to_run == 1:\n print(\"Running %d test in %s\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Running %d tests in %s\" % (num_tests_to_run, test_group_name))\n failed_tests = []\n\n cls.before_run_tests()\n \n spark_settings = []\n for i in cls.config.SPARK_SETTINGS:\n spark_settings.append(i.to_array()[0])\n \n output_settings = []\n for i in cls.config.OUTPUT_SETTINGS:\n output_settings.append(i.to_array()[0])\n \n main_class = \"altest.AlTest\"\n\n for meta_data, opt_sets in tests_to_run:\n print(OUTPUT_DIVIDER_STRING + '\\n')\n# print(\"Running test command: '%s' ... \" % main_class)\n \n meta = {}\n meta_pairs = [i.to_tuple() for i in meta_data]\n for mp in meta_pairs:\n meta[mp[0].replace('-', '_')] = mp[1].replace('0x20', ' ')\n \n meta_settings = []\n for i in meta_data:\n meta_settings.append(i.to_array()[0])\n \n# stdout_filename = \"%s/%s.out\" % (output_dir, meta['short_name'])\n# stderr_filename = \"%s/%s.err\" % (output_dir, meta['short_name'])\n# \n# out_file = open(output_dir + \"/\" + meta['short_name'] + \".out\", 'w')\n\n # Run a test for all combinations of the OptionSets given, then capture\n # and print the output.\n opt_set_arrays = [i.to_array() for i in opt_sets]\n for opt_list in itertools.product(*opt_set_arrays):\n\n cmd = cls.get_spark_submit_cmd(spark_settings, main_class, output_settings, meta_settings, opt_list)\n# print(\"\\nSetting env var SPARK_SUBMIT_OPTS: %s\" % java_opts_str)\n# test_env[\"SPARK_SUBMIT_OPTS\"] = java_opts_str\n print(\"Running command:\")\n print(\"%s\\n\" % cmd)\n Popen(cmd, shell=True, env=test_env).wait()\n\n try:\n src = output_dir + meta['short_name'] + '_latest/'\n src_files = os.listdir(src)\n src_file = src_files[0][:-4]\n new_dir = output_dir + src_file\n os.makedirs(new_dir)\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if (os.path.isfile(full_file_name)):\n shutil.copy(full_file_name, new_dir)\n except:\n pass\n \n# result_string = cls.process_output(config, meta['short_name'], opt_list,\n# stdout_filename, stderr_filename)\n# print(OUTPUT_DIVIDER_STRING)\n# print(\"\\nResult: \" + result_string)\n# print(OUTPUT_DIVIDER_STRING)\n# if \"FAILED\" in result_string:\n# failed_tests.append(meta['short_name'])\n# \n# \n# out_file.write(result_string + \"\\n\")\n# out_file.flush()\n\n if num_tests_to_run == 1:\n print(\"Finished running %d test in %s.\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Finished running %d tests in %s.\" % (num_tests_to_run, test_group_name))\n# print(\"\\nNumber of failed tests: %d, failed tests: %s\" %\n# (len(failed_tests), \",\".join(failed_tests)))\n print(OUTPUT_DIVIDER_STRING)",
"def test_insertSort3(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_3[0]),self.test_3[1])",
"def test_run_exec(self):\n from multiprocessing import Process, Queue\n output = Queue()\n repodir = \"~/codes/ci/tests/repo\"\n processes = []\n for i in range(3):\n processes.append(Process(target=run_exec, args=(repodir, \"ls -la\", output, i)))\n processes[-1].start()\n \n #Wait for the unit tests to all finish.\n for p in processes:\n p.join()\n results = [output.get() for p in processes]\n ordered = {o[\"index\"]: o for o in results}\n\n #We consider the test successful if the output files were created and the end time\n #is not None. That means that the process ran correctly and python didn't lose\n #control of the subprocess.\n from os import path\n fullrepo = path.expanduser(repodir)\n for i in range(3):\n self.assertTrue(path.isfile(path.join(fullrepo, \"{}.cidat\".format(i))))\n self.assertIsNotNone(ordered[i][\"end\"])\n self.assertEqual(ordered[i][\"code\"], 0)",
"def test_3():"
] | [
"0.8005941",
"0.7942821",
"0.6458603",
"0.6281796",
"0.61934733",
"0.6125403",
"0.61176115",
"0.6106937",
"0.6105472",
"0.6088446",
"0.6071222",
"0.6060519",
"0.60560644",
"0.6031968",
"0.60201895",
"0.60119915",
"0.59998757",
"0.5996369",
"0.5989808",
"0.5985094",
"0.5966215",
"0.5933381",
"0.59317553",
"0.5927337",
"0.5876174",
"0.5826675",
"0.58211637",
"0.5817424",
"0.5800586",
"0.5793413"
] | 0.84070575 | 0 |
A Helper to create TestResult | def _create_test_result(self, **kwargs):
test_info = test_runner_base.TestResult(**RESULT_TEST_TEMPLATE._asdict())
return test_info._replace(**kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _makeResult(self):\n\n result = super(CustomTextTestRunner, self)._makeResult()\n result.test_case_count = self.test_case_count\n return result",
"def getTestResults():",
"def create_success(test, time):\n return _TestInfo(test, time)",
"def create_result(main_test):\n result = Result(outputs=[DBHandler.NAME], main_test=main_test)\n result.startTestRun()\n return result",
"def test_get_results(self):\n pass",
"def _create_result(\n self, raw_data: T, processed_data: List[TestGroupReport]\n ) -> ImportedResult:\n raise NotImplementedError",
"def test(self):\n return self._test(result_count=1, failure_amount=1)",
"def test(self):\n return self._test(result_count=1, failure_amount=1)",
"def _CreateTestResult(self) -> paranoid_pb2.TestResultsEntry:\n\n if self.severity is None:\n raise KeyError(\"Please specify self.severity for %s.\" % self.check_name)\n return paranoid_pb2.TestResultsEntry(\n severity=self.severity, test_name=self.check_name, result=False)",
"def make_final_result(test_result, steps, begin_time):\n import time\n import pytest\n if pytest.current_exception:\n steps[-1].set_status(Status.FAILED, pytest.current_exception)\n test_failed = False\n for step in steps:\n test_result.add_step(step)\n if step.get_status() == Status.FAILED:\n print('%s: ' % str(step.get_id()) + constant.Color.FAIL +\n 'failed\\nMessage: ' + step.get_message() +\n constant.Color.ENDC)\n test_failed = True\n\n if not test_failed:\n test_result.set_test_passed()\n else:\n test_result.set_test_failed()\n\n test_result.set_duration(time.time() - begin_time)\n test_result.write_result_to_file()",
"def test_student_do_homework_positive():\n assert isinstance(result_1, HomeworkResult)",
"def _create_result(\n self, raw_data: Element, processed_data: List[TestGroupReport]\n ) -> GTestImportedResult:\n return GTestImportedResult(\n name=self.name,\n results=processed_data,\n description=self.description,\n )",
"def _get_result(self, test_result_file):\n\t\tresult = {}\n\n\t\txml_obj = xml.dom.minidom.parse(test_result_file)\n\t\tif not xml_obj.getElementsByTagName(\"completed\"):\n\t\t\tsys.stderr.write(\n\t\t\t\t\"File has empty result...removing %s\\n\" % test_result_file)\n\t\t\tos.remove(test_result_file)\n\t\t\treturn\n\n\t\tinca_resource = amass.xml_tag_value(xml_obj, \"resourceHostname\")\n\t\tresult[\"SOURCE_RESOURCE\"] = self._normalize_resource(inca_resource)\n\t\ttry:\n\t\t\tinca_resource = amass.xml_tag_value(xml_obj, \"targetHostname\")\n\t\t\tresult[\"TARGET_RESOURCE\"] = self._normalize_resource(inca_resource)\n\t\texcept:\n\t\t\tresult[\"TARGET_RESOURCE\"] = result[\"SOURCE_RESOURCE\"]\n\t\tresult[\"TEST_NAME\"] = amass.xml_tag_value(xml_obj, \"nickname\")\n\t\tresult[\"COLLECTED_DATE\"] = amass.string2datetime(amass.xml_tag_value(xml_obj, \"gmt\"))\n\t\tresult[\"RESULT\"] = None\n\t\terror = None\n\n\t\ttry:\n\t\t\terror = amass.xml_tag_value(xml_obj, \"errorMessage\")\n\t\texcept:\n\t\t\tpass\n\t\ttry:\n\t\t\tcr = amass.xml_tag_value(xml_obj, \"comparisonResult\")\n\t\t\tif cr == 'Success':\n\t\t\t\tresult[\"RESULT\"] = True\n\t\t\telse:\n\t\t\t\terror = cr if error is None else \"%s: %s\" % (cr, error)\n\t\t\t\tresult[\"RESULT\"] = False\n\t\texcept:\n\t\t\tcompleted = amass.xml_tag_value(xml_obj, \"completed\")\n\t\t\tif completed == 'true':\n\t\t\t\tresult[\"RESULT\"] = True\n\t\t\telse:\n\t\t\t\tresult[\"RESULT\"] = False\n\n\t\tif error:\n\t\t\terror.replace(\"'\", \"\")\n\t\tresult[\"ERROR_MSG\"] = error\n\n\t\treturn result",
"def run( self, test ):\n\n result = self._makeResult()\n test( result )\n result.printErrors()\n self.stream.writeln( result.separator2 )\n run = result.testsRun\n self.stream.writeln()\n\n if not result.wasSuccessful():\n self.stream.write( \"FAILED (\" )\n failed, errored = map( len, ( result.failures, result.errors ) )\n if failed:\n self.stream.write( \"failures=%d\" % failed )\n if errored:\n if failed: self.stream.write( \", \" )\n self.stream.write( \"errors=%d\" % errored )\n self.stream.writeln( \")\" )\n else:\n self.stream.writeln( \"OK\" )\n \n return result",
"def test_return_class_content_by_accepts(self,request,**kwargs):\n \n class TestReturn:\n \"\"\"Test return class\"\"\"\n def __init__(self):\n self.__t1 = 'Test'\n \n t1 = TestReturn()\n t1.test1 = 'Test1'\n \n t2 = TestReturn()\n t2.test2=\"Test2\"\n return (t1,t2)",
"def test_get_results_verbose(self):\n\t\tpass",
"def test_formatResult(self):\r\n x = self.FWP({'x': 3})\r\n self.assertEqual(x.formatResult(3), '3')",
"def test_make_results_simple(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\ttest.do_run()\n\t\ttest.make_results_simple()\n\t\tobj_ut = test.results_simple\n\t\tself.assertEqual(obj_ut, {'.text id': '100', '.text score': -1, \n\t\t\t'total wordcount': 7, 'total hits': 2, 'pos hits': 0,\n\t\t\t'neg hits': 2})",
"def make_final_result(test_result, steps, begin_time, logger):\n import time\n from .constant import Colors\n from libraries.result import Status\n for step in steps:\n test_result.add_step(step)\n if step.get_status() == Status.FAILED:\n print('%s: ' % str(step.get_id()) + Colors.FAIL + 'failed\\nMessage: ' + step.get_message() + Colors.ENDC)\n test_result.set_test_failed()\n\n test_result.set_duration(time.time() - begin_time)\n test_result.write_result_to_file()\n logger.save_log(test_result.get_test_status())",
"def generateFinalResult(self):\n if self.__testResult == 'FAIL':\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'PASS':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'NONE':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY) \n self.__testResult = 'PASS'\n #else:\n total_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"total_count\", TestScriptSymbolTable.test_result_tab))\n pass_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"pass_count\", TestScriptSymbolTable.test_result_tab))\n fail_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"fail_count\", TestScriptSymbolTable.test_result_tab))\n conditional_chk_flag = int(TestScriptSymbolTable.get_value_from_sym_tab(\"conditional_chk_flag\", TestScriptSymbolTable.test_result_tab))\n num_of_pass_required = int(TestScriptSymbolTable.get_value_from_sym_tab(\"num_of_pass_required\", TestScriptSymbolTable.test_result_tab))\n \n if total_count >= 1:\n if conditional_chk_flag == 1:\n if num_of_pass_required <= pass_count:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n if fail_count > 0:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n if GlobalConfigFiles.curr_tc_name != \"\":\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n logging.debug(\"\\n TEST COMPLETED without FINAL RESULT...\")\n\n self.__testResult = 'FAIL'\n\n self.tmsPacket.TestResult = self.__testResult\n if GlobalConfigFiles.curr_tc_name != \"\":\n logging.info(\"\\n FINAL TEST RESULT ---> %15s\", self.__testResult)\n logging.info(' END: TEST CASE [%s]', GlobalConfigFiles.curr_tc_name)\n\n Util.set_color(Util.FOREGROUND_WHITE)\n GlobalConfigFiles.test_result = self.__testResult\n\n self.tmsPacket.TimeStamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime())\n if GlobalConfigFiles.curr_tc_name != \"\":\n self.tmsPacket.writeTMSJson()\n\n return",
"def createMakingTest(tx, query, personId, testId, date, hour, result):\n tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)",
"def _get_result(driver):\n try: \n \tresult = oval.OVALResult(driver.request.title, driver.execute_tests())\n \tcurrent_app.logger.info(time.ctime() + \"\\tOVAL Result created for %s\" % result.title)\n\n \treturn result\n except oval.OVALDriverError as e:\n\tflash(str(e))",
"def generate_mock_result(project='TEST', repository=None, status='SUCCESS', success=True, run_id=1,\n timestamp=None):\n if not timestamp: # If no time provided, use right now.\n timestamp = str(int(time.time() * 1000))\n if not repository:\n repository = '{}-repo'.format(project.lower())\n result = dict(project=project, repository=repository, status=status, success=success, run_id=run_id,\n timestamp=timestamp, id='{}{}'.format(repository, run_id))\n return result",
"def _actionTestSetDetailsFromResult(self):\n from testmanager.core.testresults import TestResultData;\n from testmanager.core.testset import TestSetData;\n idTestResult = self.getIntParam(TestSetData.ksParam_idTestResult);\n oTestResultData = TestResultData().initFromDbWithId(self._oDb, idTestResult);\n return self._actionTestSetDetailsCommon(oTestResultData.idTestSet);",
"def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])",
"def test_rule_create_command_success_hr(rule_create_success, rule_create_success_hr):\n resp = prepare_rule_create_output(rule_create_success)\n assert resp == rule_create_success_hr",
"def test_importer_returns_tests():\n flowtask = FlowTaskFactory()\n flowtask.build_flow.build.org = OrgFactory()\n with temporary_dir() as output_dir:\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_with_failures.xml\",\n Path(output_dir) / \"output.xml\",\n )\n actual = robot_importer.import_robot_test_results(flowtask, output_dir)\n expected = [\n {\n \"name\": \"Passing test\",\n \"group\": \"Robot Fail\",\n \"status\": \"Pass\",\n \"start_time\": \"2020-06-23T18:49:20.955000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.956000+00:00\",\n \"exception\": \"Life is good, yo.\",\n \"doc\": \"\",\n \"tags\": [\"tag one\", \"tag two\"],\n },\n {\n \"name\": \"Failing test 1\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.957000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"exception\": \"Danger, Will Robinson!\",\n \"doc\": \"A test that fails with a keyword directly in the test\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 2\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:20.960000+00:00\",\n \"end_time\": \"2020-06-23T18:49:20.963000+00:00\",\n \"doc\": \"A test that fails due to a failure in a lower level keyword.\",\n \"exception\": \"I'm sorry, Dave. I'm afraid I can't do that.\",\n \"tags\": [],\n },\n {\n \"name\": \"Failing test 3\",\n \"group\": \"Robot Fail\",\n \"status\": \"Fail\",\n \"start_time\": \"2020-06-23T18:49:21.017000+00:00\",\n \"end_time\": \"2020-06-23T18:49:21.024000+00:00\",\n \"exception\": (\n \"Several failures occurred:\\n\\n\"\n \" 1) First failure\\n\\n\"\n \" 2) Second failure\"\n ),\n \"doc\": \"A test that has multiple keyword failures\",\n \"tags\": [],\n },\n ]\n assert actual == expected",
"def get_result(self) -> Any:\n ...",
"def parse_verifier_result(self):\n stat = self.get_verifier_result(self.verification_id)\n try:\n num_executed = stat['num_tests'] - stat['num_skipped']\n try:\n self.result = 100 * stat['num_success'] / num_executed\n except ZeroDivisionError:\n self.result = 0\n if stat['num_tests'] > 0:\n LOGGER.info(\"All tests have been skipped\")\n else:\n LOGGER.error(\"No test has been executed\")\n return\n\n with open(os.path.join(self.res_dir, \"rally.log\"),\n 'r', encoding='utf-8') as logfile:\n output = logfile.read()\n\n success_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} success ',\n output):\n success_testcases.append(match)\n failed_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} fail',\n output):\n failed_testcases.append(match)\n skipped_testcases = []\n for match in re.findall(r'.*\\{\\d{1,2}\\} (.*?) \\.{3} skip(?::| )',\n output):\n skipped_testcases.append(match)\n\n self.details = {\"tests_number\": stat['num_tests'],\n \"success_number\": stat['num_success'],\n \"skipped_number\": stat['num_skipped'],\n \"failures_number\": stat['num_failures'],\n \"success\": success_testcases,\n \"skipped\": skipped_testcases,\n \"failures\": failed_testcases}\n except Exception: # pylint: disable=broad-except\n self.result = 0\n\n LOGGER.info(\"Tempest %s success_rate is %s%%\",\n self.case_name, self.result)",
"def test_results(self):\n result = self.test_client._results\n\n assert isinstance(result, list)\n assert len(result) == 1"
] | [
"0.73380005",
"0.707614",
"0.6962716",
"0.68611425",
"0.67847985",
"0.671325",
"0.67094016",
"0.67094016",
"0.66369814",
"0.64441335",
"0.64401895",
"0.64096403",
"0.62658703",
"0.6241152",
"0.6234344",
"0.6229282",
"0.6228823",
"0.62096107",
"0.61986804",
"0.6181871",
"0.61169815",
"0.6110971",
"0.60708463",
"0.6070631",
"0.6025079",
"0.59909356",
"0.5985757",
"0.59823847",
"0.5978657",
"0.5977935"
] | 0.8006094 | 0 |
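The document in this row builds a TestResult by expanding a namedtuple template with _asdict() and then overriding selected fields with _replace(). The following is a minimal, self-contained sketch of that pattern; TestResult's field names and the RESULT_TEST_TEMPLATE defaults here are illustrative assumptions, not the actual atest definitions.

from collections import namedtuple

# Hypothetical stand-ins for test_runner_base.TestResult and RESULT_TEST_TEMPLATE;
# the real field names may differ.
TestResult = namedtuple('TestResult', ['runner_name', 'test_name', 'status', 'details'])
RESULT_TEST_TEMPLATE = TestResult(runner_name='mock', test_name='mock', status='PASSED', details=None)

def create_test_result(**kwargs):
    # Expand the template into keyword arguments, then override selected fields.
    test_info = TestResult(**RESULT_TEST_TEMPLATE._asdict())
    return test_info._replace(**kwargs)

failed = create_test_result(test_name='testFoo', status='FAILED')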
Generate a list of viable coordinates for mines, and randomly choose them. | def generate_mines(self, number):
mine_locations = []
available_places = [[j, i]
for i in xrange(0, self.x) for j in xrange(0, self.y)]
while number > 0:
# the chosen coordinate for a mine is appended into the list and is
# removed from the list of choices to prevent duplicates.
choice = random.choice(available_places)
available_places.remove(choice)
mine_locations.append(choice)
number -= 1
return mine_locations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_coordinates():\n return Coordinates(random.randint(0, 14), random.randint(0, 14))",
"def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine",
"def _generate_mines(self):\r\n mines_left = self.mines\r\n while mines_left > 0:\r\n gen_row = random.randint(0, self.rows-1)\r\n gen_col = random.randint(0, self.cols-1)\r\n\r\n if not self.fields[gen_row][gen_col].mine:\r\n self.fields[gen_row][gen_col].mine = True\r\n self._increment_fields_values(gen_row, gen_col)\r\n self.mines_cords.append((gen_row, gen_col))\r\n mines_left -= 1",
"def get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island",
"def gen_placecells(self, min_spread=0.2):\r\n\r\n N = None\r\n num_tries = 1000 # a limit on the number of attempts to place a new placecell\r\n\r\n # assign random x,y locations to each neuron\r\n locations = [self.random_location()]\r\n while True:\r\n # generate a random new point\r\n new_loc = self.random_location()\r\n\r\n # check that the point isn't too close to previous points\r\n count = 0\r\n while min([self.calc_dist(new_loc, l) for l in locations]) < min_spread and count < num_tries:\r\n new_loc = self.random_location()\r\n count += 1\r\n\r\n # add the new point\r\n locations += [new_loc]\r\n\r\n if (N == None and count >= num_tries) or len(locations) == N:\r\n # stop when required number of place cells built (if N specified),\r\n # or when world has been decently filled\r\n break\r\n\r\n return locations",
"def generate_available_position(unavailable_positions, max_position):\n\n x = randint(0, max_position)\n y = randint(0, max_position)\n position = (x, y)\n while position in unavailable_positions:\n x = randint(0, max_position)\n y = randint(0, max_position)\n position = (x, y)\n\n return position",
"def generateMines(num_rows, num_cols, num_mines):\n arr = np.random.permutation(num_rows * num_cols)\n return arr[:num_mines]",
"def place_mines(board_size, num_mines):\n mines_placed = 0\n board = np.zeros((board_size, board_size), dtype=int)\n while mines_placed < num_mines:\n rnd = randint(0, board_size * board_size)\n x = int(rnd / board_size)\n y = int(rnd % board_size)\n if is_valid(x, y):\n if not is_mine(board, x, y):\n board[x, y] = MINE\n mines_placed += 1\n return board",
"def random_position():\n path = (\n os.path.dirname(__file__)\n + os.sep\n + \"templates\"\n + os.sep\n + \"data\"\n + os.sep\n + \"taxi_stations.json\"\n )\n with open(path) as f:\n stations = json.load(f)[\"features\"]\n pos = random.choice(stations)\n coords = [pos[\"geometry\"][\"coordinates\"][1], pos[\"geometry\"][\"coordinates\"][0]]\n lat = float(\"{0:.6f}\".format(coords[0]))\n lng = float(\"{0:.6f}\".format(coords[1]))\n return [lat, lng]",
"def random_positions(mini, maxi):\n x_cord = (maxi - mini)*np.random.random(SIZE) + mini\n y_cord = (maxi - mini)*np.random.random(SIZE) + mini\n return np.column_stack([x_cord, y_cord])",
"def __get_random_hotspot(self):\n x_min = self.occupancy_map.info.origin.position.x\n x_max = x_min + self.occupancy_map.info.width * self.occupancy_map.info.resolution\n y_min = self.occupancy_map.info.origin.position.y\n y_max = y_min + self.occupancy_map.info.height * \\\n self.occupancy_map.info.resolution\n # This might bes a bit strange, but we have the following problem:\n # some simulators need a square version of the same map. A square version\n # will have other x_max or y_max and thus the random hotspots will be different.\n # TO prevent this, we will always take only the max value of either x_max or y_max.\n # This will be the same for the square version and the not-square version (of the same map).\n max_value = max(x_max, y_max)\n\n # search for a not occupied position\n while True:\n # previously: x = random.uniform(x_min, x_max) # see problem description above\n x = random.uniform(x_min, max_value)\n # previously: y = random.uniform(y_min, y_max) # see problem description above\n y = random.uniform(y_min, max_value)\n # due to the workaround for the problem above, it can be that the value is out\n # of map for the not square map version. We need to skip this (the square\n # map version will skip it due to occupied cell...):\n if x <= x_max and y <= y_max:\n cell_x = min(int(\n (x - x_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.width - 1)\n cell_y = min(int(\n (y - y_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.height - 1)\n if not self.__cell_is_occupied(cell_x, cell_y):\n break\n spread = random.uniform(0.5, 1.0)\n return (x, y, spread)",
"def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations",
"def choose_next(self, round):\n return random.choice(self.possible_coords)",
"def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )",
"def generate_mine_map(width=30, height=16, num_mines=99):\n\n if num_mines > width * height:\n print(\"The number of mines exceeds the size of the board.\")\n return\n \n mine_map = [[False for i in range(width)] for j in range(height)]\n mines = 0\n while mines < num_mines:\n x = random.randint(0, width-1)\n y = random.randint(0, height-1)\n if not mine_map[y][x]:\n mine_map[y][x] = True\n mines += 1\n\n return mine_map",
"def __generate_spawn_points(self):\n while True:\n p1x = random.randint(0, self.width - 1)\n p1y = random.randint(0, self.height - 1)\n p2x, p2y = self.__mirror(p1x, p1y)\n d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n if d_sq >= (self.width / 2)**2:\n break\n return (p1x, p1y), (p2x, p2y)",
"def rand_start_pos(self):\n free_list = np.where(self.grid_map == self.empty_value)\n pos_idx = np.random.randint(free_list[0].shape[0])\n self.set_start_pos((free_list[0][pos_idx], free_list[1][pos_idx]))",
"def random_gps_gen_from_range(s_lat,n_lat, e_lon, w_lon):\n #print(s_lat, n_lat, e_lon, w_lon)\n latitude = random.uniform(s_lat, n_lat)\n longitude = random.uniform(e_lon, w_lon)\n return latitude, longitude",
"def spawn(self):\n (x_coord, y_coord) = (0, 0)\n grid_x = SCREEN_X // self.size\n grid_y = SCREEN_Y // self.size\n while x_coord < EDGE + 5 or x_coord > SCREEN_X - self.size - EDGE - 5:\n x_coord = random.randrange(grid_x) * self.size\n while y_coord < EDGE + 5 or y_coord > SCREEN_Y - self.size - EDGE - 5:\n y_coord = random.randrange(grid_y) * self.size\n return (x_coord, y_coord)",
"def choose_pos(self):\n s = self\n\n availablepos = []\n for dblock in s.pjs.dblocks:\n is_available = True\n\n for powerup in s.pjs.powerups:\n if powerup.rects[0].overlap(dblock.rects[0]):\n is_available = False\n break\n\n if is_available:\n availablepos.append(dblock.rpos)\n\n pos = random.randint(0, len(availablepos) - 1)\n s.rpos = availablepos[pos]",
"def __new_position(self):\n iterables = [range(self.size_x), range(self.size_y)]\n points = [] # Save all points in size.\n for point in itertools.product(*iterables):\n points.append(point)\n\n current_points = [] # Save used points.\n for object in self.objects:\n if (object.x, object.y) not in current_points:\n current_points.append((object.x, object.y))\n\n for point in current_points:\n points.remove(point) # Remove all used points.\n\n location = np.random.choice(a=range(len(points)), replace=False)\n return points[location]",
"def random_pose(self):\n position = self._start\n while self[position].distance < np.sum(self._rooms.shape) * 2:\n position = np.array(\n [random.randrange(limit) for limit in self._rooms.shape]\n )\n direction = random.choice(self.exits(position))\n return (position, direction)",
"def get_random_location(self):\n max_x, max_y, max_z, min_x, min_y, min_z = self.get_max_and_min()\n if max_x == float('-inf') and min_x == float('inf') and max_y == float('-inf') and min_y == float('inf') and \\\n max_z == float('-inf') and min_z == float('inf'):\n x = random.uniform(32, 33)\n y = random.uniform(35, 36)\n z = 0\n ans = x, y, z\n return ans\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n counter += 1\n x = random.uniform(max_x, min_x)\n y = random.uniform(max_y, min_y)\n z = random.uniform(max_z, min_z)\n if counter == 0: # means all nodes doesn't have any location\n x = random.uniform(32, 33)\n y = random.uniform(35, 36)\n z = 0\n ans = x, y, z\n else:\n ans = x, y, z\n return ans",
"def _place_nodes(self, i, j, step, max_nodes):\n points = []\n for k in range(max_nodes):\n while(True):\n t = Point(random.randint(i,i+step), random.randint(j,j+step)) \n if all([point.get_distance(t) > self.min_distance for point in points]):\n points.append(t)\n break\n \n for point in points:\n n=Node(self.counter, point)\n self.nodes.append(n)\n self.counter+=1",
"def list_of_positions():\n positions = []\n while len(positions) != 20:\n x = random.randrange(0, 20)\n y = random.randrange(0, 20)\n if (x, y) not in positions:\n positions.append((x, y))\n return positions",
"def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()",
"def random_coords(bounds):\n x_min, y_min, x_max, y_max = bounds\n x = np.random.randint(x_min, x_max)\n y = np.random.randint(y_min, y_max)\n return x, y",
"def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")",
"def _add_mines(self, mines_count: int, size: int, excluded: int):\n self.mines = random.sample(range(size), mines_count)\n excluded_list = [excluded]\n not_is_left = excluded % self.columns > 0\n not_is_rigth = excluded % self.columns < self.columns - 1\n if excluded // self.rows > 0:\n if not_is_left:\n excluded_list.append(excluded - self.columns - 1)\n excluded_list.append(excluded - self.columns)\n if not_is_rigth:\n excluded_list.append(excluded - self.columns + 1)\n if not_is_left:\n excluded_list.append(excluded - 1)\n if not_is_rigth:\n excluded_list.append(excluded + 1)\n if excluded // self.rows < self.rows - 1:\n if not_is_left:\n excluded_list.append(excluded + self.columns - 1)\n excluded_list.append(excluded + self.columns)\n if not_is_rigth:\n excluded_list.append(excluded + self.columns + 1)\n for el in excluded_list:\n try:\n index = self.mines.index(el)\n if index >= 0:\n new_value = random.randint(0, size - 1)\n while new_value in self.mines or new_value in excluded_list:\n new_value = random.randint(0, size - 1)\n self.mines[index] = new_value\n except ValueError:\n # index method throws ValueError exception if the value is not in the list\n pass\n for mine in self.mines:\n row = mine // self.columns\n column = mine % self.columns\n self._add_bomb(row, column)",
"def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]"
] | [
"0.6889577",
"0.68553376",
"0.6830914",
"0.6830659",
"0.66476494",
"0.64785975",
"0.64664406",
"0.646031",
"0.6445049",
"0.6439096",
"0.6436462",
"0.64131314",
"0.6397925",
"0.6376756",
"0.6361325",
"0.6299357",
"0.62326545",
"0.6220271",
"0.6188136",
"0.61843365",
"0.615677",
"0.6156487",
"0.61557424",
"0.6139877",
"0.6122892",
"0.61216825",
"0.61072123",
"0.6080504",
"0.6072965",
"0.6072531"
] | 0.7681125 | 0 |
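The generate_mines document above draws coordinates one at a time from a shrinking candidate list. An equivalent, shorter sketch uses random.sample to draw all distinct positions at once; the rows/cols parameter names below are assumptions chosen for illustration.

import random

def generate_mines(rows, cols, number):
    # Build every [row, col] candidate once, then draw a distinct sample of `number` of them.
    candidates = [[r, c] for c in range(cols) for r in range(rows)]
    return random.sample(candidates, number)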
Open neighbours if the flag number matches the count. | def special_open_neighbours(self, y, x):
if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x):
l = [[ye, xe] for xe in range(
x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]
for ye, xe in l:
if xe >= self.x or ye >= self.y: # do not open out of bounds
continue
# if it is a bomb but not flagged
if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:
self.show_answer_board([ye, xe])
print "KABOOM!"
return Minesweeper.IS_A_BOMB
self.open_neighbours(y, x)
self.print_table(self.table_state)
return Minesweeper.NOT_A_BOMB | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_open(self, n_faces):\r\n count_used = Counter([item for sublist in self.tiles\r\n for item in sublist\r\n if item in self.get_borders()])\r\n if min(count_used.values()) == n_faces:\r\n self.open = False",
"def checkNumNeighbors():",
"def count_neighbor_flags(self, i, j):\n return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])",
"def count_neighbor_flags(self, x, y):\n\t\treturn sum(self.marks[n][m] == FLAG for (n, m) in self.get_valid_neighbors(x, y))",
"def open_neighbours(self, y, x):\n if [y, x] in self.mine_locations:\n return [y, x]\n # generate neighbours with positive indexes\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n # if the indexes are out of the game table, skip\n if xe >= self.x or ye >= self.y:\n continue\n # if the current coordinates are still untouched, update their values\n if self.table_state[ye][xe] == '-':\n self.table_state[ye][xe] = self.final_table[ye][xe]\n # if the coordinate has a value of 0, recursively open it's neighbours.\n if self.final_table[ye][xe] == '0':\n self.open_neighbours(ye, xe)",
"def _open_zeros(self, display: bool = False) -> None:\n\n if display:\n print('Before \"Open Zeros\":')\n print(repr(self), \"\\n\")\n\n for pos, space in self._lookup.items():\n if space.hint == '0':\n for neighbor in space.neighbors.values():\n if neighbor and self._lookup[neighbor].hint == '?':\n self._open(*neighbor)\n if display:\n print('After \"Open Zeros\":')\n print(repr(self), \"\\n\")",
"def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board",
"def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board",
"def flags_nearby(self, y, x):\n count = 0\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y:\n continue\n if self.table_state[ye][xe] == Minesweeper.FLAG:\n count += 1\n return str(count)",
"def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count",
"def count_islands(grid):\n grid_copy = list(grid)\n count = 0\n for i in range(0, len(grid_copy)):\n for j in range (0, len(grid_copy[0])):\n if grid[i][j] and grid_copy[i][j]:\n _dfs(grid_copy, i, j)\n count += 1\n return count",
"def add_to_open(open, neighbour):\n for node in open:\n if neighbour == node and neighbour.f >= node.f:\n # Will not add if there already exists the same node in open that has lower f value\n return False\n\n return True",
"def check_neighbours(self, grid):\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total",
"def next_alive(neighbors, occupants):\n return bool((occupants == 0 and neighbors == 3) or\n (occupants == 1 and 2 <= neighbors <= 3))",
"def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)",
"def n_neighbors(self,n):\n return sum(1 for x in self.hex.get_neighbors_ring(n) if x is not None and x.is_occupied == 1)",
"def checkNeighbours(data):\n features = 0\n background = 0\n neighbours = [data[0,0],data[0,1],data[0,2],data[1,2],data[2,2],data[2,1],data[2,0],data[1,0]]\n fourConnected = False\n lastPoint = neighbours[-1] #Needed for checking a complete transition cycle\n for n in neighbours:\n if not n:\n features += 1\n elif fourConnected:\n background += 1\n\n fourConnected = not fourConnected\n lastPoint = n\n\n for pos,corner in enumerate(corners):\n if numpy.alltrue(data == corner):\n cornerPos = pos+1\n break\n else:\n cornerPos = 0\n return (features,background,cornerPos)",
"def all_bees_raised_flag(self):\n pos, com, success = self.perception\n if len(pos) > 0:\n return all(map(lambda x: x[1][\"flag\"] == (self.nr_of_possible_neighbors + 1), com))\n else:\n return True",
"def _check_satisfied_neighborhood(\n recursive_counter: int, stop_recursive: int, matrix_size: int\n) -> bool:\n return recursive_counter >= stop_recursive * (matrix_size ** 2)",
"def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()",
"def count_islands(matrix):\n visited = init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands",
"def _is_valid_count(self, count: int, gp: GriddedPerm) -> bool:\n return self._point_in_fuse_region(gp) + 1 == count",
"def neighbors_of_8(mapdata, x, y):\n eight_neigh = set()\n # if PathPlanner.is_cell_walkable(mapdata, x, y) == True:\n eight_neigh |= PathPlanner.neighbors_of_4(mapdata,x,y)\n if (PathPlanner.is_cell_walkable(mapdata, x+1, y+1)):\n eight_neigh |= {(x+1,y+1)}\n if (PathPlanner.is_cell_walkable(mapdata, x-1, y-1)):\n eight_neigh |= {(x-1,y-1)}\n if (PathPlanner.is_cell_walkable(mapdata, x+1, y-1)):\n eight_neigh |= {(x+1,y-1)}\n if (PathPlanner.is_cell_walkable(mapdata, x-1, y+1)):\n eight_neigh |= {(x-1,y+1)} \n\n return eight_neigh",
"def open(self, xy):\n if xy in self.opened:\n return\n \n self.opened.add(xy)\n if xy in self._mines:\n self.mines_near[xy] = 'mine'\n self.flag(xy) # simplifies playing after death logic\n self.lose()\n else:\n self.mines_near[xy] = len(self.neighbours[xy] & self._mines)\n self.flagged.discard(xy)\n self.empty_remaining -= 1\n if self.empty_remaining <= 0:\n self.win()",
"def get_neighbors(self, cell, count):\n row, col = cell\n # get all the neighbors\n neighbors = set([(min(self.height - 1, max(row + i, 0)), min(self.width - 1, max(col + j, 0))) \n for i in range(-1, 2)\n for j in range(-1, 2)])\n\n for neighbor in deepcopy(neighbors):\n if neighbor in self.safes or neighbor == cell:\n neighbors.remove(neighbor)\n elif neighbor in self.mines:\n neighbors.remove(neighbor)\n count -= 1\n\n return neighbors, count",
"def add_neighbor(self):\n self.fono += 1",
"def open_spots(self):\n ret = []\n for i in range(1,25):\n if self.nodes[i].piece == None:\n ret.append(i)\n return ret",
"def open_adjacents(self, row, col, opened_tile): \n # Iterates through neighboring tiles, only opening closed tiles adjacent to a zero tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.tiles[i][j].category == Tiles.closed):\n self.opened += 1\n self.tiles[i][j] = self.board[i][j]\n opened_tile.append(self.board[i][j])\n\n # Checks for a game winning move while opening adjacent tiles.\n if (self.opened + self.mines) == (self.rows * self.cols):\n self.game_won = True\n\n # If an adjacent tile is zero, recursively opens that tile's adjacent tiles.\n if self.board[i][j].category == Tiles.zero:\n self.open_adjacents(i, j, opened_tile)\n\n return opened_tile",
"def propagate(possible: np.array, count: ma.array, where: ma.array) -> int:\n while np.equal(count, 1, out=where).any():\n i, j = _neighbors[:, where, :]\n _, k = possible[where, :].nonzero()\n possible[i, j, k[:, np.newaxis]] = False\n if not possible.sum(axis=2, out=count).all():\n return -1 # site with 0 possibility => infeasibility\n count[where] = ma.masked # avoid repetitive work\n return count.count()",
"def count_paths((min_i, min_j), (max_i, max_j)):\n\n def explore((i, j), path):\n found = 0\n for (x, y) in neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if (x, y) == (max_i, max_j):\n found += 1\n debug(\"neighbor %r of node %r on path %r is a goal node: +1\" % ((x, y), (i, j), path))\n elif (x, y) in path: \n debug(\"neighbor %r of node %r already on path %r; ignoring...\" % ((x, y), (i, j), path))\n continue\n else:\n debug(\"neighbor %r of node %r not already on path %r; exploring ...\" % ((x, y), (i, j), path))\n found += explore((x, y), mkpath(path, (x, y)))\n return found\n return explore((0, 0), set([(0, 0)]))"
] | [
"0.6551041",
"0.6264699",
"0.6198644",
"0.6008779",
"0.59324574",
"0.58290005",
"0.58284974",
"0.58284974",
"0.58235085",
"0.5753008",
"0.57382154",
"0.5685492",
"0.56439877",
"0.5639313",
"0.56212676",
"0.5600274",
"0.55936295",
"0.55705774",
"0.55526507",
"0.5544796",
"0.55295527",
"0.5505192",
"0.54728234",
"0.5460384",
"0.5455767",
"0.5454988",
"0.54543024",
"0.5449874",
"0.5410083",
"0.5405375"
] | 0.65664035 | 0 |
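The special_open_neighbours document above implements the "chording" rule: open all unflagged neighbours once the number of adjacent flags equals the revealed cell's number. A hedged sketch of the same check, written independently of the Minesweeper class and assuming a board of one-character strings with 'F' as the flag marker:

def neighbours(y, x, height, width):
    # Yield in-bounds neighbour coordinates of (y, x).
    for ny in range(max(y - 1, 0), min(y + 2, height)):
        for nx in range(max(x - 1, 0), min(x + 2, width)):
            if (ny, nx) != (y, x):
                yield ny, nx

def can_chord(board, y, x, flag='F'):
    # True when the adjacent flag count equals the revealed cell's number.
    cell = board[y][x]
    if not cell.isdigit() or cell == '0':
        return False
    flags = sum(1 for ny, nx in neighbours(y, x, len(board), len(board[0]))
                if board[ny][nx] == flag)
    return flags == int(cell)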
Open neighbours if the current coordinates are 0 and the neighbours are untouched. Recursively open if the neighbours are also 0. | def open_neighbours(self, y, x):
if [y, x] in self.mine_locations:
return [y, x]
# generate neighbours with positive indexes
l = [[ye, xe] for xe in range(
x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]
for ye, xe in l:
# if the indexes are out of the game table, skip
if xe >= self.x or ye >= self.y:
continue
# if the current coordinates are still untouched, update their values
if self.table_state[ye][xe] == '-':
self.table_state[ye][xe] = self.final_table[ye][xe]
# if the coordinate has a value of 0, recursively open it's neighbours.
if self.final_table[ye][xe] == '0':
self.open_neighbours(ye, xe) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board",
"def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board",
"def special_open_neighbours(self, y, x):\n if self.table_state[y][x] != \"-\" and self.table_state[y][x] == self.flags_nearby(y, x):\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y: # do not open out of bounds\n continue\n # if it is a bomb but not flagged\n if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:\n self.show_answer_board([ye, xe])\n print \"KABOOM!\"\n return Minesweeper.IS_A_BOMB\n self.open_neighbours(y, x)\n self.print_table(self.table_state)\n return Minesweeper.NOT_A_BOMB",
"def open_adjacents(self, row, col, opened_tile): \n # Iterates through neighboring tiles, only opening closed tiles adjacent to a zero tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.tiles[i][j].category == Tiles.closed):\n self.opened += 1\n self.tiles[i][j] = self.board[i][j]\n opened_tile.append(self.board[i][j])\n\n # Checks for a game winning move while opening adjacent tiles.\n if (self.opened + self.mines) == (self.rows * self.cols):\n self.game_won = True\n\n # If an adjacent tile is zero, recursively opens that tile's adjacent tiles.\n if self.board[i][j].category == Tiles.zero:\n self.open_adjacents(i, j, opened_tile)\n\n return opened_tile",
"def _open_zeros(self, display: bool = False) -> None:\n\n if display:\n print('Before \"Open Zeros\":')\n print(repr(self), \"\\n\")\n\n for pos, space in self._lookup.items():\n if space.hint == '0':\n for neighbor in space.neighbors.values():\n if neighbor and self._lookup[neighbor].hint == '?':\n self._open(*neighbor)\n if display:\n print('After \"Open Zeros\":')\n print(repr(self), \"\\n\")",
"def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list",
"def check_neighbours(matrix, cur_pos, visited):\n visited[cur_pos[0]][cur_pos[1]] = True\n\n for i in range(num_of_neighbours):\n cur_neighbour = (cur_pos[0]+neighbours_positions[i][0], cur_pos[1]+neighbours_positions[i][1])\n if is_safe(matrix, cur_neighbour, visited):\n check_neighbours(matrix, cur_neighbour, visited)",
"def update_neighbors(self):\n neighbors = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n if (i, j) == (0, 0):\n continue\n try:\n y, x = self.loc[0]+i, self.loc[1]+j\n neighbor = self.board.array[y, x]\n if neighbor > 0:\n neighbors.append(neighbor)\n except:\n continue\n \n self.neighbors = neighbors",
"def _dfs(grid, i, j):\n grid[i][j] = False\n for x in range(i - 1, i + 2):\n for y in range(j - 1, j + 2):\n if (abs((x + y) - (i + j)) == 1) and _is_valid_land(x, y, grid):\n _dfs(grid, x, y)",
"def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False",
"def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()",
"def draw_open_cells(self):\n empty_cells = [cell for cell in self.game.get_cells() if cell.player == 0]\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=empty_cells,\n edgecolors='black', node_color='white', linewidths=2)",
"def dfs(cell):\n r, c = cell\n if (0 <= r < len(grid)) and (0 <= c < len(grid[0])) and (cell not in visited) and (grid[r][c] != 0):\n\n visited.add((r, c)) # save cell\n grid[r][c] = self.num_islands\n # update current island size\n dfs((r, c+1))\n dfs((r+1, c))\n dfs((r-1, c))\n dfs((r, c-1))\n\n else:\n # out of bounds or visited\n return",
"def check_neighbours(self, grid):\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total",
"def neighbors(self, x):\n pass",
"def open(self, i, j):\n if not self.isOpen(i, j):\n # set open to true\n self.arr_open[self._index(i, j)] = True\n # connect to surrounding sites\n [self.qu.union(self._index(i, j), self._index(x[0], x[1]))\n for x in [(i + 1, j), (i - 1, j), (i, j - 1), (i, j + 1)]\n if self.isOpen(x[0], x[1])]",
"def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]",
"def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos",
"def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours",
"def get_neighbors(start_square, visited=[]):\n neighbors = []\n\n # loop over possible x values\n for i in [start_square.x - 1, start_square.x, start_square.x + 1]:\n\n # drop neighbors outside of our region of interest\n if i < 0 or i > MAX_X:\n continue\n\n # loop over possible y values\n for j in [start_square.y - 1, start_square.y, start_square.y + 1]:\n\n # drop neighbors outside of our region of interest\n if j < 0 or j > MAX_Y:\n continue\n\n # Ignore ourself\n if i == start_square.x and j == start_square.y:\n continue\n\n # Ignore corner pieces\n if i == start_square.x - 1 and j != start_square.y:\n continue\n if i == start_square.x + 1 and j != start_square.y:\n continue\n\n # Deal with barriers\n found = False\n for square in visited:\n if square.pos == [i, j]:\n found = True\n break\n if found:\n continue\n\n neighbors.append(Square(i, j))\n\n return neighbors",
"def fn(i, j, empty):\n nonlocal ans \n if grid[i][j] == 2: \n if empty == -1: ans += 1\n return \n grid[i][j] = -1 # mark as visited \n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n and grid[ii][jj] != -1: \n fn(ii, jj, empty-1)\n grid[i][j] = 0 # backtracking",
"def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)",
"def graph_search(problem, open_nodes):\n explored = [problem.initial]\n open_nodes.append(Node(problem.initial))\n while len(open_nodes) > 0:\n node = open_nodes.pop()\n if problem.goal_test(node.state):\n #print \"Path cost: %d\" % node.path_cost\n print 'Broj poteza: ' + str(len(node.solution())-1)\n return node.solution()\n for child in node.expand(problem):\n if child.state not in explored:\n open_nodes.append(child)\n explored.append(child.state)\n return None",
"def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n",
"def neighbors(current_node, maze):\n UP, DOWN, LEFT, RIGHT = -1, 1, -1, 1\n neighbors = []\n pos = [(0, UP), (0, DOWN), (LEFT, 0), (RIGHT, 0)]\n diag = [(LEFT, UP), (RIGHT, DOWN), (LEFT, DOWN), (RIGHT, UP)]\n if not args.disable_diagonal:\n pos += diag\n for new_position in pos:\n node_position = (\n current_node.position[0] + new_position[0],\n current_node.position[1] + new_position[1],\n ) \n # range check\n if (\n node_position[0] > (len(maze) - 1)\n or node_position[0] < 0\n or node_position[1] > (len(maze[node_position[0]]) - 1)\n or node_position[1] < 0\n ):\n continue\n # wall check\n if new_position in diag:\n if (\n maze[current_node.position[0]][current_node.position[1] + new_position[1]] == 0 \n and maze[current_node.position[0] + new_position[0]][current_node.position[1]] == 0\n ):\n continue\n if maze[node_position[0]][node_position[1]] == 0:\n continue\n new_node = Node(node_position)\n # g is how the cost of the step\n if new_position[0] != 0 and new_position[1] != 0:\n new_node.g = current_node.g + 1.44\n else:\n new_node.g = current_node.g + 1\n new_node.parent = current_node\n neighbors.append(new_node)\n return neighbors",
"def neighbors(self, cell):\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor",
"def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1",
"def search(self):\n open_set = set()\n closed_set = set()\n open_set.add(self.start_node)\n\n # loop through all nodes until open set is empty to build neighbor map\n while open_set:\n current_node = open_set.pop()\n closed_set.add(current_node)\n for removed_cells, score, next_status in current_node.find_next_moves():\n open_status_set = [i.status for i in open_set]\n closed_status_set = [i.status for i in closed_set]\n if next_status in open_status_set:\n index = open_status_set.index(next_status)\n node = list(open_set)[index]\n elif next_status in closed_status_set:\n index = closed_status_set.index(next_status)\n node = list(closed_set)[index]\n else:\n node = PopstarsNode(next_status)\n open_set.add(node)\n node.parents.append(current_node)\n current_node.children[node].append(\n (score, removed_cells, True))\n current_node.update_parents()\n max_score = []\n for i in self.start_node.children:\n max_score += self.start_node.children[i]\n return max(max_score)[0]",
"def get_unvisited_neighbours(self, grid):\n\t\tfor neighbour in self.get_neighbours(grid):\n\t\t\tif not neighbour.visited:\n\t\t\t\tyield neighbour",
"def Explore(self,x,y):\n if [x,y]==self.MazeKey and self.State:\n self.State=False\n return self.Path\n\n if [x,y]==self.CurrentCell and not self.State:\n return self.Path\n \n if not self.North[x][y] and [x,y+1] not in self.Path: #If false and not in the explore path so far.\n self.track.push([x,y],self.FootPrints)\n self.track.push([x,y+1],self.Path)\n return self.Explore(x,y+1)\n \n elif not self.East[x][y] and [x+1,y] not in self.Path:\n self.track.push([x,y],self.FootPrints)\n self.track.push([x+1,y],self.Path)\n return self.Explore(x+1,y)\n \n elif not self.South[x][y] and [x,y-1] not in self.Path:\n self.track.push([x,y],self.FootPrints)\n self.track.push([x,y-1],self.Path)\n return self.Explore(x,y-1)\n\n elif not self.West[x][y] and [x-1,y] not in self.Path:\n self.track.push([x,y],self.FootPrints)\n self.track.push([x-1,y],self.Path)\n return self.Explore(x-1,y)\n else:\n prev=self.track.pop(self.FootPrints)\n x=prev[0]\n y=prev[1]\n self.track.push(prev,self.Path)\n return self.Explore(x,y)"
] | [
"0.6919871",
"0.6919871",
"0.68459386",
"0.6506167",
"0.64691454",
"0.615098",
"0.61087185",
"0.6104565",
"0.60708123",
"0.60645074",
"0.60544246",
"0.5988625",
"0.592371",
"0.5901404",
"0.5862205",
"0.5847411",
"0.5810795",
"0.5798435",
"0.5754326",
"0.57496643",
"0.5704948",
"0.5691",
"0.5636125",
"0.56204915",
"0.56139606",
"0.5602924",
"0.5596358",
"0.5587375",
"0.55641055",
"0.5562893"
] | 0.75434107 | 0 |
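The recursive open_neighbours document above is a flood fill over zero-valued cells. An iterative version with an explicit stack avoids Python's recursion limit on large boards; this sketch assumes a board represented, as above, by lists of one-character strings with '-' marking unopened cells.

def flood_open(state, answer, y, x):
    # Iteratively reveal cells, expanding through cells whose answer is '0'.
    stack = [(y, x)]
    while stack:
        cy, cx = stack.pop()
        if state[cy][cx] != '-':
            continue
        state[cy][cx] = answer[cy][cx]
        if answer[cy][cx] == '0':
            for ny in range(max(cy - 1, 0), min(cy + 2, len(state))):
                for nx in range(max(cx - 1, 0), min(cx + 2, len(state[0]))):
                    if state[ny][nx] == '-':
                        stack.append((ny, nx))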
Come here when the coordinates do not have a bomb. Update the table_state with the selected coordinate. | def tease_user(self, y, x):
self.table_state[y][x] = self.final_table[y][x]
# if there are no neighbouring 0s, open neighbours
if self.table_state[y][x] == '0':
self.open_neighbours(y, x)
self.print_table(self.table_state) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click_cell(self, event):\n if (self.world_setable):\n x, y = event.x, event.y\n row = y / self.cell_size\n col = x / self.cell_size\n if ((row in range(self.cell_row)) and\n (col in range(self.cell_col))):\n status_now = not self.world_status.now[row, col]\n if (status_now):\n color = self.color_alive\n else:\n color = self.color_dead\n item_id = self.world[row, col]\n self.canvas.itemconfig(item_id, fill=color)\n self.world_status.now[row, col] = status_now\n self.world_status.next = self.world_status.now.copy()\n self.init_world = self.world_status.now.copy()",
"def create_foothold(self):\n sel = self.selected()\n cell = sel[0]\n if cell.contents == Contents.bomb:\n cell.contents = Contents.empty\n for adj in cell.get_adjacent():\n if adj.contents == Contents.bomb:\n adj.contents = Contents.empty\n self.set_bomb_contacts()",
"def mouseClick(self, event):\n if self.editMode:\n self.applyEditing(event)\n self.clearEditCursor(event)\n return\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if self.checkFree(x, y) == self.colors['busy']:\n return # clicked busy position\n self.onBoard += 1\n self.refreshScore()\n self.history.append((\n self.setBusy(x, y),\n self.addPentomino(x, y)\n ))\n if self.onBoard == self.expectedBest:\n self.gameOver()",
"def update_state(self):\n self.reset_state()\n for piece in self.pieces:\n coordinates = piece.get_block_positions()\n for coor in coordinates:\n x, y = coor\n self.state[y][x] = piece",
"def table_move_update():\n pos = self.variables.table.get_current_position()\n self.table_move_ui.x_move.setProperty(\"value\", int(pos[0]))\n self.table_move_ui.y_move.setProperty(\"value\", int(pos[1]))\n self.table_move_ui.z_move.setProperty(\"value\", int(pos[2]))",
"def change_cell(self, event):\n try:\n (x, y) = self.get_id_from_coor(event.x, event.y)\n if self._board[x][y]:\n self._board[x][y] = False\n else:\n self._board[x][y] = True\n if self._board[x][y]:\n self.canvas.itemconfig(self.rect[y,x], fill=self._secondary_color)\n else:\n self.canvas.itemconfig(self.rect[y,x], fill=self._primary_color)\n except KeyError:\n pass # tkinter bug",
"def pick(self, obj_height_from_table):\n init_x = self.x\n init_y = self.y\n init_z = self.z\n obj_z = self.table_z + obj_height_from_table*self.disk_height\n \n #open gripper\n self.gripper.command_position(100)\n \n #drop to given height\n self.move_to(init_x, init_y, obj_z)\n \n #close gripper\n self.gripper.command_position(0)\n \n #return to initial position\n self.move_to(init_x, init_y, init_z)",
"def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))",
"def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]",
"def flag(self, y, x):\n if self.table_state[y][x] == '-':\n self.table_state[y][x] = Minesweeper.FLAG\n Minesweeper.print_table(self.table_state)",
"def make_cell_change(self, x, y):\n self.cells[x][y] = 1 if not self.cells[x][y] else 0",
"def update_board(self, coordinate, hit):\n \n if hit:\n self.board_state[coordinate.row_idx][coordinate.col_idx] = \"H\"\n else:\n self.board_state[coordinate.row_idx][coordinate.col_idx] = \"M\"",
"def dirty_squares(self) -> None:\n row = ran(0, self.__squares.__len__() - 1)\n column = ran(0, self.__squares[0].__len__() - 1)\n self.__squares[row][column] = Floor._dirty\n print(\"Ensuciamos el piso y quedo así: \", self.__str__())",
"def _set_state_coordinates(atomic_entity, width, height):\n state_entity = atomic_entity.get(\"children\")[0]\n parent_coor = atomic_entity[\"coordinates\"]\n state_entity[\"coordinates\"] = {\n \"x\": parent_coor[\"x\"] + (parent_coor[\"width\"] - width) / 2,\n \"y\": parent_coor[\"y\"] - (height / 2),\n \"width\": width,\n \"height\": height,\n }",
"def main_board_maintenance(self,x_cor,y_cor):\r\n\t\r\n\t\tfor event in pygame.event.get(): \r\n\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\t\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t#print(x_adjusted/80,y_adjusted/80)\r\n\r\n\t\t\t\tif self.selected_from_selection_bar :\r\n\t\t\t\t\t#print('inside selection bar selection option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\ttemp_game_state = CP.game_data()\r\n\t\t\t\t\ttemp_game_state = copy.deepcopy(self.game_state)\r\n\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\ttemp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\ttemp_game_state.active_color = not temp_game_state.active_color\r\n\t\t\t\t\tfen = temp_game_state.generate_fen()\r\n\t\t\t\t\tboard2 = chess.Board(fen=fen)\r\n\t\t\t\t\tprint(board2)\r\n\t\t\t\t\tprint(fen)\r\n\t\t\t\t\tprint('board2.is_check()',board2.is_check())\r\n\t\t\t\t\t\r\n\t\t\t\t\t#now we need to place the piece on board\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None:\r\n\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\tif not board2.is_check():\r\n\t\t\t\t\t\t\tif self._check_valid_position_(x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\t\tself.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\t#rajan's\r\n\t\t\t\t\t\t\t\t#print(self.selected_piece)\r\n\t\t\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\t\t\t\tself.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\t\t\t\tself.selected_piece = None\r\n\t\t\t\t\t\t\t\tself.selected_position = None\r\n\r\n\t\t\t\t\t\t\t\tself.computer_turn =True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t#board position is filled then nothing to do\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#if his piece change selection\r\n\t\t\t\t\t\tself.selected_from_selection_bar =False\r\n\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\r\n\r\n\t\t\t\telif self.selected_from_board:\r\n\t\t\t\t\t#print('inside selection bar board option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\t\r\n\t\t\t\t\tomega = True\r\n\t\t\t\t\tif self.selected_position:\r\n\t\t\t\t\t\tif self.selected_position == (x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\tomega = False\r\n\t\t\t\t\t#print(self.selected_position,(x_adjusted,y_adjusted))\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tmove = self._check_valid_move_(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\tprint(move)\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tif move:\r\n\t\t\t\t\t\t\tself.computer_turn = True\r\n\t\t\t\t\t\t\t#if move contains x then we have update state of captured piece\r\n\t\t\t\t\t\t\t#else just update selected 
piece\r\n\t\t\t\t\t\t\t#print(\"correct move\")\r\n\t\t\t\t\t\t\tself.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted)\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\tif self.whose_move == 'white':\r\n\t\t\t\t\t\t\tif 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif self.whose_move == 'black':\r\n\t\t\t\t\t\t\tif 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#it is none means nothing is their so nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\r\n\t\t\t\r\n\r\n\t\t\telse:\r\n\t\t\t\t#print(\"not_pressed\")\r\n\t\t\t\tpass",
"def mover_bm_izquierda(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1]],\n [self.vertice_1[0] - self.velocidad,self.vertice_1[1]], \n [self.vertice_1[0] - 5 - 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1] + 1],\n [self.vertice_3[0] - self.velocidad,self.vertice_3[1]],\n [self.vertice_3[0] - 5,self.vertice_3[1]]) \n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x -= self.velocidad * (self.x >= 15)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] - self.nueva_posicion_posible_parte_superior[1] *(self.nueva_posicion_posible_parte_inferior[0] != 1) * (self.nueva_posicion_posible_parte_superior[0] != 1), self.casilla[1]]\n self.redefinir_vertices()",
"def update_cell(self, x, y, value):\n x1, y1 = self.transpose_coordinates(x, y)\n if self.is_in_field(x1, y1):\n self._cells[y1][x1] = value\n return True\n return False",
"def change_cell(self):\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.mu = mu\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n\n raise GeometryException(\"No inner boundary in homogeneous sphere\")\n\n else:\n # packet is transported into target cell\n\n self.mu = mu\n\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n self.cell_dV = self.grid.dV[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()",
"def _check_event(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self.waiting = not self.waiting\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.waiting:\n x,y = pygame.mouse.get_pos()\n cell_addr_y = int(y/self.cell_width)\n cell_addr_x = int(x/self.cell_width)\n self.cells[cell_addr_y][cell_addr_x].update()",
"def check_position():\n if self.variables.table:\n pos = self.variables.table.get_current_position()\n position_update()",
"def move(self):\r\n if self.d == 'NORTH' and (self.y + 1) <= table_max_y:\r\n self.y += 1\r\n elif self.d == 'EAST' and (self.x + 1) <= table_max_x:\r\n self.x += 1\r\n elif self.d == 'SOUTH' and (self.y - 1) >= 0:\r\n self.y -= 1\r\n elif self.d == 'WEST' and (self.x - 1) >= 0:\r\n self.x -= 1\r\n else:\r\n print(\"Edge of Table Reached!\")",
"def setUpBombs(self, event):\n pos = (event.widget.row * self.cols) + event.widget.col\n size = self.rows * self.cols\n \n #get a list random indexes in range to be mines\n mines = random.sample(range(size), self.numMines)\n if pos in mines:\n mines.remove(pos)\n temp = random.sample(range(size), 1)[0]\n while (temp == pos): temp = random.sample(range(size), 1)[0]\n mines.append(temp)\n \n #mark all mine squares as mines\n for mine in mines:\n targetRow = int(mine/self.cols)\n targetCol = mine % self.cols\n self.tiles[targetRow][targetCol].setMine()\n\n #calculate the number in each Square of the current game\n for row in self.tiles:\n for tile in row:\n if not tile.isMine():\n counter = 0\n for adjTile in self.getAdjacentTiles(tile.row,tile.col):\n if adjTile.isMine(): counter += 1\n tile.setCount(counter)\n \n self.minesArmed = True\n self.startTime = time.time()\n return 1",
"def result(self, state, action):\n\n # blank is the index of the blank square\n blank = self.find_blank_square(state)\n new_state = list(state)\n\n delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1}\n neighbor = blank + delta[action]\n new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank]\n\n return tuple(new_state)",
"def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True",
"def flag_cell(self, event):\n if self.mineboard.gamestate is None:\n x = (event.x-2) // CELLWIDTH\n y = (event.y-2) // CELLWIDTH\n self.mineboard.flag_cell(y, x)\n self.update_cells()\n mines_rem = self.mineboard.minecount - self.mineboard.flagcount\n # updates the mines_left label\n if mines_rem == 1:\n self.mines_left.set(f\"{mines_rem} mine left\")\n else:\n self.mines_left.set(f\"{mines_rem} mines left\")",
"def setBusy(self, x, y):\n changes = []\n for i in range(self.numPieces):\n new_x = x + self.pos[self.rotation][i][0]\n new_y = y + self.pos[self.rotation][i][1]\n changes.append((new_x, new_y))\n self.gridBusy[new_x][new_y] = self.onBoard\n self.correctPending()\n return changes",
"def update(self, new_state):\n\n if self.update_animation:\n self.canvas.delete(self.agent)\n row, col = new_state\n x1 = col * self.GRID_ROW_HEIGHT\n y1 = row * self.GRID_ROW_HEIGHT\n self.agent = self.canvas.create_image(x1 + self.GRID_ROW_HEIGHT / 2, y1 + self.GRID_ROW_HEIGHT / 2,\n image=self.penguin)",
"def update_state_v1(self, dbsession, state):\n moving_player = self.active_player()\n board = self.update(state)\n table_game = dbsession.query(TableGame).filter(\n TableGame.game == board.id).first()\n table_board = TableBoard(\n board_state=dumps(tuple(map(tuple, board.board))),\n move_num=board._board.move_count,\n player=board.active_player(),\n game=board.id)\n if table_game: # TODO(grandquista)\n table_board.game_link.append(table_game)\n dbsession.add(table_board)\n if board:\n board.poke_player(False)\n return {'end': False}\n board.poke_player(True, moving_player)\n if board._board.has_kings():\n table_game.one_won = False\n table_game.two_won = False\n elif moving_player == table_game.player_one:\n table_game.two_won = False\n else:\n table_game.one_won = False\n board.close()\n return {'end': True}",
"def in_cell(self):\n for player in self.players:\n for cell in self.cell_lst:\n if player.x in cell[0] and player.y in cell[1]:\n player.current_cell = cell\n break",
"def select_small_cell(self, event):\n row = (self.small_yoffset - event.y) / self.small.height\n row = fmsgeom.Small.nrows() - 1 - row\n if event.x > self.xoffset:\n column = (event.x - self.xoffset) / self.small.width\n else:\n column = (self.xoffset - event.x) / self.small.width\n detector = NORTH_SMALL\n if event.x > self.xoffset:\n detector = SOUTH_SMALL\n cell = self.detectors[detector].get_cell(row, column)\n if cell is not None:\n d = dialog.CellDialog(self.tkroot,\n self.dialog_title(cell), cell)\n # Update the overall programme modified state.\n self.modified = d.modified or self.modified\n self.canvas.focus_set()"
] | [
"0.61604476",
"0.57963514",
"0.5728119",
"0.5681672",
"0.5650815",
"0.5578061",
"0.55363643",
"0.55130213",
"0.548478",
"0.5473019",
"0.54568815",
"0.54379356",
"0.5364507",
"0.5332647",
"0.53126705",
"0.5300106",
"0.52811605",
"0.5278566",
"0.52776027",
"0.5275714",
"0.5265053",
"0.5261587",
"0.5239645",
"0.5238544",
"0.5236494",
"0.5228478",
"0.5191042",
"0.51905423",
"0.518508",
"0.51773024"
] | 0.6044062 | 1 |
Method that checks whether a file exists at the provided URL. | def file_exist(file_url):
try:
response = requests.head(file_url)
if 200 <= response.status_code < 300:
return True
return False
except ConnectionError:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _url_exists(self, url):\n return url_exists(url)",
"def exists(self, url):\n return (self.base_path / url).exists()",
"def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False",
"def _url_exists(url):\n h = httplib2.Http()\n try:\n resp = h.request(url, 'HEAD')\n if resp[0].status == 200:\n return True\n except (httplib2.RelativeURIError, httplib2.ServerNotFoundError):\n return False",
"def url_exists(url):\n\n try:\n connection = urlopen(url)\n return connection.getcode() < 400\n except Exception as e:\n return False",
"def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False",
"def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False",
"def exists(self, url):\n url = urlparse.urlparse(url)\n connection = httplib.HTTPConnection(url.hostname, timeout=CONNECTION_TIMEOUT)\n \n try:\n connection.request(\"HEAD\", url.geturl())\n response = connection.getresponse()\n except:\n return False\n \n if str(response.status)[0] not in [\"2\", \"3\"]:\n return False\n \n connection.close()\n return True",
"def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok",
"def url_exists(url):\r\n from urllib import parse\r\n res = parse.urlparse(url)\r\n if res.scheme == 'gs':\r\n # blob_name has no '/' prefix\r\n bucket_name, blob_name = res.netloc, res.path[1:]\r\n from google.cloud import storage\r\n storage_client = storage.Client()\r\n bucket = storage_client.get_bucket(bucket_name)\r\n blob = bucket.blob(blob_name)\r\n return blob.exists()\r\n else:\r\n return os.path.exists(res.path)",
"def check_remote_file_exists(url, login=None, password=None):\r\n credentials = None\r\n if login and password:\r\n credentials = login, password\r\n\r\n response = requests.get(url,\r\n stream=True,\r\n verify=False,\r\n auth=credentials)\r\n if response.status_code >= 400 or response.status_code < 200:\r\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\r\n\r\n response.close()",
"def check_if_exist(self,url):\r\n\t\t\"\"\" verefier si un lien existe \"\"\"\r\n\t\trequest = mechanize.Request(url)\r\n\t\tBAD_REQ = [400,401,404]\r\n\t\ttry :\r\n\t\t\tresponse = mechanize.urlopen(request)\r\n\t\t\tif response.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True\r\n\t\texcept urllib2.HTTPError, error:\r\n\t\t\tif error.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True",
"def is_file_exists(self):\n pass",
"def exists(self, path):\n\n # First test for local path\n if os.path.exists(path):\n return True\n\n # We import this here because importing urllib is slow and\n # a significant fraction of numpy's total import time.\n from urllib.request import urlopen\n from urllib.error import URLError\n\n # Test cached url\n upath = self.abspath(path)\n if os.path.exists(upath):\n return True\n\n # Test remote url\n if self._isurl(path):\n try:\n netfile = urlopen(path)\n netfile.close()\n del(netfile)\n return True\n except URLError:\n return False\n return False",
"def check_file_exist(self):\n return False",
"def file_url(self, url):\n return self.is_regex_url(url, self.is_file_regex)",
"def url_check(url):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n urllib.request.urlopen(request)\n return True\n \n except ValueError:\n return False\n\n except urllib.request.HTTPError:\n return False\n \n except URLError:\n return False",
"def path_exists(path):\n if path.startswith('http://') or path.startswith('https://'):\n return True\n\n return isfile(path)",
"def file_exist() -> bool:\n pass",
"def _verify_url_exists(url, use_head=False):\n # (str, bool) -> bool\n try:\n if use_head:\n resp = requests.head(url)\n else:\n resp = requests.get(url)\n except requests.exceptions.ConnectionError:\n return False\n\n return resp.status_code in [200, 302]",
"def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']",
"def file_exists(filename: str):\n if osp.exists(filename) is True:\n return True\n else:\n return False",
"def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False",
"def exists_filing(dir, url, length):\n\tfilepath = os.path.join(dir,url.split('/')[-1])\n\treturn os.path.exists(filepath) and (length is None or os.path.getsize(filepath) == length)",
"def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)",
"def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. Try again\")\n return False",
"def url_checker(url_str):\n file_msg = fd.Program_Msg(__file__)\n ## Checking input parameters\n if not (isinstance(url_str, str)):\n msg = '{0} `url_str` ({1}) is not a STRING!'.format(file_msg,\n type(url_str))\n raise LSSUtils_Error(msg)\n ##\n ## Checking Website\n request_url = requests.get(url_str)\n if (request_url.status_code != 200):\n msg = '{0} `url_str` ({1}) does not exist!'.format(file_msg, url_str)\n raise LSSUtils_Error(msg)",
"def file_exists(self):\r\n if os.path.exists(self.file_path):\r\n return True\r\n else:\r\n return False",
"def check_url(url):\n return get_svninfo(url) != {}",
"def check_url(url):\n return 'products.json' in url"
] | [
"0.7852359",
"0.7844551",
"0.77893347",
"0.77855074",
"0.7775254",
"0.7766845",
"0.7766845",
"0.76954746",
"0.76517665",
"0.75399566",
"0.7395437",
"0.7388874",
"0.7315557",
"0.72969633",
"0.72895604",
"0.7279086",
"0.7263453",
"0.7241822",
"0.71523726",
"0.7136986",
"0.7090065",
"0.70799917",
"0.70388234",
"0.6976379",
"0.6958026",
"0.69191253",
"0.6865299",
"0.6863917",
"0.6855283",
"0.68398225"
] | 0.84588355 | 0 |
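A minimal usage sketch for the file_exist record above, assuming the surrounding module imports requests; the URL and helper name below are illustrative only. Note that network failures surface as requests.exceptions.ConnectionError, which is not a subclass of the built-in ConnectionError, so catching requests.RequestException is the safer net unless the original module imports requests' own exception class.

import requests

def url_file_exists(file_url, timeout=5):
    # HEAD request: cheap existence probe, mirroring the record above
    try:
        response = requests.head(file_url, timeout=timeout)
        return 200 <= response.status_code < 300
    except requests.RequestException:  # connection errors, timeouts, malformed URLs
        return False

# Illustrative call (hypothetical URL):
# url_file_exists("https://example.com/report.pdf")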
Method that returns the appropriate hash based on the file URL. | def get_hash(file_url):
file_extension = os.path.splitext(file_url)[1]
return str(HASHES.get(file_extension)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hash(self, filepath):\n if (os.path.isfile(filepath) and not (\n os.path.islink(filepath) and self.ignorelinks)):\n file_hash = self.hashfile(open(filepath, 'rb'))\n else:\n file_hash = self.hashstring(filepath)\n if not self._increment_hash:\n self._increment_hash = file_hash\n else:\n self._increment_hash = self.hashstring(\n self._increment_hash + file_hash)\n return file_hash",
"def get_hash(self, url, hash_type):\n hasher = StreamHasher(chunk_size=self.multipart_chunksize, hashes=[hash_type])\n path = self.base_path / url\n if not path.exists():\n return None\n with path.open(\"rb\", self.CHUNK_SIZE) as f:\n hasher.compute(f)\n return hasher.hexdigest(hash_type)",
"def static_file_hash(filepath):\n hasher = hashlib.md5() # nosec: B303\n\n with contextlib.closing(open(filepath, 'rb')) as file:\n hasher.update(file.read())\n return hasher.hexdigest()",
"def _actual_hash(self):\n return hash_of_file(join(self._temp_path, self._downloaded_filename()))",
"def calc_file_hash(filepath):\n with open(filepath, 'rb') as f:\n return md5(f.read()).hexdigest()",
"def hash_from_file(file_path):\r\n return hash_from_code(open(file_path, 'rb').read())",
"def _get_hash(self, path):\n with open(path, \"r\") as fp:\n content = fp.read()\n\n return sha256(content).hexdigest()",
"def getHashFile(file):\n try:\n fileContent = open(file, 'rb').read()\n except:\n raise IOError, \"No such file...\"\n return False\n return getHash(fileContent)",
"def hashfile(file):\n\n hasher = hashlib.sha256()\n\n with open(file, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n hasher.update(buf)\n\n return(hasher.hexdigest())",
"def get_file_hash(self, filepath):\n if filepath not in self._file_hash_cache:\n self._file_hash_cache[filepath] = self.static_file_hash(filepath)\n return self._file_hash_cache[filepath]",
"def get_file_hash(file_path):\n with open(file_path, 'rb') as f:\n file_name = os.path.basename(file_path)\n to_hash = f.read() + file_name.encode('utf-8')\n new_hash = hashlib.md5(to_hash).hexdigest()\n return new_hash",
"def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()",
"def hash_file(path: str) -> str:\n return _hash_file(path, hashlib.md5()).hexdigest()",
"def compute_hash(fileName):\n m = hashlib.sha1()\n try:\n fd = open(fileName,\"rb\")\n except IOError:\n print (\"Unable to open the file in readmode:\", fileName)\n return\n content = fd.readlines()\n fd.close()\n for eachLine in content:\n m.update(eachLine)\n return m.hexdigest()",
"def calculate_hash(filename, raise_on_not_found = False):\n if not is_file(filename) and not raise_on_not_found:\n return \"NOTFOUND\"\n\n with open(filename, \"rb\") as file:\n sha256 = hashlib.sha256()\n buf = file.read(128)\n while len(buf) > 0:\n sha256.update(buf)\n buf = file.read(128)\n return str(binascii.hexlify(sha256.digest()), \"utf8\")",
"def hash_file(method, path):\n f = open(path, \"rb\")\n h = method()\n while True:\n buf = f.read(BUFSIZE)\n if not buf:\n break\n h.update(buf)\n return h.hexdigest()",
"def hash_file(filename):\r\n\r\n # make a hash object\r\n h = hashlib.sha1()\r\n\r\n # open file for reading in binary mode\r\n with open(filename,'rb') as file:\r\n\r\n # loop till the end of the file\r\n chunk = 0\r\n while chunk != b'':\r\n # read only 1024 bytes at a time\r\n chunk = file.read(1024)\r\n h.update(chunk)\r\n\r\n # return the hex representation of digest\r\n return h.hexdigest()",
"def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()",
"def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n \n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()",
"def get_file_hash (fullpath) : \n\n # This bit was sourced from Stack Overflow via Google, specifically:\n # http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python\n\n md5 = hashlib.md5()\n with open(fullpath,'rb') as f: \n for chunk in iter(lambda: f.read(512*md5.block_size), ''): \n md5.update(chunk)\n # Hexdigest is the safe varchar(32) style output\n return md5.hexdigest()",
"def hash_file(filename):\n # make a hash object\n h = hashlib.sha1()\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n # return the hex representation of digest\n return h.hexdigest()",
"def hash_file(file_path, hash_type=hashlib.sha256, binary=False, buffer_size=65536):\n hash_func = hash_type()\n with open(file_path, \"rb\") as file:\n while True:\n data = file.read(buffer_size)\n if not data:\n break\n hash_func.update(data)\n return hash_func.digest() if binary else hash_func.hexdigest()",
"def hashFile(path: str) -> str:\n\tif not os.path.exists(path):\n\t\traise FileNotFoundError\n\n\thasher = hashlib.sha1()\n\tblock_sz = 8192\n\twith open(path, 'rb') as f:\n\t\tbuf = f.read(block_sz)\n\t\twhile len(buf) > 0:\n\t\t\thasher.update(buf)\n\t\t\tbuf = f.read(block_sz)\n\treturn str(hasher.hexdigest())",
"def get_md5_hash(file_path: str) -> str:\n from hashlib import md5\n\n # local file\n if file_path.startswith('/'):\n return md5(open(file_path, 'rb').read()).hexdigest()\n\n # remote file\n httpresponse = url_is_alive(file_path)\n if not httpresponse:\n error_open_mess(file_path)\n return ''\n\n md5hash = md5()\n max_file_size = 100 * 1024 * 1024\n total_read = 0\n while True:\n data = httpresponse.read(4096)\n total_read += 4096\n\n if not data or total_read > max_file_size:\n break\n\n md5hash.update(data)\n\n httpresponse.close()\n return md5hash.hexdigest()",
"def get_hash_from_file(img):\n with open(img, 'rb') as f:\n return hashlib.sha256(f.read()).hexdigest()",
"def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha256()\n\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()",
"def hash_for_file(file_name, block_size=2 ** 20):\n hasher = SHA256.new()\n source_file = open(file_name, \"r\")\n\n while True:\n data = source_file.read(block_size)\n if not data:\n break\n hasher.update(data.encode('utf-8'))\n\n source_file.close()\n return hasher.hexdigest()",
"def get_file_hash(afile, hasher, block_size=65536):\n buf = afile.read(block_size)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(block_size)\n return hasher.digest()",
"def hash_file(self, filename_or_bytestream):\n\n try:\n for data in self._read_blocks(filename_or_bytestream):\n self._update(data)\n except OSError as e:\n print('digest: ', filename_or_bytestream, ': ', e.strerror, sep='', file=sys.stderr)\n return None\n return self._hexdigests()",
"def computeHash(filename):\n fileHash = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n fileHash.update(chunk)\n return fileHash.hexdigest()"
] | [
"0.7292291",
"0.72585297",
"0.72363",
"0.7177594",
"0.7159621",
"0.70332694",
"0.70052016",
"0.6999285",
"0.69315827",
"0.6879491",
"0.6835787",
"0.6830834",
"0.6828008",
"0.6765713",
"0.67623746",
"0.67612416",
"0.6758131",
"0.6731684",
"0.6731684",
"0.67308",
"0.67212594",
"0.671724",
"0.670072",
"0.6694894",
"0.6681219",
"0.66709065",
"0.66614497",
"0.6655756",
"0.6652983",
"0.6650765"
] | 0.8334774 | 0 |
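A sketch of how the get_hash record above could be exercised. The HASHES mapping is not shown in the record, so the one below is an assumed stand-in (file extension -> hash algorithm name); note that str(HASHES.get(...)) yields the string "None" for unknown extensions.

import os

# Assumed mapping for illustration only; the record's real HASHES is not shown.
HASHES = {".iso": "sha256", ".zip": "md5"}

def get_hash(file_url):
    file_extension = os.path.splitext(file_url)[1]
    return str(HASHES.get(file_extension))

print(get_hash("https://example.com/image.iso"))  # -> "sha256"
print(get_hash("notes.txt"))                      # -> "None" (missing key)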
Newton's second law of motion for measuring stopping distance. From Newton's second law, the stopping distance of an object in motion, like a car, is d = (1/2) * v0^2 / (mu * g), where v0 is the initial speed, mu the friction coefficient and g the gravitational acceleration. The friction coefficient measures how slick a road is, with a default of 0.3. | def stopping_length_function(initial_velocity=120, friction_coefficient=0.3):
g = 9.81
v0 = initial_velocity/3.6
mu = friction_coefficient
return (1/2)*(v0**2/(mu*g)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_force_from_damping(v, damping, masses):\n F = masses*damping*np.diff(v, 0)\n\n return F",
"def duty_cycle_by_force(newton: float, profile: GripForceProfile) -> float:\n if profile.min <= newton <= profile.max:\n return sum(ele[1] * (newton ** ele[0]) for ele in profile.polynomial)\n else:\n raise ValueError(\"Gripper force out of bounds\")",
"def friction_factor(v1: \"int\", v2: \"int\") -> \"int\":",
"def friction_model():\n return TimeWeakening()",
"def work_dos():\n #potential = 2x**2+x**2y+y**2\n x1,y1 = (2, -3)\n x2,y2 = (-1, 2)\n p1 = (2*(x1**2)) + ((x1**2)*y1) + (y1**2)\n p2 = (2*(x2**2)) + ((x2**2)*y2) + (y2**2)\n sol = p1 - p2\n sol = abs(sol)\n print(f'The vector field F=(4x+2xy,x2+2y) \\n'\n 'along the curve C parametrized by r(t)=(3t−1,−5t+2) \\n '\n f'for 0 ≤ t ≤ 1 is: {sol}')",
"def friction_factor_2(v1: \"int\", v2: \"int\") -> \"int\":",
"def my_Newton( fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs( fct( xn**(i + 1)) - fct( xn**i)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print( i, 'fct value', abs( fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs( fct( xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan",
"def speed(v, t):\n rf = v.orbit.body.non_rotating_reference_frame\n vec = v3minus(v.velocity(rf), t.velocity(rf))\n a = vec[0] * vec[0]\n b = vec[1] * vec[1]\n c = vec[2] * vec[2]\n return math.sqrt(a + b + c)",
"def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f",
"def FO2(lam):\n return 1.096 + 1.385 *1e-3 *lam**(-2) + 1.448 *1e-4 *lam**(-4)",
"def calculate_forces(v0, mu, density_m, CD, diameter_b, \\\n area_b, volume_b, density_b, \\\n dt, T):\n \n # Gravitational const. m/s^2\n g = 9.81 \n # Proportionality constant for\n # Reynolds number\n Re_const = diameter_b*density_m/mu\n \n a_s = 3*math.pi*diameter_b*mu/(density_b*volume_b)\n a_q = 0.5*CD*density_m*area_b/(density_b*volume_b)\n b = g*(density_m/density_b - 1.0)\n \n # Numerical solution gives velocity as \n # a function of time.\n v, t = vm.solver(v0, a_s, a_q, b, Re_const, T, dt) \n\n # Initialize vectors\n Fg = zeros(len(v))\n Fb = zeros(len(v))\n Fd = zeros(len(v))\n\n # Loop over time steps\n for n in range(0, len(v)):\n # Evaluate Reynolds number\n Re = Re_const*v[n] \n \n # Gravity force\n Fg[n] = -density_b*volume_b*g\n # Bouyancy force\n Fb[n] = density_m*g*volume_b\n \n # Drag force\n if abs(Re) < 1:\n # If Re < 1, use Stokes' drag force \n Fd[n] = -3.0*math.pi*diameter_b*mu*v[n]\n else:\n # If Re >= 1, use the quadratic\n # drag force\n Fd[n] = -0.5*CD*density_m*area_b*abs(v[n])*v[n]\n\n \n return Fg, Fb, Fd, t",
"def eval_dryfriction():\n # Environment\n env = WAMBallInCupSim(num_dof=7, max_steps=1500)\n\n # Policy (random init)\n policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))\n policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)\n\n # Do the rolllouts\n t_all = []\n qpos_all = []\n dp_vals = [0.0, 0.3, 0.6, 0.9, 1.2]\n print_cbt(f\"Run policy for stiction coefficients: {dp_vals}\")\n for dpv in dp_vals:\n env.reset(\n domain_param=dict(\n joint_1_dryfriction=dpv,\n joint_2_dryfriction=dpv,\n joint_3_dryfriction=dpv,\n joint_4_dryfriction=dpv,\n joint_5_dryfriction=dpv,\n joint_6_dryfriction=dpv,\n joint_7_dryfriction=dpv,\n )\n )\n ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)\n t_all.append(ro.time[:-1])\n qpos_all.append(ro.env_infos[\"qpos\"])\n\n # Plot\n fig, ax = plt.subplots(nrows=env.num_dof, sharex=\"all\", figsize=(16, 7))\n for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):\n ax[i].set_prop_cycle(color=plt.get_cmap(\"cividis\")(np.linspace(0, 1, env.num_dof)))\n ax[i].set_ylabel(f\"joint {idx_joint+1} pos [rad]\")\n for j in range(len(dp_vals)):\n ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls=\"--\", label=f\"s = {dp_vals[j]}\")\n if i == 0:\n ax[i].legend(ncol=len(dp_vals))\n ax[-1].set_xlabel(\"time [s]\")\n plt.suptitle(\"Evaluation of joint stiction coefficients\")\n plt.show()",
"def Delta(z):\n return (18*np.pi**2 - 82*cosmology.Ode(z) - 39*cosmology.Ode(z)**2) / cosmology.Om(z)",
"def my_Newton(fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs(fct (xn)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print(i , 'fct_value', abs(fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs(fct(xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan",
"def _forces_moments(self, delta):\n # assert delta.shape == (4,1)\n da = delta[0]\n de = delta[1]\n dt = delta[2]\n dr = delta[3]\n\n e0 = self._state[3]\n e1 = self._state[4]\n e2 = self._state[5]\n e3 = self._state[6]\n u = self._state[7]\n v = self._state[8]\n w = self._state[9]\n p = self._state[10]\n q = self._state[11]\n r = self._state[12]\n\n self._Va = np.sqrt(u**2 + v**2 + w**2)\n self._alpha = np.arctan(1.0*w/u)\n self._beta = np.arcsin(1.0*v/self._Va)\n\n\n\n Fg = self.mass*self.gravity*np.array([2*(e1*e3-e2*e0),\n 2*(e2*e3 + e1*e0),\n e3**2 + e0**2 - e1**2 - e2**2,\n ])\n\n # Fg = self.mass*self.gravity*np.array([2*(e1*e3 - e2*e0),\n # 2*(e2*e3 + e1*e0),\n # e3**2 + e0**2 - e1**2 - e2**2,\n # ])\n\n M_e = 25\n sig = lambda a: (1+np.exp(-M_e*(a-self.alpha0))+np.exp(M_e*(a+self.alpha0)))/((1+np.exp(-M_e*(a-self.alpha0)))*(1+np.exp(M_e*(a+self.alpha0))))\n cla = lambda a: (1-sig(a))*(self.C_L_0+self.C_L_alpha*a)+sig(a)*(2*np.sign(a)*np.sin(a)**2*np.cos(a))\n cda = lambda a: self.C_D_p + (self.C_L_0+self.C_L_alpha*a)**2/(np.pi*self.e*self.AR)\n\n cxa = lambda a: -(cda(a)) * np.cos(a) + (cla(a)) * np.sin(a)\n\n cxq = lambda a: -self.C_D_q * np.cos(a) +self.C_L_q * np.sin(a)\n\n cxde = lambda a: -self.C_D_delta_e * np.cos(a) + self.C_L_delta_e * np.sin(a)\n\n cza = lambda a: -(cda(a)) * np.sin(a) - (cla(a)) * np.cos(a)\n\n czq = lambda a: -self.C_D_q * np.sin(a) - self.C_L_q * np.cos(a)\n\n czde = lambda a: -self.C_D_delta_e * np.sin(a) - self.C_L_delta_e * np.cos(a)\n\n c = self.c/(2.0*self._Va)\n b = self.b/(2.0*self._Va)\n\n\n\n one = 0.5*self.rho*self._Va**2*self.S_wing\n # two = np.array([[1,0,0],[0,1,0],[0,0,1]])\n three = np.array([[cxa(self._alpha)+cxq(self._alpha)*c*q+cxde(self._alpha)*de],\n [self.C_Y_0+self.C_Y_beta*self._beta+self.C_Y_p*b*p+self.C_Y_r*b*r+self.C_Y_delta_a*da+self.C_Y_delta_r*dr],\n [cza(self._alpha)+czq(self._alpha)*c*q+czde(self._alpha)*de]])\n\n Fa = np.squeeze(three) * one\n # pdb.set_trace()\n Fa = Fa.reshape((3,-1))\n\n F = Fg + Fa\n #\n # print(\"Fa:\",Fa)\n\n Fp = 0.5*self.rho*self.S_prop*self.C_prop*((self.k_motor*dt)**2-self._Va**2)\n\n # print(\"FP:\", Fp)\n\n fx = F[0] + Fp\n # + 0.5*MAV.rho*self._Va**2*MAV.S_wing*(\\\n # +cxa(self._alpha)\\\n # + cxq(self._alpha)*c*q\\\n # + cxde(self._alpha)*de\n # )\n\n fy = F[1]\n fz = F[2]\n\n # Moment time!!!\n one = 0.5*self.rho*self._Va**2*self.S_wing\n two = np.array([\\\n [self.b*(self.C_ell_0+self.C_ell_beta*self._beta+self.C_ell_p*b*p+self.C_ell_r*b*r+self.C_ell_delta_a*da+self.C_ell_delta_r*dr)],\n [self.c*(self.C_m_0+(self.C_m_alpha*self._alpha)+(self.C_m_q*c*q)+(self.C_m_delta_e*de))],\n [self.b*(self.C_n_0+(self.C_n_beta*self._beta)+(self.C_n_p*b*p)+(self.C_n_r*b*r)+(self.C_n_delta_a*da)+(self.C_n_delta_r*dr))]\n ])\n Ma = one * np.squeeze(two)\n # print(\"\\nMa:\", Ma)\n # pdb.set_trace()\n Ma = Ma.reshape((3,-1))\n\n size = Ma.shape[1]\n\n Mp = np.block([[np.ones(size)*-self.kTp*(self.kOmega*dt)**2],\n [np.zeros(size)],\n [np.zeros(size)]\n ])\n\n M = Mp + Ma\n\n Mx = M[0]\n My = M[1]\n Mz = M[2]\n\n # self._forces[0] = fx\n # self._forces[1] = fy\n # self._forces[2] = fz\n # pdb.set_trace()\n # print(fx, fy, fz, Mx, My, Mz)\n\n return np.array([fx, fy, fz, Mx, My, Mz])",
"def driftRHS(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.linalg.norm(f)\n f = f/fs\n return -f*drift_velocity(fs)",
"def diffusion_spherical_FV(c, t, r, R, D, j0):\n\n # Compute spacing\n dr = r[1]-r[0]\n\n # Evaluate j\n j = current(t, j0)\n\n # Set maximum concentration\n\n # Compute fluxes\n q = - D*r[1:-1] ** 2. * (c[1:] - c[0:-1]) / dr\n q_surf = -j*R**2\n\n # Append boundary conditions\n q = np.append(0, q)\n q = np.append(q, q_surf)\n\n # Compute discretised dc/dt\n dcdt_out = - (2. / (r[1:] + r[0:-1])) ** 2. \\\n * (q[1:] - q[0:-1]) / dr\n\n return dcdt_out",
"def newton_decent_directions(function, func_derivative, func_hessian, xk, A, P, b, q, t):\r\n # calculate steepest decent direction\r\n newton_dir = -np.dot(np.linalg.inv(func_hessian(x=xk, A=A, P=P, b=b, q=q, t=t)), func_derivative(x=xk, A=A, P=P, b=b, q=q, t=t))\r\n\r\n return newton_dir",
"def vel(z, c = cp.cc.c_light_cm_s/1e5):\n # return z*c/(1+z)\n return c*((1+z)**2-1)/((1+z)**2+1)",
"def f(z):\n omega_m = 0.308\n omega_de = 0.692\n #omega = omega_m*(1+z)**3\n #return omega**0.6 + omega_de/70*(1+omega/2) # Dodelson approx\n\n omega = omega_m*(1+z)**3*H(0)**2/H(z)**2\n omega_de = omega_de*H(0)**2/H(z)**2\n return omega**(4/7) + omega_de/70*(1+omega/2) # Dodelson approx\n #return 5*omega/(2*(omega**(4/7) - omega_de + (1 + omega/2)*(1 + omega_de/70)))\n #return omega**0.55",
"def get_fde(forecasted_trajectory, gt_trajectory) -> float:\n fde = torch.sqrt(\n (forecasted_trajectory[:,-1, 0] - gt_trajectory[:,-1, 0]) ** 2\n + (forecasted_trajectory[:,-1, 1] - gt_trajectory[:,-1, 1]) ** 2\n )\n return fde.mean()",
"def simpson2(func, start, stop):\n return (func(start) + 3*func((2*start+stop)/3) + 3*func((start+2*stop)/3) + func(stop)) * (stop-start)/8",
"def force ( r ):\n \n assert r.shape == (n,3), 'Incorrect shape of r'\n\n d = np.zeros_like(r) # Create d vectors (bonds)\n d[1:n,:] = r[1:n,:] - r[0:n-1,:] # Compute d vectors (zero index not used)\n\n # Store C coefficients in a matrix\n # In the general case we would not need to calculate every pair\n # and also we would make use of the symmetry cc[a,b]=cc[b,a]\n cc = np.zeros((n,n),dtype=np.float_) # Create C array (scalar products)\n for a in range(1,n):\n for b in range(1,n):\n cc[a,b]=np.dot(d[a,:],d[b,:]) # Compute C array (zero indices not used)\n\n a = n-1 # For this test there is just one angle\n\n # Here is the potential as a function of cos(theta)\n # For testing we use the simplest form: v= -cos(theta)\n # The notation matches that used in the appendix\n\n prefac = 1.0 / np.sqrt(cc[a,a]*cc[a-1,a-1])\n fac = cc[a,a-1]\n pot = -prefac*fac # This is -cos(theta)\n\n # Here we include the derivative of the potential with respect to cos(theta) in the prefactor\n # For this simple case it is -1, so the forces are simply gradients of cos(theta) as in the text\n f = np.empty_like(r) # Create force array\n fac1 = fac / cc[a,a]\n fac2 = fac / cc[a-1,a-1]\n f[a,:] = -prefac * ( fac1*d[a,:] - d[a-1,:] )\n f[a-1,:] = prefac * ( fac1*d[a,:] - fac2*d[a-1,:] + d[a,:] - d[a-1,:] )\n f[a-2,:] = prefac * ( fac2*d[a-1,:] - d[a,:] )\n\n return pot, f",
"def finite_diff(F, x0, v0, dt, M, K, C, T):\r\n\r\n ### INITIAL PARAMETERS ####\r\n\r\n # defining the number of steps of analysis = Ns\r\n Ns = int(T/dt)+1\r\n # step t0 (initial acceleration)\r\n ngl = np.shape(F)[0] # captures the number of degrees of freedom\r\n\r\n ### MODELLING THE DISPLACEMENTS ###\r\n\r\n x_before = np.zeros((ngl,1))\r\n # matrix that indicates the displacements, in each degree of freedom, along the time of \r\n # duration of analysis. Each column is a time step\r\n x = np.zeros((ngl, Ns))\r\n x[:,0] = x0[:,0]\r\n\r\n ### SOLVING INITIAL STEP ###\r\n\r\n # initial Force F0 is equivalent to the first column of the matrix of load vectors F along time\r\n aux1 = np.zeros((ngl,1))\r\n aux1[:,0] = np.copy(F[:,0])\r\n aux2 = aux1 - np.dot(C,v0) - np.dot(K,x0)\r\n a0 = np.dot(la.inv(M),aux2)\r\n # step t-1 (before initial condition)\r\n x_before = dt*dt*a0/2 - dt*v0 + x0 \r\n # step t+1 (after initial condition)\r\n C1 = M / (dt*dt) + C / (2*dt)\r\n C2 = K - 2*M / (dt*dt)\r\n C3 = M / (dt*dt) - C / (2*dt)\r\n aux3 = aux1 - np.dot(C2, x0) - np.dot(C3, x_before)\r\n x[:,1] = np.dot(la.inv(C1), aux3[:,0])\r\n\r\n ### INTEGRATING ALONG THE DURATION OS ANALYSIS ###\r\n\r\n i = 0\r\n aux4 = np.zeros((ngl,1))\r\n aux5 = np.zeros((ngl,1))\r\n aux6 = np.zeros((ngl,1))\r\n aux7 = np.zeros((ngl,1))\r\n for i in range(1,Ns-1):\r\n aux4[:,0] = np.copy(F[:,i])\r\n aux5[:,0] = np.copy(x[:,i])\r\n aux6[:,0] = np.copy(x[:,i-1])\r\n aux7[:,0] = np.copy(x[:,i+1])\r\n aux7 = np.dot(la.inv(C1), aux4 - np.dot(C2,aux5) - np.dot(C3,aux6))\r\n x[:,i+1] = np.copy(aux7[:,0])\r\n return x",
"def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)",
"def newton_update(f, df):\n def update(x):\n return x - f(x) / df(x)\n return update",
"def derivative(r, t, G=6.67e-11, AU=1.496e+11,\n m1=5.972e+24, m2=6.417e+23, m3=1.989e+30,\n a1=1.0*1.496e+11, a2=1.52*1.496e+11):\n\n if G < 0:\n print(f\"The gravitational constant is negative\")\n\n if AU < 0:\n print(f\"The Astronomical unit is negative\")\n\n if m1 < 0:\n print(f\"The mass of the first body is negative\")\n\n if m2 < 0:\n print(f\"The mass of the second body is negative\")\n\n if m3 < 0:\n print(f\"The mass of the third body is negative\")\n\n if a1 < 0:\n print(f\"The distance of body 1 from the body center is negative\")\n\n if a2 < 0:\n print(f\"The distance of body 2 from the body center is negative\")\n\n x1 = r[0]\n y1 = r[1]\n v_x1 = r[2]\n v_y1 = r[3]\n\n x2 = r[4]\n y2 = r[5]\n v_x2 = r[6]\n v_y2 = r[7]\n\n x3 = r[8]\n y3 = r[9]\n v_x3 = r[10]\n v_y3 = r[11]\n\n z1 = r[12]\n z2 = r[13]\n z3 = r[14]\n\n v_z1 = r[15]\n v_z2 = r[16]\n v_z3 = r[17]\n\n r1 = np.array([x1, y1, z1])\n r2 = np.array([x2, y2, z2])\n r3 = np.array([x3, y3, z3])\n\n dr1 = v_x1\n dr2 = v_y1\n\n dr3 = (G*m2/distance(r1, r2)**3)*(x2-x1) + (G*m3/distance(r1, r3)**3)*(x3-x1)\n dr4 = (G*m2/distance(r1, r2)**3)*(y2-y1) + (G*m3/distance(r1, r3)**3)*(y3-y1)\n\n dr5 = v_x2\n dr6 = v_y2\n\n dr7 = (G*m1/distance(r1, r2)**3)*(x1-x2) + (G*m3/distance(r2, r3)**3)*(x3-x2)\n dr8 = (G*m1/distance(r1, r2)**3)*(y1-y2) + (G*m3/distance(r2, r3)**3)*(y3-y2)\n\n dr9 = v_x3\n dr10 = v_y3\n\n dr11 = (G*m1/distance(r1, r3)**3)*(x1-x3) + (G*m2/distance(r2, r3)**3)*(x2-x3)\n dr12 = (G*m1/distance(r1, r3)**3)*(y1-y3) + (G*m2/distance(r2, r3)**3)*(y2-y3)\n\n dr13 = v_z1\n dr14 = v_z2\n dr15 = v_z3\n\n dr16 = (G*m2/distance(r1, r2)**3)*(z2-z2) + (G*m3/distance(r1, r3)**3)*(z3-z1)\n dr17 = (G*m3/distance(r2, r3)**3)*(z1-z2) + (G*m1/distance(r2, r1)**3)*(z1-z2)\n dr18 = (G*m1/distance(r1, r3)**3)*(z1-z3) + (G*m2/distance(r2, r3)**3)*(z2-z3)\n\n dr = np.array([dr1, dr2, dr3, dr4, dr5, dr6,\n dr7, dr8, dr9, dr10, dr11, dr12,\n dr13, dr14, dr15, dr16, dr17, dr18])\n\n return dr",
"def settling_velocity(self, evaporation_factor: float=0.3) -> _VectorisedFloat:\n if self.diameter is None:\n return 1.88e-4\n else:\n return 1.88e-4 * (self.diameter*evaporation_factor / 2.5)**2",
"def epsilon_delta(self):",
"def particle_velocityV(V,F,dt,Rv,sigma,epsilon,D,N): \n V += dt/2*(particle_forceV(Rv[-1], N, sigma, epsilon, D) + particle_forceV(Rv[-2], N, sigma, epsilon, D))\n return V"
] | [
"0.6221095",
"0.5993694",
"0.59182566",
"0.5850141",
"0.5800747",
"0.5787989",
"0.56625694",
"0.55726403",
"0.55664736",
"0.555712",
"0.5540242",
"0.5521722",
"0.5505632",
"0.54962945",
"0.5486384",
"0.54847544",
"0.5474211",
"0.5469343",
"0.5457595",
"0.545249",
"0.54498357",
"0.5445204",
"0.54339385",
"0.54317605",
"0.54214525",
"0.5419991",
"0.54157037",
"0.5414509",
"0.54123455",
"0.5404006"
] | 0.70825905 | 0 |
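A worked evaluation of the stopping-distance formula in the record above, at its defaults (120 km/h, mu = 0.3, g = 9.81 m/s^2): v0 = 120/3.6 ≈ 33.33 m/s, so d = v0^2 / (2*mu*g) ≈ 188.8 m.

v0 = 120 / 3.6            # initial speed: km/h -> m/s
mu, g = 0.3, 9.81         # friction coefficient, gravitational acceleration
d = v0**2 / (2 * mu * g)  # same expression as the record above
print(round(d, 1))        # 188.8 (metres)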
Integration function using scitools.StringFunction to do integration. Command-line usage: integration.py 'sin(x)' 0 pi/2 | def integrate_function():
def midpoint_integration(f, a, b, n=100):
h = (b - a)/float(n)
I = 0
for i in range(n):
I += f(a + i*h + 0.5*h)
return h*I
f_formula = sys.argv[1]
a = eval(sys.argv[2])
b = eval(sys.argv[3])
if len(sys.argv) >= 5:
n = int(sys.argv[4])
else:
n = 200
from scitools.StringFunction import StringFunction
f = StringFunction(f_formula) # turn formula into f(x) func.
"""
>>> g = StringFunction('A*exp(-a*t)*sin(omega*x)',
independent_variable='t',
A=1, a=0.1, omega=pi, x=0.5)
>>> g.set_parameters(omega=0.1)
>>> g.set_parameters(omega=0.1, A=5, x=0)
>>> g(0)
0.0
>>> g(pi)
2.8382392288852166e-15
"""
I = midpoint_integration(f, a, b, n)
print("Integral of {:s} on [{:g}, {:g}] with n ={:d}: {:g}" \
.format(f_formula, a, b, n, I)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def integrate(f, inf_lim, sup_lim):\n function = get_function_from_text(f)\n return sp_integrate.quad(function, inf_lim, sup_lim)[0]",
"def sin(x):\r\n # see decorator for function body\r",
"def f(x):\n return (2.0*math.sin(10.0*x+1.0)+1.0)",
"def a_math_function():\n return np.sin(2*np.pi)",
"def sin(self, a):\n return math.sin(a)",
"def f(x):\r\n\treturn np.sin(x)",
"def sin(x):\n raise NotImplementedError",
"def xsin(x):\n return x + tf.sin(x)",
"def sin(x):\n if isinstance(x, int):\n x = Expression(x)\n return _sin(x)",
"def f(x):\r\n return x * np.sin(x)",
"def f(x):\n return x * np.sin(x)",
"def f(x):\n return x * np.sin(x)",
"def f(x):\n return x * np.sin(x)",
"def f(x):\n return x * np.sin(x)",
"def integrate(equ):\n if \"x\" in equ:\n return polynomial_equation(equ)\n else:\n return constant_equation(equ)",
"def sin(x):\n return 0.0",
"def integrate(self, t):",
"def integrand(order, theta, x_eval):\n return np.cos(order*theta - x_eval*np.sin(theta))/np.pi",
"def integrate(self, *args, **kwargs):\n from sympy.integrals.integrals import integrate\n return integrate(self, *args, **kwargs)",
"def func(x):\n return jnp.sum(jnp.power(jnp.sin(x), 2))",
"def _integrate_0_2pi_phis(self, expr):\n\n phi_s = sp.Symbol('phi_s')\n\n # replace first all odd powers of sin(phi_s) as these are\n # all zero for the integral\n replacements1 = [(sp.sin(phi_s) ** i, 0.)\n for i in range(1, self.SRF.ncoefs +\n self.V.ncoefs + 1) if i % 2 == 1]\n\n # then substitute the sine**2 by 1-cos**2\n replacements1 = (replacements1 +\n [(sp.sin(phi_s) ** i,\n expand((1. -\n sp.cos(phi_s) ** 2) ** sp.Rational(i, 2)))\n for i in range(2, self.SRF.ncoefs +\n self.V.ncoefs + 1) if i % 2 == 0])\n\n res = expand(expr.xreplace(dict(replacements1)))\n\n # replacements need to be done simultaneously, otherwise all\n # remaining sin(phi_s)**even will be replaced by 0\n\n # integrate the cosine terms\n replacements3 = [(sp.cos(phi_s) ** i, self._cosintegral(i))\n for i in range(1, self.SRF.ncoefs +\n self.V.ncoefs + 1)]\n\n res = expand(res.xreplace(dict(replacements3)))\n return res",
"def integrate(self, x, dx):\n raise NotImplementedError(\"Not implemented yet.\")",
"def sine(B):\n sin = math.sin\n pi = math.pi\n \n def f(x):\n return B*sin(pi*x)\n return f",
"def f(x):\n\treturn np.sin(x / 5.0) * np.exp(x / 10.0) + 5 * np.exp(-x / 2.0)",
"def sp_integrate_1D ( func , xmin , xmax , *args , **kwargs ) : \n from scipy import integrate\n ##\n result = integrate.quad ( func , xmin , xmax , *args , **kwargs )\n return result[0]",
"def constant_equation(funct):\n return funct + \"x\"",
"def function(self):\r\n lambd = 5*np.sin(2*np.pi*self.x_array) #The function in question\r\n return 3*np.pi*np.exp(-lambd)",
"def sin_term(x, i):\n n = 2*i+1\n return alternate(i, exp_term(x, n))",
"def integrate(self, x1, dx):\n return x1 + dx",
"def process_fn(fn_string, symbols):\n fn_string = fn_string.replace('^', '**')\n fn = lambdify([sympy.symbols(symbols)], fn_string, 'numpy')\n return fn"
] | [
"0.63227785",
"0.62122375",
"0.61956286",
"0.60972846",
"0.6085416",
"0.60581106",
"0.60526955",
"0.60383034",
"0.6019527",
"0.5990838",
"0.5950136",
"0.5950136",
"0.5950136",
"0.5950136",
"0.58858174",
"0.588468",
"0.58532083",
"0.58137757",
"0.5803429",
"0.5779814",
"0.5709809",
"0.56989974",
"0.56988394",
"0.5696074",
"0.56615335",
"0.56344116",
"0.56009626",
"0.55934536",
"0.5590319",
"0.5549469"
] | 0.74676055 | 0 |
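A quick numerical check of the midpoint rule used in the record above, written with plain math functions so it runs without scitools (StringFunction is only needed there to turn a command-line string into f). The exact value of the integral of sin(x) over [0, pi/2] is 1.

from math import sin, pi

def midpoint_integration(f, a, b, n=100):
    h = (b - a) / float(n)
    return h * sum(f(a + i*h + 0.5*h) for i in range(n))

print(midpoint_integration(sin, 0, pi/2, 200))  # ~1.0000026 (exact value: 1)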
Calculating the psi operator for the transport and production of the enstrophy | def psi_enstrophy(
Tau, # SGS; (6,64,64,64)
h = False, # spatial step size
flag = True): # spectral flag; default is gradient tool
#---------------------------------------------------------------------#
# Default variables #
#---------------------------------------------------------------------#
if h is False:
Pi = np.pi
N = 64
h = (2.0*Pi)/N
#---------------------------------------------------------------------#
# Preallocation variables #
#---------------------------------------------------------------------#
dim = np.shape(Tau)[1]
Psi = np.zeros((9, dim, dim, dim))
#---------------------------------------------------------------------#
# Calculating psi using spectral methods #
#---------------------------------------------------------------------#
if flag is False:
kspec = np.fft.fftfreq(dim) * dim
Kfield = np.array(np.meshgrid(kspec, kspec, kspec, indexing='ij'))
#-----------------------------------------------------------------#
# Psi_{11} #
#-----------------------------------------------------------------#
Psi[0] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[2])).real -\
np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[1])).real
#-----------------------------------------------------------------#
# Psi_{12} #
#-----------------------------------------------------------------#
Psi[1] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[4])).real -\
np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[3])).real
#-----------------------------------------------------------------#
# Psi_{13} #
#-----------------------------------------------------------------#
Psi[2] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[5])).real -\
np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[4])).real
#-----------------------------------------------------------------#
# Psi_{21} #
#-----------------------------------------------------------------#
Psi[3] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[0])).real -\
np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[2])).real
#-----------------------------------------------------------------#
# Psi_{22} #
#-----------------------------------------------------------------#
Psi[4] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[1])).real -\
np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[4])).real
#-----------------------------------------------------------------#
# Psi_{23} #
#-----------------------------------------------------------------#
Psi[5] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[2])).real -\
np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[5])).real
#-----------------------------------------------------------------#
# Psi_{31} #
#-----------------------------------------------------------------#
Psi[6] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[1])).real -\
np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[0])).real
#-----------------------------------------------------------------#
# Psi_{32} #
#-----------------------------------------------------------------#
Psi[7] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[3])).real -\
np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[1])).real
#-----------------------------------------------------------------#
# Psi_{33} #
#-----------------------------------------------------------------#
Psi[8] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[4])).real -\
np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[2])).real
#---------------------------------------------------------------------#
# Calculating psi using gradient tool #
#---------------------------------------------------------------------#
else:
#-----------------------------------------------------------------#
# Psi_{11} #
#-----------------------------------------------------------------#
Psi[0] = np.gradient(Tau[2],h, edge_order=2)[1] -\
np.gradient(Tau[1], h, edge_order=2)[0]
#-----------------------------------------------------------------#
# Psi_{12} #
#-----------------------------------------------------------------#
Psi[1] = np.gradient(Tau[4],h, edge_order=2)[1] -\
np.gradient(Tau[3], h, edge_order=2)[0]
#-----------------------------------------------------------------#
# Psi_{13} #
#-----------------------------------------------------------------#
Psi[2] = np.gradient(Tau[5],h, edge_order=2)[1] -\
np.gradient(Tau[4], h, edge_order=2)[0]
#-----------------------------------------------------------------#
# Psi_{21} #
#-----------------------------------------------------------------#
Psi[3] = np.gradient(Tau[0],h, edge_order=2)[0] -\
np.gradient(Tau[2], h, edge_order=2)[2]
#-----------------------------------------------------------------#
# Psi_{22} #
#-----------------------------------------------------------------#
Psi[4] = np.gradient(Tau[1],h, edge_order=2)[0] -\
np.gradient(Tau[4], h, edge_order=2)[2]
#-----------------------------------------------------------------#
# Psi_{23} #
#-----------------------------------------------------------------#
Psi[5] = np.gradient(Tau[2],h, edge_order=2)[0] -\
np.gradient(Tau[5], h, edge_order=2)[2]
#-----------------------------------------------------------------#
# Psi_{31} #
#-----------------------------------------------------------------#
Psi[6] = np.gradient(Tau[1],h, edge_order=2)[2] -\
np.gradient(Tau[0], h, edge_order=2)[1]
#-----------------------------------------------------------------#
# Psi_{32} #
#-----------------------------------------------------------------#
Psi[7] = np.gradient(Tau[3],h, edge_order=2)[2] -\
np.gradient(Tau[1], h, edge_order=2)[1]
#-----------------------------------------------------------------#
# Psi_{33} #
#-----------------------------------------------------------------#
Psi[8] = np.gradient(Tau[4],h, edge_order=2)[2] -\
np.gradient(Tau[2], h, edge_order=2)[1]
return Psi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi) \n \n\n # tmpnum=100000\n # locq = self.param_q(np.linspace(0,1,tmpnum)) #augmenting precision near the core\n # locphi = self.rhotor**2\n # locphi_p = interpolate.interp1d(np.linspace(0,1,len(locphi)),locphi)\n # locphi = locphi_p(np.linspace(0,1,tmpnum))\n # psi = integrate.cumtrapz(1/locq,locphi)\n # psi = np.concatenate([[0], psi])\n # psi = psi/max(psi)\n # rhopsi = psi**0.5\n # self.param_psi = interpolate.interp1d(np.linspace(0,1,tmpnum), rhopsi)",
"def exptomo(self, psi):\n return np.exp(1j*psi * self.voxelsize * self.wavenumber())",
"def _psi_ ( self ) :\n return psis",
"def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi)",
"def cal_phi(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for phi routine)')\n\n if(self.px>0):\n self.phi=math.atan(self.py/self.px)\n elif(self.px<0):\n self.phi=math.atan(self.py/self.px)+math.pi\n elif(self.py>0): #remind that p(1)=0\n self.phi=math.pi/2.0\n elif(self.py<0): # remind that p(1)=0\n self.phi=-math.pi/2.0\n else:\n print \"Warning self.phi not properly defined put value to 0\"\n self.phi=0\n \n if(self.phi<0):\n self.phi=self.phi+2*math.pi\n\n return self.phi",
"def psi_wf(self, vw, d1, d2, ns, tl):\n\t osmotic = (R*299./VW)*np.log((((vw/self.ZW)*self.ZW)/(VW))/((((vw/self.ZW)*self.ZW)/(VW))+ns))/10**6 #MPa\n\t turgor = ((vw/self.ZW) - d1)**d2#MPa\n\t return turgor+osmotic #MPa ",
"def get_mfp(self, T):\n\n self.air.T = T\n self.air.set_TempPres_dependents()\n\n self.mfp = (\n (np.sqrt(2.) * np.pi * self.air.d ** 2. * self.air.n) ** -1.\n )\n\n return self.mfp",
"def fluxonium_potential(self):\n return -0.5*(self.Ej * ((1+self.d)*cos(self.phis - 2. * pi * self.phi - 2. * pi * self.phiL) + (1-self.d)*cos(self.phis-2. * pi * self.phiL))) + self.El/2. * (self.phis) ** 2\n #return -0.5*(self.Ej * cos(self.phis - 2. * pi * self.phi) + self.Ej * cos(self.phis)) + self.El/2. * (self.phis-self.phiL)** 2",
"def psi(x, y):\n return x",
"def phi_opinion(self, persp):\n f1 = self.nrs[persp]+float(self.beta_o)\n f2 = np.sum(self.nrs[persp], axis=1, keepdims=True)+self.VO*self.beta_o\n return f1/f2",
"def _phi2psi(self):\n try:\n self.param_q.mean()\n except:\n self._readeqdsk()\n tmpnum=100000\n locq = self.param_q(np.linspace(0,1,tmpnum)) #augmenting precision near the core\n locphi = np.linspace(0,1,tmpnum)\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n rhopsi = psi\n self.param_psi = interpolate.interp1d(np.linspace(0,1,tmpnum), rhopsi)",
"def psi(n, x):\n H = h(n, x, orthonormal=True)\n weight = np.exp(-(x ** 2) / 2)\n psi = H * weight\n return psi",
"def cal_eta(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for eta routine)')\n \n theta=math.acos(self.pz/math.sqrt(self.px**2+self.py**2+self.pz**2))\n self.eta=-math.log(math.tan(theta/2.0))",
"def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)",
"def psi(n,x):\r\n a = 1/(sqrt((2**n)*fac(n)*sqrt(pi)))\r\n b = (e)**(-1*(x**2)*0.5)\r\n H_n = H(n,x)\r\n return a*b*(H_n)",
"def _calc_interaction_expansion(self):\n # preevaluate expansions for volume and surface phase functions\n # this returns symbolic code to be then further used\n\n volexp = self.V.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n brdfexp = self.SRF.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n # preparation of the product of p*BRDF for coefficient retrieval\n # this is the eq.23. and would need to be integrated from 0 to 2pi\n fPoly = expand(2 * sp.pi * volexp * brdfexp)\n\n # do integration of eq. 23\n expr = self._integrate_0_2pi_phis(fPoly)\n\n # now we do still simplify the expression to be able to express\n # things as power series of cos(theta_s)\n theta_s = sp.Symbol('theta_s')\n replacements = [(sp.sin(theta_s) ** i,\n expand((1. - sp.cos(theta_s) ** 2)\n ** sp.Rational(i, 2)))\n for i in range(1, self.SRF.ncoefs + self.V.ncoefs - 1)\n if i % 2 == 0]\n\n res = expand(expr.xreplace(dict(replacements)))\n\n return res",
"def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u",
"def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)",
"def flux_qubit_potential(self):\n return -self.Ej * cos(self.phis - 2. * pi * self.phi) + self.El/2. * (self.phis) ** 2",
"def addPsiOsmo(self):\n # Salinity is 0 ppt is the basic scenario\n self._psi_osmo = np.array([0] * self.no_plants)",
"def psi(x):\n return np.sin(x)",
"def u(E_wholesale_P, fixed_P_component, price_elast, xi, q):\n \n end_P = p_endconsumers(E_wholesale_P, fixed_P_component)\n u = xi / (1.0 - 1.0 / price_elast) * q**(1.0 - 1.0 / price_elast) - end_P * q\n \n return u",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)",
"def idealOpAmp():",
"def psi(self):\n return PoundSquareInch(self.base_value / 6894.76)",
"def psi(arity, degree, convention=convention):\n if degree == 0:\n return SurjectionElement({tuple(range(1, arity + 1)): 1},\n convention=convention)\n else:\n previous = psi(arity, degree - 1, convention=convention)\n acted_on = operators[degree % 2] * previous\n answer = h(acted_on)\n return answer",
"def energy(ps):\n return kinetic_energy(ps) + potential_energy(ps)",
"def psi_packet_k(pos=x1,angular_f=omega1,time=t1,phi_dic=example_phik()):\n\n phi_var = phi_dic['var']\n phi_par = 1, np.pi, \n phi_y = phi_dic['y']\n\n psi_var = sy.var('x omega t i')\n psi_par = pos, angular_f, time, sy.I\n\n # var = phi_var + psi_var\n # par = phi_par + psi_par\n var = psi_var\n par = psi_var\n\n k = sy.symbols('k')\n y1 = phi_y * sy.exp( i * (k*x - omega*t) )\n\n return y1\n\n # def integrand(k,x,omega,t):\n # return phi(k) * np.exp( 1j * (k*x - omega*t) )\n #\n # I = si.quad(integrand, -np.inf, np.inf, args=(x,omega,t) )\n #\n # return 1/np.sqrt( 2 * np.pi ) * ( I[0] - I[1] )",
"def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))"
] | [
"0.66981435",
"0.6571289",
"0.64863443",
"0.64510655",
"0.6450011",
"0.63740313",
"0.6351212",
"0.6321734",
"0.6310165",
"0.62904805",
"0.62640244",
"0.623965",
"0.61741835",
"0.61319333",
"0.6116678",
"0.60592675",
"0.6031023",
"0.6012221",
"0.600168",
"0.59927183",
"0.59659547",
"0.59367836",
"0.5933083",
"0.59089637",
"0.59055865",
"0.5905166",
"0.5872765",
"0.5870351",
"0.58688956",
"0.5853626"
] | 0.66243196 | 1 |
Gets the coordinates of this atom. Returns | def get_coords(self):
return self.coords | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def coordinates(self):\n return self.xy",
"def get_coordinates(self):\n return self.coordinates",
"def coordinates(self):\n return self._coordinates",
"def coordinates(self):\n return self._coordinates",
"def getCoords(self):\r\n \r\n return self.coords",
"def coords(self):\n return nx.get_node_attributes(self.network, 'coords')",
"def xy(self):\n return self.coords.xy",
"def coordinates(self):\n return np.array([self.x, self.y])",
"def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y",
"def coords(self):\n return (self.x, self.y, self.z)",
"def coords(self):\n return (self.x, self.y, self.z)",
"def coords(self):\n\n return self.__get_gps_location()",
"def get_coordinates(self):\n return np.array([(n.x, n.y) for n in self.nodes])",
"def coordinates(self):\n return self.latitude, self.longitude",
"def coordinates(self):\n if hasattr(self, '_coordinates'):\n return self._coordinates\n else:\n return self._points",
"def getCoords(self):\n\n if self._coords is not None:\n return self._coords[self._acsi].copy()",
"def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng",
"def coordinates(self) -> Optional[Coordinates]:\n if self.atoms is None:\n return None\n\n return self.atoms.coordinates",
"def get_coords(self):\n return [self.x,self.y,self.w,self.h]",
"def xy(self) -> Tuple[int, int]:\n return self._x, self._y",
"def coordinates(self) -> Tuple[float, float, float, float, float]:\n return (self.x, self.y, self.x + self.width, self.y + self.height)",
"def get_coord(self):\n return self.coord",
"def getMachineCoordinates(self):\n return (self.x, self.y, self.z)",
"def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank",
"def coordinates(self):\n return np.array([[f.x, f.y] for f in self])",
"def position(self):\n return self._x, self._y",
"def _getCoords(self):\n\n if self._coords is not None:\n return self._coords[self._acsi]",
"def position(self):\n return self.x, self.y",
"def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)",
"def get_edge_coords(self):\n return self.coords"
] | [
"0.8518098",
"0.8345379",
"0.82986754",
"0.82986754",
"0.82948023",
"0.8146323",
"0.8145044",
"0.8112127",
"0.8061573",
"0.8036323",
"0.8036323",
"0.7918564",
"0.7912341",
"0.7905923",
"0.789621",
"0.7851619",
"0.77691483",
"0.77299666",
"0.76837",
"0.76726896",
"0.766294",
"0.76354736",
"0.7613365",
"0.76053834",
"0.7582628",
"0.75804543",
"0.75575846",
"0.75477004",
"0.7523281",
"0.7471958"
] | 0.8384263 | 1 |
Gets the x coordinate of this atom. Returns | def get_x(self):
return self.coords[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Getxcoord(self):\n return self.x_coord",
"def _get_x(self):\n return self.position.x",
"def getXCoordinate(self) -> float:\n return self.x_coord",
"def get_x_position(self):\n return self.actual_coordinates[0]",
"def x(self):\n return self._coords[0]",
"def get_pos_x(self):\n return self.__pos_x",
"def x(self):\n if self._x is None:\n self.compute_coordinates()\n return self._x",
"def x(self):\n return self.coords[0]",
"def get_x(self):\n return self.posX",
"def get_x_position(self):\n return self.rect.x",
"def get_x(self) -> int:\n return self.__x",
"def x(self):\n return _libsbml.Point_x(self)",
"def GetX(self):\r\n\r\n return self._x",
"def getX(self):\n return self.__x",
"def x_coord(self):\n\n return self.x0 + np.arange(self.nx) * self.dx",
"def getX(self):\n return self.x",
"def getX(self):\n return self.position.getX()",
"def getX(self):\r\n\t\treturn self._x",
"def getX(self):\n return self.position[0]",
"def getX(self):\n return _libsbml.BoundingBox_getX(self)",
"def get_origin_x_position(self):\n return self.origin_coordinates[0]",
"def origin_x(self):\n return self._origin[0]",
"def x(self):\r\n return self.position.x",
"def get_ship_x(self):\n return self.x",
"def getXOffset(self):\n return _libsbml.Point_getXOffset(self)",
"def x(self) -> int:\n return self._x",
"def x(self):\n return self._x",
"def x(self):\n return self._x",
"def x(self):\n return self._x",
"def x(self):\n return self._x"
] | [
"0.8656004",
"0.8442269",
"0.84205234",
"0.83736426",
"0.8353082",
"0.83035123",
"0.82412916",
"0.8212211",
"0.82038295",
"0.81553376",
"0.8097587",
"0.80942667",
"0.7829044",
"0.78141165",
"0.78131527",
"0.7794782",
"0.7767253",
"0.77473986",
"0.773306",
"0.76320076",
"0.762475",
"0.7616443",
"0.7519012",
"0.74905527",
"0.7462451",
"0.7451654",
"0.7419411",
"0.7419411",
"0.7419411",
"0.7419411"
] | 0.8588003 | 1 |
Gets the z coordinate of this atom. Returns | def get_z(self):
return self.coords[2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getZ(self):\n return self.position.getZ()",
"def getZ(self):\n\t\treturn self.coords.z",
"def z(self):\n return self._coords[2]",
"def z(self):\n return self.coords[2]",
"def get_z(self) -> int:\n return self.__z",
"def z(self):\r\n return self.position.z",
"def getZ(self):\n return _libsbml.BoundingBox_getZ(self)",
"def z(self):\n return self._z",
"def z(self):\n return self._z",
"def z(self):\n return self._z",
"def __get_z__(self):\n return self.Direction['z']",
"def z(self):\n return _libsbml.Point_z(self)",
"def z ( self ) :\n return self.zvar",
"def getZCoord(self, x, y):\n n = self.normal()\n z = (-n.x * (x - self.p0.x) - n.y * (y - self.p0.y) + n.z * self.p0.z) / n.z\n return z",
"def z(self):\n return self._data[2]",
"def getZOffset(self):\n return _libsbml.Point_getZOffset(self)",
"def zaxis(self):\n return self._zaxis",
"def z(self):\n return self._reg2val(MMA7660_Z_ADDR)",
"def get_z(self, dt):\n return dt[\"z\"]",
"def getz_index(self):\n return self._getz_index",
"def M_z(self) -> int:\n return self.params.M_z",
"def origin_z(self):\n return self.locations_z[0]",
"def z(self):\n return self._translation[2, 0]",
"def zaxis ( self ) :\n return self.__zaxis",
"def z(self):\n return self._arr[2]",
"def zvar ( self ) :\n return self.__zvar",
"def z0(self):\n return self.params['z0']",
"def z(self):\n return self[2]",
"def z_distance(self):\n return self.get_distance(self.Z_INDEX)",
"def _get_z_coord(cube):\n for coord in cube.coords(dim_coords=True):\n if iris.util.guess_coord_axis(coord) == 'Z':\n z_coord = coord\n break\n else:\n raise ValueError(f\"Cannot determine height axis (Z) of cube \"\n f\"{cube.summary(shorten=True)}\")\n return (z_coord, cube.coord_dims(z_coord)[0])"
] | [
"0.8616124",
"0.86023843",
"0.8559942",
"0.84333587",
"0.83052397",
"0.82192296",
"0.80853426",
"0.80163383",
"0.80163383",
"0.80163383",
"0.79635745",
"0.79482794",
"0.78137195",
"0.77216375",
"0.7505032",
"0.74130934",
"0.73766494",
"0.7360839",
"0.730008",
"0.729796",
"0.72931254",
"0.7246056",
"0.7219753",
"0.7181541",
"0.7157797",
"0.71193194",
"0.710592",
"0.7101097",
"0.70534045",
"0.6912274"
] | 0.8734822 | 0 |
Gets the mass of this atom. Returns | def get_mass(self):
return self.m | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mass(self):\n return self._mass",
"def mass(self):\n return self._mass",
"def get_mass(self):\n _pal.lib.geometry_get_mass.restype = c.c_float\n return _pal.lib.geometry_get_mass(self._geometry)",
"def mass(self):\n return self._getAttribute(Attribute.mass)",
"def getMolecularMass(self):\n dataDict = self.__dict__\n # get formula dictionary\n dd = {}\n for ca in self.chemAtoms:\n if isinstance(ca, ChemAtom):\n ss = ca.elementSymbol\n ii = dd.get(ss)\n if ii is None:\n dd[ss] = 1\n else:\n dd[ss] = ii + 1\n \n # calculate mass\n xx = self.root.currentChemElementStore\n result = sum(ii * xx.findFirstChemElement(symbol=ss).mass for (ss, ii) in dd.iteritems())\n return result",
"def mass(self):\n return self._P",
"def mass(self):\n self.check_symbols()\n return self._tree_mass(self._tokentree())",
"def getMass(self):\n return self.mass",
"def total_mass(self):\n return self._total_mass",
"def total_mass(self):\n return self._total_mass",
"def mass(self):\n\t\treturn self.volume*self.density",
"def mass(self):\n return _cantera.reactor_mass(self.__reactor_id)",
"def Mass(self):\n mpa = self.MassPerLength()\n if mpa == 0.0:\n return 0.\n L = self.Length()\n mass = L * mpa\n\n #try:\n #mass = (self.Rho() * self.Area() + self.Nsm()) * L\n #except TypeError:\n #msg = 'TypeError on eid=%s pid=%s:\\n' % (self.eid, self.Pid())\n #msg += 'rho = %s\\narea = %s\\nnsm = %s\\nL = %s' % (self.Rho(),\n # self.Area(),\n # self.Nsm(), L)\n #raise TypeError(msg)\n\n return mass",
"def particleMass(self):\n return self.params['particleMass']",
"def mass(self) -> Mass:\n return self.weight",
"def m1(self):\n return self.mass[0]",
"def get_mass(atomic_symbol: str) -> float:\n\n if atomic_symbol in _masses.keys():\n return _masses[atomic_symbol]\n\n else:\n return 0",
"def get_mass(elem):\n return mass[get_num(elem)]",
"def get_mass(element):\n return pt.elements.isotope(element).mass",
"def getMasses(self):\n try:\n return self._massList\n except AttributeError:\n self._massList = [float(x) for x in self._raw_data['MASS']]\n return self._massList",
"def molarMass(matID):\n mat = goodID(matID)\n compound = xl.CompoundParser(mat)\n return atomWeight(matID) * compound['nAtomsAll']",
"def calc_mass(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) *\n (star.rho[:-2:2, j] + 4 * star.rho[1:-1:2, j] +\n star.rho[2::2, j])) / 6\n\n mass = 0\n\n for j in range(0, N - 2, 2):\n mass += (r[j + 2] - r[j]) * (r[j]**2 * Q1(j) +\n 4 * r[j + 1]**2 * Q1(j + 1) +\n r[j + 2]**2 * Q1(j + 2))\n\n return 2 / 3 * np.pi * mass",
"def mass(self, star_mass=1.0):\n m_mj = 0.004920266275467775 * star_mass**(2./3) \\\n * self.P**(1./3) * self.K * np.sqrt(1-self.e**2)\n return m_mj",
"def meanMolarMass(self):\n return _cantera.phase_meanmolwt(self._phase_id)",
"def mass(self):\n\t\traise NotImplementedError",
"def mu(self):\n return self.mass * G",
"def mass(self, element):\n return self.m(element)",
"def mass(self) -> Mass:\n return Mass(0.0)",
"def _get_molecule_center_of_mass(self):\n center_of_mass = np.zeros([3], dtype=float)\n masses = self._prmtop[\"MASS\"]\n for atom_ind in range(len(self._crd)):\n center_of_mass += masses[atom_ind] * self._crd[atom_ind]\n total_mass = masses.sum()\n if total_mass == 0:\n raise RuntimeError(\"zero total mass\")\n return center_of_mass / total_mass",
"def enthalpy_mass(self):\n return _cantera.reactor_enthalpy_mass(self.__reactor_id)"
] | [
"0.8615101",
"0.8615101",
"0.8449791",
"0.827885",
"0.81125116",
"0.7995724",
"0.7925729",
"0.7887645",
"0.78556985",
"0.78556985",
"0.7763608",
"0.76362807",
"0.7610759",
"0.7542108",
"0.735307",
"0.7303788",
"0.7278605",
"0.7205126",
"0.7170194",
"0.7108265",
"0.7056702",
"0.69589114",
"0.69333863",
"0.69306815",
"0.6919366",
"0.6872219",
"0.68660146",
"0.6797525",
"0.67918766",
"0.6771746"
] | 0.8783727 | 0 |
Gets the van Der Waals radius of this atom. Returns | def get_van_Der_Waals_radius(self):
return self.van_Der_Waals_radius | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_radius(self):\n return self.r",
"def get_radius(self):\n return self.R",
"def get_radius(self):\n return self.__radius",
"def radius(self) -> float:\n return get_radius_from_element(self.element)",
"def get_radius(self):\n return self.radius",
"def get_radius(self):\n return self.radius",
"def radius(self):\n return sqrt(self.radius_square())",
"def outer_radius(self):\n return self._outer_radius",
"def radius(self):\n return self._radius",
"def radius(self):\n return self._radius",
"def radius(self):\n return self._radius",
"def radius(self):\n return self._radius",
"def radius(self):\n return self._radius",
"def radius(self) -> float:\n return self._radius",
"def get_radius(self):\n if self.no_dist is False:\n dist = self.distance\n radius = (dist * self.ang_size / 60. *\n np.pi/180. * ct._kpc_over_pc_)/2.\n self.radius = radius\n else:\n self.radius = -1 # use -1 to indicate unknown diameter\n\n return self.radius",
"def getRadius(self):\n return self.__radius",
"def getRadius(self):\n return self.radius",
"def radius(self):\n if self._radius is None:\n translated_xyz = translate_to_center_of_mass(self.get_xyz())\n _, symbols, x, y, z = get_xyz_matrix(translated_xyz)\n border_elements = list() # a list of the farthest element/s\n r = 0\n for si, xi, yi, zi in zip(symbols, x, y, z):\n ri = xi ** 2 + yi ** 2 + zi ** 2\n if ri == r:\n border_elements.append(si)\n elif ri > r:\n r = ri\n border_elements = [si]\n atom_r = max([get_atom_radius(si) if get_atom_radius(si) is not None else 1.50 for si in border_elements])\n self._radius = r ** 0.5 + atom_r\n logger.info('Determined a radius of {0:.2f} Angstrom for {1}'.format(self._radius, self.label))\n return self._radius",
"def inner_radius(self):\n return self._inner_radius",
"def get_radius(self):\r\n return self._handler.get_radius()",
"def radius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, self.vertices[0])",
"def radius(self) -> Union[int, float]:\n return self.proto.radius",
"def radius(self) -> float:\n return math.hypot(self.x, self.y)",
"def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()",
"def get_radius(self):\r\n return 1",
"def radius(self):\n c = self.centroid()\n dmax = -np.inf\n for vertex in self.path.vertices:\n d = np.linalg.norm(vertex - c)\n if d > dmax:\n dmax = d\n return d",
"def radius(self) -> int:\n pass",
"def getCoreRadius(self):\n return self.getNumRings(indexBased=True) * self.getFirstBlock().getPitch()",
"def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)",
"def mean_radius(self):\n return self._mean_radius"
] | [
"0.79551435",
"0.78757066",
"0.78437364",
"0.7793091",
"0.7788298",
"0.7788298",
"0.7774703",
"0.7747598",
"0.77394575",
"0.77394575",
"0.77394575",
"0.77394575",
"0.77394575",
"0.7731752",
"0.7719796",
"0.7713599",
"0.7675112",
"0.7668061",
"0.7660242",
"0.7640078",
"0.75554675",
"0.74558413",
"0.7382431",
"0.73580617",
"0.72847605",
"0.7203548",
"0.6982298",
"0.6974618",
"0.69417745",
"0.6918043"
] | 0.86208606 | 0 |
Gets the euler tensor of this atom. Returns | def get_euler(self):
return array([ coord * self.coords for coord in self.coords ]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def imu_get_euler(self):\n return self.imu.get_euler()",
"def euler_integrator(self, t, y, tau):\n\n return self.plant.rhs(t, y, tau)",
"def getTensor(self):\n\t\treturn self.cur_tensor",
"def get_deltaE(self):\n return self.deltaE",
"def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])",
"def E(self):\n return self._E",
"def E(self):\n return self._E",
"def euler_characteristic(self):\n return Integer(self.degree() * 2 -\n sum(sum(j - 1 for j in self.profile(i))\n for i in range(self.length())))",
"def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)",
"def get_E(self):\n return self.E",
"def getEll(self):\n\n\t\tellx = fftengine.fftfreq(self.data.shape[0])*2.0*np.pi / self.resolution.to(u.rad).value\n\t\telly = fftengine.rfftfreq(self.data.shape[0])*2.0*np.pi / self.resolution.to(u.rad).value\n\t\treturn np.sqrt(ellx[:,None]**2 + elly[None,:]**2)",
"def getEta(self):\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. # erreur d'arrondi\n return self.__eta",
"def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)",
"def forwardEuler(self,un, tn):\n return un + self.dt*self.f(un, tn)",
"def euler(faces, edges, verticies):\n\n # Return the calculated value\n return verticies + edges - faces",
"def e(self):\n if self._e is None:\n # self._e = self.distributions.uniform(0.3,0.33)\n # return self._e\n # max is set by q but also limited by users choice of e_max.\n res_a = 29.9*((self.j[0]/self.k[0])**(2/3))\n q = self.distributions.truncated_normal(self.q_c, self.q_w, res_a*(1-0.8), res_a*(1-0.001))\n self._e = 1 - q/res_a\n return self._e",
"def get_E(self):\r\n return self.E",
"def get_ell(self):\n lx, ly = self.get_lxly()\n return np.sqrt(lx**2 + ly**2)",
"def get_ell(self):\n lx, ly = self.get_lxly()\n return np.sqrt(lx**2 + ly**2)",
"def get_ell(self):\n lx, ly = self.get_lxly()\n return np.sqrt(lx**2 + ly**2)",
"def get_e(self):\n return self.e_min + self.e_ * self.e_range",
"def etol(self) -> PotentialEnergy:\n return self._etol",
"def inertia_tensor(self, masswt=True, zero=ZERO):\n return self.inertia_tensor_partial(range(self.natom()), masswt, zero)",
"def get_eigenvalues(self):\n return self.eigenValues",
"def omega(self):\n return self._data.train_X @ self._thetas",
"def get_torque(self):\n return self.node.sdo[0x6077].phys # rate torque(mN.m) /1000",
"def E(self, temperature):\n E = None\n if self._E_table:\n E = self._E_table.Value(temperature)\n return E",
"def get_efg_tensor(self, atom_index: int) -> ArrayLike:\n return self._efg_tensors[atom_index - 1]",
"def E(self):\n return self._properties['E']",
"def get_eigen(self, predictor=True):\n\n if predictor is True:\n vec_ = range(1, self.npred + 1)\n fctr_ = self.gamma\n else:\n vec_ = range(1, self.nresp + 1)\n fctr_ = self.eta\n eigen = np.exp([-fctr_ * float(p_) for p_ in vec_]) / np.exp(-fctr_)\n return eigen"
] | [
"0.65126354",
"0.6043174",
"0.59547913",
"0.590338",
"0.5897418",
"0.58401287",
"0.58401287",
"0.5825734",
"0.5797117",
"0.5762484",
"0.572737",
"0.5713381",
"0.5662122",
"0.5651244",
"0.5643783",
"0.56348675",
"0.56310534",
"0.56149095",
"0.56149095",
"0.56149095",
"0.5590667",
"0.5574735",
"0.55439043",
"0.55356556",
"0.55289936",
"0.55267936",
"0.5495457",
"0.5494943",
"0.54921675",
"0.5445496"
] | 0.64894354 | 1 |
Gets the symbol of this atom. Returns | def get_symbol(self):
return self.symbol | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def symbol(self):\n return self._symbol",
"def symbol(self): \n return self.__symbol",
"def symbol_id(self) -> str:\n return self._symbol",
"def getSymbol(self):\n return _libsbml.InitialAssignment_getSymbol(self)",
"def getElementSymbol(self):\n dataDict = self.__dict__\n yy = self\n while yy is not None:\n xx = yy\n yy = xx.findFirstChemAtomSet()\n \n result = xx.findFirstChemAtom().elementSymbol\n return result",
"def symbol(self) -> str:\n return self.current_token",
"def symbol(self) -> Optional[str]:\n if self._is_a() or self._is_label():\n return self._cur().split(\"@\")[1]",
"def get_display_symbol(self):\n return self.symbol if self.display_symbol is None else self.display_symbol",
"def get_display_symbol(self):\n return self.symbol if self.display_symbol is None else self.display_symbol",
"def atomic_symbol(self) -> str:\n return self.label",
"def getAiSymbol(self) -> str:\n return self.ai.getSymbol()",
"def obj(self) -> str:\n return self._symbol",
"def obj(self) -> str:\n return self._symbol",
"def atomic_symbol(self, atomic_number):\n return self.GetSymbol(atomic_number)",
"def getPlayerSymbol(self) -> str:\n return self.player.getSymbol()",
"def symbol(self, **kw):\n if not kw:\n raise ValueError(u\"'symbol' needs keyword arguments\")\n res = self.find_symbols(**kw)\n if len(res)==1:\n return res[0]\n else:\n return res",
"def read_symbol(self) -> str:\n return self.tape[self.current_position]",
"def symbolic_name(self):\n return self._symbolic_name",
"def atom(self):\n\n result = self.current_char\n pos = self.pos\n self.next()\n\n if self.current_char is not None and self.current_char.isalpha():\n nresult = result + self.current_char\n if nresult in TOT_SYMBOLS:\n self.next()\n return nresult\n\n if result in TOT_SYMBOLS:\n return result\n else:\n raise LexerException(pos, '{} is not a valid atomic symbol'.format(result))",
"def get_symbol(operator):\r\n if isinstance(operator, AST):\r\n operator = type(operator)\r\n try:\r\n return ALL_SYMBOLS[operator]\r\n except KeyError:\r\n raise LookupError('no known symbol for %r' % operator)",
"def get_address(self, symbol):\n return self.table[symbol]",
"def address(self, symbol):\r\n return self.s_table[symbol]",
"def symbol(self):\n if self.content is None:\n return \" \"\n else:\n return self.content.symbol",
"def _GetSymbol(atom):\n ks = atom.keys()\n if 'sym' in ks:\n return atom['sym']\n\n for k in ks:\n if k not in PROTECTED_KEYS and isinstance(atom[k], list):\n if len(atom[k]) == 3:\n return k\n\n raise ValueError",
"def getSymbolAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.symbol.Symbol:\n ...",
"def getSymbolValue(self) -> int:\n ...",
"def getMibSymbol(self):\n if self.__state & self.stClean:\n return self.__modName, self.__symName, self.__indices\n else:\n raise SmiError('%s object not fully initialized' % self.__class__.__name__)",
"def getSymbol(self, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> ghidra.program.model.symbol.Symbol:\n ...",
"def symbol(self):\n if self.currCommType is A_COMMAND:\n return self.currentCommand[COMMAND_START:].strip()\n\n elif self.currCommType is L_COMMAND:\n return self.currentCommand[COMMAND_START:COMMAND_ENDS].strip()",
"def next_symbol(self):\r\n try:\r\n return self.rule.rightside[self.position]\r\n except IndexError:\r\n return None"
] | [
"0.85209036",
"0.8121533",
"0.76356006",
"0.75384325",
"0.73964155",
"0.7318919",
"0.73038465",
"0.7173422",
"0.7173422",
"0.70371056",
"0.69875854",
"0.6953391",
"0.6953391",
"0.6947048",
"0.6933806",
"0.6931096",
"0.68797255",
"0.68131906",
"0.6801363",
"0.6762182",
"0.6737989",
"0.67216915",
"0.6715377",
"0.67109805",
"0.6690734",
"0.66814363",
"0.66635394",
"0.66204065",
"0.66015756",
"0.65572214"
] | 0.85047024 | 1 |
Gets the chain sequence number of the amminoacid this atom belongs to. Returns | def get_ammino_chain_seq(self):
return self.ammino_chain_seq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sequence_number(self):\n return self._sequence_number",
"def sequence_number(self):\n # type: () -> int\n return self._sequence_number",
"def attempt_sequence_number(self):\n return self._attempt_sequence_number",
"def chain_serial(self):\n return self.structure.chain_serial[self.mask]",
"def __get_sequence_number(self):\n if self.counter > 999:\n self.counter = 0\n else:\n self.counter += 1\n\n str_sequence_num = self.counter + 256\n str_hex_sequence_num = hex(str_sequence_num)[2:]\n return str_hex_sequence_num",
"def get_sequence(self):\n self.__sequence = self.__sequence + 1\n return self.__sequence - 1",
"def sequence(self):\n\n\t\tseq = \"\"\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tseq += res.aa1()\n\n\t\treturn seq",
"def get_atomic_number(self):\n\n return self._atomic_number",
"def getResidueNumber(self, iAtom):\n return self._getResiduePointer(iAtom)+1",
"def sequence(self):\n return self._sequence",
"def sequence(self):\n return self._sequence",
"def get_sequence(self, chain_id, model_num = 0):\n if self.get_chain_length(chain_id, model_num) == 0:\n return \"\"\n\n seq = \"\"\n for res in self.residues(chain_id, model_num):\n aa = self.res_definitions.get_one_letter_from_three(res.resname)\n if not aa:\n print \"Setting NCAA as X: \"+res.resname\n print \"This could pose a problem!\"\n seq = seq+'X'\n continue\n\n seq = seq+aa\n return seq",
"def residueNumber(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_number(self._c_structure,i)",
"def casenumber(self) :\n\t\ttry :\n\t\t\treturn self._casenumber\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_seq_from_pdbchain(chain):\n type_chain = check_type(chain)\n if type_chain == \"protein\":\n three_res_list = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'CA':\n residue = atom.get_parent()\n three_res_list.append(residue.get_resname())\n return three_to_one(three_res_list) # three_to_one function\n else:\n nucleic_acid_res = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'P':\n residue = atom.get_parent()\n nucleic_acid_res.append(residue.get_resname())\n nucleic_acid_seq = [x[2] for x in nucleic_acid_res]\n return \"\".join(nucleic_acid_seq)",
"def sequence_number(self):\n return self._annotations.get(EventData.PROP_SEQ_NUMBER, None)",
"def sequence (self):\n seq_av_at = \"%s:__seq__\" % (self.classkey)\n seq = r.incr (seq_av_at)\n return seq",
"def _get_next_sequence_number(self):\n cur = self._next_sequence_number\n self._next_sequence_number += 1\n return cur",
"def get_sequence(self, ID):\n try: \n record = self.database[ID]\n except KeyError:\n return '-1'\n sequence = record[\"sequence\"]\n return sequence",
"def seq(self):\n return self.__seq",
"def get_sequence_index(self):\n\t\treturn call_sdk_function('PrlBootDev_GetSequenceIndex', self.handle)",
"def seq(self):\n\t\tif self._record is not None:\n\t\t return self._record.seq\n\t\telse:\n\t\t return None",
"def match_seq_num(self):\n return self._get(\"match_seq_num\")",
"def sequence(self):\n return self[23]",
"def get_atomic_number(molecule, atom_index):\n return molecule.GetAtomAtomicNumber(atom_index)",
"def sequence_length(self):\n return self.get_sequence_length()",
"def sequence_length(self):\n return self._sequence_length",
"def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n",
"def nseqs(self):\n return libhts.faidx_nseq(self._fai)",
"def readSeq(self):\n # clear buffer in case of errors\n self.flushInput()\n\n if (self.model == 'TDS'):\n self.write('ACQuire:NUMACq?\\n')\n return int(self.readline())\n\n # clear buffer in case of errors\n self.flushInput()"
] | [
"0.7083817",
"0.6912228",
"0.63440174",
"0.6332537",
"0.62717706",
"0.62422633",
"0.6234725",
"0.6123495",
"0.61229354",
"0.6113672",
"0.6113672",
"0.60696846",
"0.6031649",
"0.6021344",
"0.6019921",
"0.5965752",
"0.59603554",
"0.59326804",
"0.59226",
"0.5900438",
"0.58660275",
"0.5848535",
"0.5783916",
"0.57827073",
"0.5750906",
"0.5727806",
"0.57013667",
"0.5695262",
"0.56811553",
"0.5637944"
] | 0.80430615 | 0 |
Gets the euclid distance from this atom to the given atom. Returns | def get_euclid_distance_to(self, atom):
return linalg.norm(self.get_coords() - atom.get_coords()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_distance(self) -> int:\n return self.get_measurement_data().distance",
"def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)",
"def __get_distance(self, game_object):\n obj_x, obj_y = game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return math.sqrt(inner)",
"def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )",
"def distance(self):\n return self._distance",
"async def distance(self):\n return round(await self._rpc.distance(), 2)",
"def _euclidian_distance(self, x1, x2):\n a= x1-x2\n a2 = a**2\n b = np.sum(a2, axis=1)\n c = np.sqrt(b)\n return c",
"def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre",
"def dist(self, other: Coordinate) -> int:\n return abs(other.x - self.x) + abs(other.y - self.y)",
"def distance(self):\n return Distance(length_of(self.position.au))",
"def distance(self, x: int, y: int) -> float:\n return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)",
"def get_distance(self, star):\n if self == star:\n return 0\n\n a_car = self.get_cartesian_coords()\n b_car = star.get_cartesian_coords()\n dab = math.degrees(math.acos(a_car[0] * b_car[0] +\n a_car[1] * b_car[1] +\n a_car[2] * b_car[2]))\n return dab",
"def distance(self):\n return self.value * len(self.alignment.query)",
"def distance_to(self, x):\n return np.linalg.norm(np.array(x) - self.closest_point_to(x))",
"def distance_to(self, circle):\n diff = tuple(map(sub, self.pos, circle.pos))\n return math.hypot(*diff)",
"def _euclid_distance(self, A, B, axis=1):\n return np.linalg.norm(A - B, axis=axis)",
"def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))",
"def get_distance(self, star):\n if self.centroid == star.centroid:\n print(\"distance for same star\")\n return 0\n\n unitary_a = self.get_unitary_vector()\n unitary_b = star.get_unitary_vector()\n dab = math.degrees(math.acos(unitary_a[0] * unitary_b[0] +\n unitary_a[1] * unitary_b[1] +\n unitary_a[2] * unitary_b[2]))\n return dab",
"def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])",
"def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))",
"def __compute_distance(self, x, centroid):\n \n diff = x - centroid\n return np.sqrt(np.dot(diff.T, diff))",
"def getElectricalDistance(self, neighborID):\n\n if not neighborID in self.Neighbors: # neighborID is not a neighbor\n return -1\n\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] == neighborID:\n break;\n\n return self.ElectricalDistanceToNeighbors[n]",
"def distance_from_origin(self) -> float:\n return self._distance_from_origin",
"def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d",
"def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))",
"def distorted_distance(self):\n return self._distance",
"def euclidean_distance(self, other_point):\n return sqrt((self.x - other_point.x)**2 + (self.y - other_point.y)**2)",
"def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance",
"def euclidDist(x1, y1, x2, y2):\n c = math.sqrt(((x2-x1)**2) + ((y2-y1)**2))\n\n return c",
"def distance_factor(self):\n return self._distancefactor"
] | [
"0.66581625",
"0.6649904",
"0.6637657",
"0.6630217",
"0.6601542",
"0.6598605",
"0.6596507",
"0.63966656",
"0.636073",
"0.63055176",
"0.62724113",
"0.62593937",
"0.6219049",
"0.62136334",
"0.62126756",
"0.6210324",
"0.61876845",
"0.61710095",
"0.616928",
"0.61606264",
"0.6159046",
"0.6155477",
"0.61517185",
"0.61355484",
"0.6128655",
"0.6096839",
"0.60678494",
"0.6027739",
"0.60135216",
"0.60121363"
] | 0.8762926 | 0 |
Create a PLaSM cuboid with a color an put it on this atom coords. | def plasm_cube(self, size=0.1, color=WHITE):
return COLOR(color)(T([1,2,3])(self.coords)(CUBOID([size, size, size]))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_cube(color=COLOR_WHITE):\n a = Point3(-1.0, -1.0, -1.0)\n b = Point3(1.0, -1.0, -1.0)\n c = Point3(1.0, -1.0, 1.0)\n d = Point3(-1.0, -1.0, 1.0)\n e = Point3(-1.0, 1.0, -1.0)\n f = Point3(1.0, 1.0, -1.0)\n g = Point3(1.0, 1.0, 1.0)\n h = Point3(-1.0, 1.0, 1.0)\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glBegin(GL_QUADS)\n glColor4fv(color)\n drawVertexListCreateNormal([a, b, c, d])\n drawVertexListCreateNormal([b, f, g, c])\n drawVertexListCreateNormal([f, e, h, g])\n drawVertexListCreateNormal([e, a, d, h])\n drawVertexListCreateNormal([d, c, g, h])\n drawVertexListCreateNormal([a, e, f, b])\n glEnd()\n glPopMatrix()\n glEndList()\n return obj",
"def create_cube_solid(color=COLOR_WHITE):\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glColor4fv(color)\n try:\n glutSolidCube(1.0)\n except:\n if not _ERRS[3]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidCube\")\n _ERRS[3] = True\n glPopMatrix()\n glEndList()\n return obj",
"def cuboid(geometry,\n network,\n propname,\n **params):\n print('cuboid: nothing yet')",
"def generaCubo(self):\r\n #Use Panda predefined format for vertex coordinate only\r\n format = GeomVertexFormat.getV3()\r\n \r\n #Build Vertex data using the created format. Vertex will never change so I use Static attribute \r\n vdata = GeomVertexData('CuboData', format, Geom.UHStatic)\r\n \r\n #I will have to write vertex data so I create a writer for these data\r\n vertex = GeomVertexWriter(vdata, 'vertex')\r\n \r\n #I now use the writer to add vertex data\r\n vertex.addData3f(0, 0, 0)\r\n vertex.addData3f(1, 1, 1)\r\n vertex.addData3f(0, 1, 1)\r\n vertex.addData3f(0, 1, 0)\r\n vertex.addData3f(0, 0, 1)\r\n vertex.addData3f(1, 0, 0)\r\n vertex.addData3f(1, 0, 1)\r\n vertex.addData3f(1, 1, 0)\r\n \r\n #I now create 12 triangles\r\n prim = GeomTriangles(Geom.UHStatic)\r\n\r\n #and then I add vertex to them\r\n #Next time use addVertices(0,1,2) !!!\r\n prim.addVertex(7)\r\n prim.addVertex(0)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(3)\r\n prim.addVertex(0)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(2)\r\n prim.addVertex(6)\r\n prim.addVertex(4)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(6)\r\n prim.addVertex(2)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(7)\r\n prim.addVertex(2)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(2)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(3)\r\n prim.addVertex(4)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(2)\r\n prim.addVertex(4)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(0)\r\n prim.addVertex(6)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(4)\r\n prim.addVertex(6)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(5)\r\n prim.addVertex(1)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(6)\r\n prim.addVertex(1)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n #Create a Geom to bing vertex data to primitives\r\n geom = Geom(vdata)\r\n geom.addPrimitive(prim)\r\n\r\n #Create a node for the Geom in order to be able to render it\r\n node = GeomNode('gnode')\r\n node.addGeom(geom)\r\n\r\n #Adde the node to the scene graph == render it!\r\n nodePath = render.attachNewNode(node)\r\n \r\n #is this needed?\r\n nodePath.setPos( 0, 5, 0)\r\n \r\n self.camera.lookAt(nodePath)\r\n \r\n base.setBackgroundColor( .0, .0, .0 )\r\n \r\n taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")",
"def __init__(self, color):\n self.id = Piece.PIECE_ID\n self.crowned = False\n self.color = color\n\n Piece.PIECE_ID += 1",
"def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Gu'",
"def __init__(self):\n self.size = 16\n self.color = COLOR\n self.pos = self.spawn()",
"def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ca'",
"def display(self, color = (190,205,205), add = False): \r\n s += pgl.Shape(pgl.FaceSet( [[0,0,0],[1,0,0],[1,1,0],[0,1,0]], [[0,1,2,3]]) , pgl.Material((0,100,0)))",
"def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')",
"def __init__(self, color, location):\n\n self._color = color\n self._piece_type = None\n self._location = location",
"def create_cube():\n new_cube = RubicsCube2x2()\n show_cube_console(new_cube)\n\n seed = [10, 9, 17, 14, 11, 8, 3, 2, 17, 3, 9, 7, 15, 4, 14, 14, 3, 3, \\\n 13, 7, 15, 9, 14, 13, 11, 17, 7, 10, 5, 16, 11, 5, 7, 10, 14, \\\n 7, 17, 7, 8, 6, 12, 3, 6, 1, 16, 12, 5, 13, 3, 4]\n for move in seed:\n new_cube.do_move(move)\n return new_cube",
"def __init__(self, color):\n self._color = color # Color redefined when placed\n # Palace coords\n self._d = ['d1','d2','d3','d8','d9','d10'] \n self._e = ['e1','e2','e3','e8','e9','e10']\n self._f = ['f1','f2','f3','f8','f9','f10']\n self._special = self._d + self._f + self._e\n self._corners = ['d1','f1','e2','d3','f3','d8','d10','f8','f10','e9']",
"def __init__(self, nickname, position, direction, color, object_hash = None):\n GameObject.__init__(\n self,\n nickname,\n position,\n direction,\n color = color,\n remote_object = True,\n object_hash = object_hash\n )",
"def make_cube(r, g, b):\n ny, nx = r.shape\n R = np.zeros([ny, nx, 3])\n R[:,:,0] = r\n G = np.zeros_like(R)\n G[:,:,1] = g\n B = np.zeros_like(R)\n B[:,:,2] = b\n\n RGB = R + G + B\n\n return R, G, B, RGB",
"def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box",
"def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box",
"def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'So'",
"def __init__( self, seed=(1, 0, 0) ):\n x, y, z = seed\n self._coords = matrix( [[x], [y], [z], [1.]], 'd' )",
"def make_soma(self, size, location):\n bpy.ops.mesh.primitive_uv_sphere_add(segments=8, ring_count=8, size=size, location=location)\n # Name object as cell\n bpy.context.object.name = self.id\n # Save referrence\n self.blender_obj = bpy.context.object",
"def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'El'",
"def paint_square(self, pos, color, cr):\n cr.set_source_rgb(*color)\n i, j = pos\n cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)\n cr.fill()",
"def __init__(self, x, y, x2, y2, x3, y3, color=(255, 255, 255, 255),\n batch=None, group=None):\n self._x = x\n self._y = y\n self._x2 = x2\n self._y2 = y2\n self._x3 = x3\n self._y3 = y3\n self._rotation = 0\n self._num_verts = 3\n\n r, g, b, *a = color\n self._rgba = r, g, b, a[0] if a else 255\n\n program = get_default_shader()\n self._batch = batch or Batch()\n self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)\n\n self._create_vertex_list()\n self._update_vertices()",
"def place_cube(self,\n cube_xy,\n player=None,\n weight=1,\n azimuth=None,\n return_azimuth=False):\n\n self.color_idx += 1\n if self.color_idx == len(self.colors):\n self.color_idx = 0\n if azimuth is None:\n azimuth = np.random.randint(0, 180)\n else:\n assert azimuth >= 0 and azimuth <= 180\n cube_rot = self.p0.getQuaternionFromEuler([\n 0, 0, np.deg2rad(azimuth)\n ]) # rotated around which axis? # np.deg2rad(90)\n\n alpha = 1 # this could be set to .5 for some transparency\n\n if weight == 1:\n if player is None or self.four_colors:\n color = self.colors[self.color_idx] + [alpha]\n elif player == Player.Player:\n color = [0, 0, 1, 1]\n if DEBUG:\n print(\"Player putting down cube at\", cube_xy)\n elif player == Player.Enemy:\n color = [1, 0, 0, 1]\n if DEBUG:\n print(\"Opponent putting down cube at\", cube_xy)\n elif player == Player.Starter:\n color = [0, 0, 0, 1]\n if self.dark:\n color = [1, 1, 1, 1]\n if DEBUG:\n print(\"Starter cube at\", cube_xy)\n else:\n color = WEIGHT_COLORS[weight]\n\n max_z = self.find_highest_z(cube_xy, azimuth)\n\n cube_pos = [cube_xy[0], cube_xy[1], max_z + 1.0001]\n # print (\"placing cube at\",cube_pos)\n\n cube_visual = self.p0.createVisualShape(\n shapeType=self.p0.GEOM_BOX,\n rgbaColor=color,\n halfExtents=[1, 1, 1]\n # specularColor=[0.4, .4, 0],\n )\n\n cube = self.p0.createMultiBody(\n baseMass=weight,\n # baseInertialFramePosition=[0, 0, 0],\n baseCollisionShapeIndex=self.cube_collision,\n baseVisualShapeIndex=cube_visual,\n basePosition=cube_pos,\n baseOrientation=cube_rot,\n useMaximalCoordinates=True)\n\n self.cubes.append(cube)\n\n if max_z > self.current_max_z:\n self.current_max_z = np.around(max_z)\n out = True\n else:\n out = False\n\n if not return_azimuth:\n return out\n else:\n return out, azimuth",
"def put_object(self, surface, p, color):\n coords = self.transform_coordinates(p)\n if not self.in_display(coords):\n return\n pygame.draw.circle(surface,\n color,\n coords,\n int(p.radius / SCALE_FACTOR))",
"def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ge'\n self._in_check = False",
"def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ho'",
"def cube_colors(self, cubes):\n n = cubes.shape[0]\n col = np.zeros((n ** 3, 3))\n terrain_col = (66, 244, 72)\n empty_col = self.background\n for i in range(n):\n for j in range(n):\n for k in range(n):\n c = cubes[i, j, k]\n col[i * n ** 2 + j * n + k] = empty_col if c.state == 'empty' else terrain_col\n self.wireframe_col = col",
"def cube(self):\n\n dims = self.voxels.shape\n max_dim = max(dims)\n \n x_target = (max_dim - dims[0]) / 2\n y_target = (max_dim - dims[1]) / 2\n z_target = (max_dim - dims[2]) / 2\n\n self.voxels = np.pad(self.voxels,\n ((int(np.ceil(x_target)), int(np.floor(x_target))),\n (int(np.ceil(y_target)), int(np.floor(y_target))),\n (int(np.ceil(z_target)), int(np.floor(z_target)))),\n 'constant',\n constant_values=(0))\n\n self.point_position = self.point_position + [np.ceil(z_target),\n np.ceil(y_target),\n np.ceil(x_target)]\n\n return(self)",
"def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ch'"
] | [
"0.6723132",
"0.6348297",
"0.60369486",
"0.5786166",
"0.57097834",
"0.5662898",
"0.56111765",
"0.5570667",
"0.5565409",
"0.5560575",
"0.55361265",
"0.5518773",
"0.5501009",
"0.5469816",
"0.54623425",
"0.54447734",
"0.54447734",
"0.5434046",
"0.54167914",
"0.53902054",
"0.53896713",
"0.5383581",
"0.53749996",
"0.53689116",
"0.5365708",
"0.53643614",
"0.5325049",
"0.5314483",
"0.5308681",
"0.5291437"
] | 0.77502567 | 0 |
Checks that the GsmModem in PDU mode accepts outgoing SMS, when the text is within ASCII chars 22 126. | def testSendSmsPduMode(self):
# setup expectation to raise a timeout error with prompt
err = errors.GsmReadTimeoutError(">")
when(self.mockDevice).read_lines().thenRaise(err).thenReturn(self.oklines)
self.gsm.send_sms("1234", "Test Message")
# must see command with size
verify(self.mockDevice, times=1).write("AT+CMGS=21\r")
# must see command with text and terminating char
verify(self.mockDevice, times=1).write("00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\x1a")
# allow any number of reads
verify(self.mockDevice, atleast=1).read_lines()
verifyNoMoreInteractions(self.mockDevice) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_sms_valid(text=''):\n try:\n text.decode('ascii')\n except:\n return False\n if len(text) > 160:\n return False\n\n return True",
"def testSendSmsPduModeError(self):\n\n # setup expectation to raise a non-timeout error with prompt\n when(self.mockDevice).read_lines().thenRaise(Exception(\"something other than timeout\"))\n self.gsm.send_sms(\"1234\", \"Test Message\")\n \n # must see command with size\n verify(self.mockDevice, times=1).write(\"AT+CMGS=21\\r\")\n # must see command to break out of command prompt\n verify(self.mockDevice, times=1).write(\"\\x1b\")\n # must NOT see command with text and terminating char\n verify(self.mockDevice, times=0).write(\"00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\\x1a\")\n # allow any number of reads\n verify(self.mockDevice, atleast=1).read_lines()\n verifyNoMoreInteractions(self.mockDevice)",
"def send_sms_via_modem(self, mobile, text=\"\"):\n\n mobile = self.sanitise_phone(mobile)\n\n # Add '+' before country code\n mobile = \"+\" + mobile\n\n try:\n self.modem.send_sms(mobile, text)\n return True\n except:\n return False",
"def astral(msg):\r\n return any(ord(c) > 0xFFFF for c in msg)",
"def _validate_ascii(message):\n return all(ord(c) < 128 for c in message)",
"def isSpamSMS(textLine):\n\treturn re.sub(\"[\\^w]\", \" \", textLine).split()[0].lower() == \"spam\"",
"def isvalidport(txt):\n return txt.isdigit() and int(txt) <= 65535 and int(txt) >= 0",
"def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True",
"def verify_text(self, text):\n pass",
"def _text(self, fromwhom, number, text):\n\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(self._smsaddress, self._smspw)\n server.sendmail(str(fromwhom), '{}@vtext.com'.format(number),\n str(text))\n server.quit()",
"def send_sms(self, sms):\n pass",
"def check_ascii_compliance(plaintext: bytes) -> bool:\n return all(c < 128 for c in plaintext)",
"def test_get_sms_message(self):\n pass",
"def is_addressed_to_us(cls, msg):\n return msg.fields.get('to_addr') in cls.acceptable_to_numbers",
"def is_valid_msg(msg):\n for char in msg:\n if char not in string.ascii_letters and char not in string.punctuation and char != ' ':\n return False\n return True",
"def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True",
"def validate_message(self, message):\n\n for char in message:\n if ord(char) < 65 or ord(char) > 90:\n raise ValueError('Invalid message. Enigma Machine only supports messages composed of uppercase letters')",
"def sms():\n def send_sms(number, message):\n #get session bus\n try:\n session_bus = dbus.SessionBus()\n except dbus.exceptions.DBusException:\n click.echo(chalk.red('Have a display you must'))\n return\n\n #check for kdeconnect\n try:\n devices_dbus_obj = session_bus.get_object('org.kde.kdeconnect','/modules/kdeconnect/devices')\n except dbus.exceptions.DBusException:\n click.echo(chalk.red('kdeconnect not installed it appears'))\n return\n\n #get devices ids\n devices_xml = devices_dbus_obj.Introspect(dbus_interface='org.freedesktop.DBus.Introspectable')\n devices_xml = ET.fromstring(devices_xml)\n nodes = devices_xml.findall('node')\n if(len(nodes) is 0):\n click.echo(chalk.red('Devices there are not'))\n return\n deviceIDs = list()\n for node in nodes:\n deviceIDs.append(node.get('name'))\n\n #get devices properties\n deviceID_Props = dict()\n for ID in deviceIDs:\n try:\n device = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + ID)\n deviceProps = device.GetAll('', dbus_interface='org.freedesktop.DBus.Properties')\n deviceID_Props[ID] = deviceProps\n except dbus.exceptions.DBusException:\n #don't create an entry in the dictionary if the object, or a GetAll method does not exist\n pass\n if(len(deviceID_Props) is 0):\n click.echo(chalk.red('Devices there are not'))\n return\n\n #eliminate non sms devices\n devices_no_sms = list()\n for device in deviceID_Props:\n keeping = False\n for plugin in deviceID_Props[device]['supportedPlugins']:\n if('sms' in plugin):\n keeping = True\n if(not keeping):\n devices_no_sms.append(device)\n for device in devices_no_sms:\n del deviceID_Props[device]\n\n #if there are no devices that support sms\n if(len(deviceID_Props) is 0):\n click.echo(chalk.red('Devices that support sms there are not'))\n return\n #elif only one device was found that supports sms\n elif(len(deviceID_Props) is 1):\n click.echo(chalk.yellow('Device using: ' + str(list(deviceID_Props.values())[0]['name'])))\n sendMessage = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + str(list(deviceID_Props.keys())[0]) + '/sms')\n sendMessage.sendSms(number, message, dbus_interface='org.kde.kdeconnect.device.sms')\n return\n #otherwise get user to choose device\n else:\n choice_map = dict()\n for idx, device in enumerate(deviceID_Props, start=1):\n click.echo(chalk.green(str(idx) + ': ' + deviceID_Props[device]['name']))\n choice_map[str(idx)] = device\n choice = click.prompt(chalk.blue('Device, you must select: '), default='1', type=click.Choice(choice_map.keys()))\n #click.echo('you chose: ' + choice_map[the_chosen_device] + ' with id: ' + deviceNames_IDs[choice_map[the_chosen_device]])\n sendMessage = session_bus.get_object('org.kde.kdeconnect', '/modules/kdeconnect/devices/' + choice_map[choice] + '/sms')\n sendMessage.sendSms(number, message, dbus_interface='org.kde.kdeconnect.device.sms')\n return\n\n click.echo(chalk.blue('For whom you want to send an sms'))\n friend_name = input().strip()\n friend_name_lower = friend_name.lower()\n if os.path.isfile(PEOPLE_CONFIG_FILE_PATH):\n with open(PEOPLE_CONFIG_FILE_PATH) as fin:\n contents = yaml.load(fin)\n entries = contents['entries']\n for entry in entries:\n if(friend_name == entry['name'] or friend_name_lower == entry['name']):\n number = entry['mobile']\n break\n if('number' not in locals()):\n click.echo(chalk.red('Friend not found.'))\n else:\n if(len(number) is not 0):\n click.echo(chalk.blue('Message, you must enter: '))\n message = input(':')\n 
click.echo(chalk.yellow('Device to send sms to ' + number + ' looking for: '))\n send_sms(number, message)\n else:\n click.echo(chalk.red('Friends number not in people file, run `yoda people setup` to add it.'))\n else:\n click.echo(chalk.red('The People file does not exist, run `yoda people setup` to create an entry.'))",
"def isMAC(s):\n\n s = s.replace(':', '')\n if len(s) != 12: return 0\n for char in s:\n if re.compile('[a-zA-Z0-9]+').match(char) == None: return 0\n return 1",
"def send_text(msg, up):\n try:\n client = TwilioRestClient(account=TWILIO_ACCOUNT_SID,\n token=TWILIO_AUTH_TOKEN)\n c = client.sms.messages.create(to=up.phone,\n from_=WATTTIME_PHONE,\n body=msg.msg)\n TwilioSMSEvent(user=up.user,\n msg_type=msg.msg_type,\n to_number=up.phone,\n from_number=WATTTIME_PHONE,\n body=msg.msg).save()\n\n debug(\"texted '{}' to {}\".format(msg, str(up.name)))\n return True\n except:\n print (\"Faild message\", up.phone, WATTTIME_PHONE, msg.msg)\n debug(\"failed to text '{}' to {}\".format(msg, str(up.name)))\n return False",
"def isValid(text):\n return bool(re.search(r'\\R2D2\\b', text, re.IGNORECASE))",
"def parseable(message_data):\n if not message_data:\n raise TypeError('message_data must not be None')\n\n acceptable = range(97, 123) + range(65, 91) + range(48, 58) + range(33, 43) + range(44, 48) + [58, 63, 64, 94]\n return any(ord(c) not in acceptable for c in message_data['message'].replace(' ', ''))",
"def check_pass(text):\r\n\r\n upperRegex = re.compile(r'[A-Z]')\r\n lowerRegex = re.compile(r'[a-z]')\r\n lengthRegex = re.compile(r'.{8,}')\r\n digitRegex = re.compile(r'\\d')\r\n\r\n if not upperRegex.search(text):\r\n return False\r\n elif not lowerRegex.search(text):\r\n return False\r\n elif not lengthRegex.search(text):\r\n return False\r\n elif not digitRegex.search(text):\r\n return False\r\n else:\r\n return True",
"def test_smsmessage_user():",
"def isScintillator(string, pos):\n return ((pos == 65 or pos == 66) and\n (string == 12 or string == 62))",
"def sms_code(string: str) -> Union[str, None]:\n m = p.search(string.strip())\n if m: m = m.group().replace('-', '').replace('sms:', '')\n return m",
"def send_sms(self, message, to=CONTACT_NUMBER):\n try:\n pbx_alarm = PBXAlert()\n pbx_alarm.send_sms(self.tinfo['message'])\n if self.verbose:\n print(\"{} Successfully sent SMS!\".format(Timer.OK))\n return True\n except Exception as e:\n print(\"{} Caught exception in send_sms: {}\".format(Timer.FAIL, e))\n return False",
"def _msim_message_test(\n self,\n ad_mo,\n ad_mt,\n mo_sub_id,\n mt_sub_id, msg=\"SMS\",\n max_wait_time=MAX_WAIT_TIME_SMS_RECEIVE,\n expected_result=True):\n\n if msg == \"SMS\":\n for length in self.message_lengths:\n message_array = [rand_ascii_str(length)]\n if not sms_send_receive_verify_for_subscription(\n self.log,\n ad_mo,\n ad_mt,\n mo_sub_id,\n mt_sub_id,\n message_array,\n max_wait_time):\n ad_mo.log.warning(\n \"%s of length %s test failed\", msg, length)\n return False\n else:\n ad_mo.log.info(\n \"%s of length %s test succeeded\", msg, length)\n self.log.info(\"%s test of length %s characters succeeded.\",\n msg, self.message_lengths)\n\n elif msg == \"MMS\":\n for length in self.message_lengths:\n message_array = [(\"Test Message\", rand_ascii_str(length), None)]\n\n if not mms_send_receive_verify(\n self.log,\n ad_mo,\n ad_mt,\n message_array,\n max_wait_time,\n expected_result):\n self.log.warning(\"%s of body length %s test failed\",\n msg, length)\n return False\n else:\n self.log.info(\n \"%s of body length %s test succeeded\", msg, length)\n self.log.info(\"%s test of body lengths %s succeeded\",\n msg, self.message_lengths)\n return True",
"def isValid(text):\n return bool(re.search(r\"\\b((close|activate)\\ (check|tunnel|ubuntu|fedora|windows))\\b\", text, re.IGNORECASE))",
"def validate_seq(sequence):\n sequence = sequence.strip()\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('^[ACTGNRYSWKMBDHVEFILPQSXZ]*$', re.I)\n if regex.search(sequence) is not None:\n return True\n else:\n return False"
] | [
"0.7446568",
"0.6175791",
"0.59026414",
"0.5805947",
"0.56742376",
"0.5664919",
"0.55345494",
"0.5473586",
"0.5392989",
"0.5327824",
"0.53224766",
"0.53210604",
"0.5278678",
"0.52711344",
"0.5263743",
"0.52219886",
"0.52177036",
"0.51783454",
"0.5167334",
"0.51671714",
"0.516648",
"0.51610374",
"0.5158195",
"0.51408297",
"0.5066006",
"0.50655586",
"0.50389147",
"0.5034926",
"0.5024629",
"0.501616"
] | 0.6486989 | 1 |