query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Checks if the cell has valid geometry. There are at least two reasons why a cell might have invalid geometry. | def validCellGeometry(self, ijk = None , global_index = None , active_index = None):
gi = self.__global_index( global_index = global_index , ijk = ijk , active_index = active_index)
return self._valid_cell( gi ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkGeom(geodataframe):\n for geometry in geodataframe.geometry:\n if explain_validity(geometry) != 'Valid Geometry':\n print(explain_validity(geometry))",
"def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False",
"def ensure_valid_data():\n cursor = connection.cursor()\n cursor.execute(\"SELECT id, name, st_area(geom) FROM firestation_firedepartment where st_area(geom)>6.99\")\n messages = []\n\n for id, name, area in cursor.fetchall():\n messages.append('{0} ({1}) has an area of {2}.'.format(name, id, area))\n\n if messages:\n mail_admins('Invalid Geometries Detected', message='\\n'.join(messages))\n\n cursor.execute(\"SELECT COUNT(*) FROM genericm2m_relatedobject;\")\n generic_count = cursor.fetchone()\n\n if generic_count[0] < 2940:\n generic_count_message = \"Related government units has dropped below 2,940.\"\n mail_admins('Low number of government units alert.', message=generic_count_message)",
"def is_valid(geometry, **kwargs):\n # GEOS is valid will emit warnings for invalid geometries. Suppress them.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n result = lib.is_valid(geometry, **kwargs)\n return result",
"def is_valid_input(geometry, **kwargs):\n return lib.is_valid_input(geometry, **kwargs)",
"def valid(self):\r\n if self.file_exists and len(self.missing_columns) == 0 and len(self.veg_columns) > 0 and \\\r\n len(self.lat_errors) == 0 and len(self.lon_errors) == 0 and len(self.time_errors) == 0 and len(self.date_errors) == 0:\r\n return True\r\n else:\r\n return False",
"def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid",
"def _is_valid(self):\n for cell in self._cells_iterable():\n if cell not in self._valid_elements:\n return False\n return True",
"def is_valid_reason(geometry, **kwargs):\n return lib.is_valid_reason(geometry, **kwargs)",
"def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()",
"def test_cover_geometry_empty_geoms(tiler):\n assert not cover_geometry(tiler, geometry.Point(), 0) == True\n assert not cover_geometry(tiler, geometry.MultiPoint(), 0) == True\n assert not cover_geometry(tiler, geometry.LineString(), 0) == True\n assert not cover_geometry(tiler, geometry.MultiLineString(), 0) == True\n assert not cover_geometry(tiler, geometry.Polygon(), 0) == True\n assert not cover_geometry(tiler, geometry.MultiPolygon(), 0) == True\n assert not cover_geometry(tiler, geometry.GeometryCollection(), 0) == True",
"def verify_cell_details(app, style, color, cell):\n error = ''\n found_cell = find_cell(app, cell)\n if found_cell is None:\n return 'Cell not found. may be invalid column and row in block or cell'\n # verify state: raised or normal\n if found_cell.get_style() != style:\n return f'Cell invalid style: expected {style}, actual: {found_cell.get_style()}'\n if color is not None and found_cell.get_cell_color() != color:\n return f'Cell not expected color: expected: {color}, actual: {found_cell.get_cell_color()}'\n\n return error",
"def verify_geometry(data):\n lon, lat, alt = None, None, None\n properties = data['properties']\n delete = []\n try:\n for p, value in properties.items():\n if p.lower().strip() == 'longitude' or p.lower().strip() == 'lon' or p.lower().strip() == 'lng' or p.lower().strip() == 'long':\n lon = value\n delete.append(p)\n elif p.lower().strip() == 'latitude' or p.lower().strip() == 'lat': \n lat = value\n delete.append(p)\n elif p.lower().strip() == 'altitude' or p.lower().strip() == 'alt': \n alt = value\n delete.append(p) \n if lon is not None and lat is not None:\n if data['geometry'] is None: ## this retains geometry if it exists, is that ok?\n data['geometry'] = {'type': \"Point\", 'coordinates': [float(lon), float(lat), float(alt) if alt is not None else None]}\n for p in delete:\n del properties[p]\n data['properties'] = properties \n except Exception as e:\n log.error(\"Error parsing coordinates: %s\" % log.exc(e))\n return data",
"def test_051128_invalid(self):\n spc = parser(get_file('PTSDY1_biggeom2.txt'))\n # spc.draw_outlooks()\n spc.sql(self.txn)\n outlook = spc.get_outlook('WIND', 'SIGN', 1)\n self.assertTrue(outlook.geometry.is_empty)\n self.assertEquals(len(spc.warnings), 2, \"\\n\".join(spc.warnings))",
"def _checkSettings(self):\n geomsThatNeedMeshSize = (\"1D slab\", \"1D cylinder\")\n if self.geometry in geomsThatNeedMeshSize:\n if self.meshSubdivisionsPerCm is None:\n raise ValueError(\n \"{} geometry requires `mesh points per cm` to be defined in cross sections.\".format(\n self.geometry\n )\n )\n if self.criticalBuckling != False:\n raise ValueError(\n \"{} geometry cannot model critical buckling. Please disable\".format(\n self.geometry\n )\n )",
"def check(self):\n gAsset = cmds.ls(type='gAsset')\n\n render_geo = []\n if gAsset:\n trans = cmds.listRelatives(gAsset[0], p=True, f=True)\n meshes = cmds.listRelatives(trans, ad=True, type='mesh', f=True)\n if meshes:\n render_geo.extend(meshes)\n # for item in meshes:\n # trans = cmds.listRelatives(item, p=True, f=True)\n # render_geo.extend(trans)\n\n if not pm.ls(\"*.grid_renderGeo\"):\n self.status = self.errorMode\n self.addError(\"No geometry's are tagged as render geo\")\n self.errorMessage = \"No geometry is tagged as render geo\"\n elif not len(set(cmds.ls(\"*.grid_renderGeo\"))) == len(render_geo):\n self.status = self.errorMode\n self.addError(\"Not all Geo tags under gasset\")\n self.errorMessage = \"Not all Geo tags under gasset\"\n else:\n self.status = \"OK\"\n else:\n self.addError(\"No Gasset found\")\n self.errorMessage = \"No gasset found\"",
"def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False",
"def test_cover_geometry_nonshapely_geom(tiler):\n with pytest.raises(ValueError):\n for tile in cover_geometry(tiler, None, 0):\n pass",
"def _validate(self):\n if self.region_type not in regions_attributes:\n raise ValueError(\"'{}' is not a valid region type in this package\"\n .format(self.region_type))\n\n if self.coordsys not in valid_coordsys['DS9'] + valid_coordsys['CRTF']:\n raise ValueError(\"'{}' is not a valid coordinate reference frame \"\n \"in astropy\".format(self.coordsys))",
"def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height",
"def test_polygon_with_duplicate_nodes_is_valid():\n geom = query_row(db_conf, 'osm_landusages', 30005)['geometry']\n assert geom.is_valid\n assert len(geom.exterior.coords) == 4",
"def valid_guess(self, row, col):\n # if row nor col is at an edge space, returns False\n if not isinstance(row, int) or not isinstance(col, int):\n return False\n # ensures no corner spaces have been selected\n if row < 1 or row > 8:\n return False\n if col < 1 or col > 8:\n return False\n return True",
"def has_geom(self):\n return bool(self.give_geom())",
"def is_legal(self):\n if not self._is_valid():\n return False\n\n if not self._check_all(self.get_rows()):\n return False\n\n if not self._check_all(self.get_cols()):\n return False\n\n if not self._check_all(self.get_blocks()):\n return False\n\n return True",
"def _validate(self) -> None:\n for box in self.boxes:\n if any(box[0] == s[0] and box[1] == s[1] for s in self.wall_squares):\n raise RuntimeError('In illegal state. Box should not be inside wall.')\n if box[0] == self.current_location[0] and box[1] == self.current_location[1]:\n raise RuntimeError('In illegal state. Box should not be inside player.')\n if any(self.current_location[0] == s[0] and self.current_location[1] == s[1] for s in self.wall_squares):\n raise RuntimeError('In illegal state. Player should not be inside wall.')",
"def isValid(self):\n for ir in range(self.nRow): # Check rows for duplicates\n row = ir + 1\n vals = {}\n for ic in range(self.nCol):\n col = ic + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"doing row {row} at col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n \n for ic in range(self.nCol): # Check cols for duplicates\n col = ic + 1\n vals = {}\n for ir in range(self.nRow):\n row = ir + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"at row={row} doing col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n return True",
"def is_valid_geometry(self, value: List) -> bool:\n\n def check_geom(geom):\n if isinstance(geom, (Point, MultiPoint)):\n out = 'point' in self._permitted_geometries\n if not out:\n logger.error('Not allowed point type geometry components')\n return out\n elif isinstance(geom, (LineString, MultiLineString)):\n out = 'line' in self._permitted_geometries\n if not out:\n logger.error('Not allowed line type geometry components')\n return out\n elif isinstance(geom, (Polygon, MultiPolygon)):\n out = 'polygon' in self._permitted_geometries\n if not out:\n logger.error('Not allowed polygon type geometry components')\n return out\n elif isinstance(geom, GeometryCollection):\n out = True\n for entry in geom.geometries:\n out &= check_geom(entry)\n return out\n else:\n raise TypeError('Got unexpected geometry type `{}`'.format(type(geom)))\n\n if self._permitted_geometries is None or value is None:\n return True\n\n if isinstance(value, str):\n return value.lower().strip() in self._permitted_geometries\n if not isinstance(value, Geometry):\n raise TypeError('Got unexpected geometry type `{}`'.format(type(value)))\n return check_geom(value)",
"def test_generalized_banana_polygon_is_valid():\n park = query_row(db_conf, 'osm_landusages', 7101)\n # geometry is not valid\n assert not park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen0', 7101)\n # but simplified geometies are valid\n assert park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen1', 7101)\n assert park['geometry'].is_valid, park",
"def init_grid_geometry(self):\n self.vc = self.grid.cells_center() # circumcenters\n self.ec = self.grid.edges_center()\n \n self.c1 = self.grid.edges['cells'][:,0]\n self.c2 = self.grid.edges['cells'][:,1]\n\n # distance between cell centers\n self.d_j = utils.mag( self.vc[self.c1] - self.vc[self.c2] )\n bdry=self.c2<0\n # grid has a few places where vc is coincident with outer boundary, thanks\n # to janet\n self.d_j[bdry] = 2*utils.mag( self.vc[self.c1[bdry]] - self.ec[bdry] ).clip(self.d_j_min,np.inf)\n self.l_j = self.grid.edges_length()\n\n self.normal_j = self.grid.edges_normals()\n self.area_c = self.grid.cells_area()\n\n self.K_j = 100*np.ones(self.grid.Nedges())\n\n j_valid=~self.grid.edges['deleted']\n\n print(\"Checking finite geometry\")\n assert np.all( np.isfinite(self.d_j[j_valid]))\n assert np.all( np.isfinite(self.l_j[j_valid]))\n assert np.all( np.isfinite(self.area_c))\n assert np.all( np.isfinite(self.normal_j[j_valid]))\n assert np.all( self.d_j[j_valid] > 0 )\n assert np.all( self.l_j[j_valid] > 0 )\n assert np.all( self.area_c > 0 )",
"def is_valid(self) -> bool:\n return \\\n (self.spatial is None or all([v(self.spatial)\n for v, _ in self.spatial_validations])) \\\n and \\\n (self.temporal is None or all([v(self.temporal)\n for v, _ in self.temporal_validations]))"
]
| [
"0.7485371",
"0.68256056",
"0.6738727",
"0.6689731",
"0.6644501",
"0.65865844",
"0.6509728",
"0.6504427",
"0.64611405",
"0.643762",
"0.6320081",
"0.6297482",
"0.62604874",
"0.62329763",
"0.61944276",
"0.61709315",
"0.6167919",
"0.6162139",
"0.6156477",
"0.61471033",
"0.6139188",
"0.61368924",
"0.6121156",
"0.6115455",
"0.611294",
"0.61002403",
"0.60927236",
"0.600251",
"0.59693867",
"0.5963741"
]
| 0.6928083 | 1 |
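A minimal usage sketch for the row above: the grid class, import path and file name are assumptions and only the `validCellGeometry()` signature comes from the document cell, which accepts exactly one of three cell addressing schemes.

```python
# Hypothetical sketch -- EclGrid, the import path and "CASE.EGRID" are assumptions;
# the keyword arguments are taken from the validCellGeometry() signature above.
from ecl.grid import EclGrid  # assumed import path

grid = EclGrid("CASE.EGRID")  # assumed grid file

# Address the cell in exactly one of three ways:
print(grid.validCellGeometry(ijk=(10, 20, 5)))    # by (i, j, k) indices
print(grid.validCellGeometry(global_index=1234))  # by global index
print(grid.validCellGeometry(active_index=1000))  # by active index
```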
Lookup global index based on ijk or active index. | def get_global_index( self , ijk = None , active_index = None):
gi = self.__global_index( active_index = active_index , ijk = ijk)
return gi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def global_index( self , active_index = None, ijk = None):\n return self.__global_index( active_index = active_index , ijk = ijk )",
"def get_active_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_index1( gi)",
"def __global_index( self , active_index = None , global_index = None , ijk = None):\n\n set_count = 0\n if not active_index is None:\n set_count += 1\n\n if not global_index is None:\n set_count += 1\n\n if ijk:\n set_count += 1\n \n if not set_count == 1:\n raise ValueError(\"Exactly one of the kewyord arguments active_index, global_index or ijk must be set\")\n \n if not active_index is None:\n global_index = self._get_global_index1A( active_index )\n elif ijk:\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n \n i,j,k = ijk\n\n if not 0 <= i < nx:\n raise IndexError(\"Invalid value i:%d Range: [%d,%d)\" % (i , 0 , nx)) \n\n if not 0 <= j < ny:\n raise IndexError(\"Invalid value j:%d Range: [%d,%d)\" % (j , 0 , ny)) \n \n if not 0 <= k < nz:\n raise IndexError(\"Invalid value k:%d Range: [%d,%d)\" % (k , 0 , nz)) \n\n global_index = self._get_global_index3( i,j,k)\n else:\n if not 0 <= global_index < self.getGlobalSize():\n raise IndexError(\"Invalid value global_index:%d Range: [%d,%d)\" % (global_index , 0 , self.getGlobalSize())) \n return global_index",
"def global_index(self):\n raise NotImplementedError",
"def get_active_fracture_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_fracture_index1( gi )",
"def get_ijk( self, active_index = None , global_index = None):\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n\n gi = self.__global_index( active_index = active_index , global_index = global_index)\n self._get_ijk1( gi , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k))\n\n return (i.value , j.value , k.value)",
"def get_index(self, name):\n for index in self.indexes:\n if index.name == name:\n return index\n return None",
"def local_to_global(local_index):\n return global_index.value.get(tokens.value[local_index], -1)",
"def active( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n active_index = self._get_active_index1( gi)\n if active_index >= 0:\n return True\n else:\n return False",
"def create_global_index(self):\n\n LOGGER.debug(\"Indexing ...\")\n errors = []\n err = self.index_team_keys()\n if err is not None:\n errors.append(err)\n\n err = self.index_player_keys()\n if err is not None:\n errors.append(err)\n\n err_len = len(errors)\n\n if err_len == 0:\n LOGGER.debug(\"Indexing finished.\")\n return None\n elif err_len > 0:\n LOGGER.error(f\"Indexing finished with {err_len} error(s).\")\n return errors",
"def lookup(index,keyword):\n\tif keyword in index:\n\t\treturn index[keyword]\n\treturn None",
"def get_default_index(session_key):\n app_config = get_app_config(session_key)\n\n if app_config is None:\n return \"default\"\n else:\n return app_config.index",
"def get_lookup(self, cell_status, num_neighbors):\n return self.lookup[cell_status,num_neighbors]",
"def get_index(self, _quals):\n return self._options['index']",
"def cloud_index():\n import alltheitems.cloud\n return alltheitems.cloud.index()",
"def _index_lookup(self, key: int) -> str:\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token",
"def get_global_index1F( self , active_fracture_index):\n return self._get_global_index1F( active_fracture_index )",
"def get_jnt_index(self, jnt_name):\n return self.wrapper.get_jnt_index(self.instance, jnt_name.encode('utf-8'))",
"def internal_location_indexer(self):\n return self.indexer_by_pid_type(INTERNAL_LOCATION_PID_TYPE)",
"def get_index(self, *args, **dargs):\n pass",
"def find_index(self, obj):\n return self.model.indexlist[obj]",
"def _get_index(passed_id=None):\r\n if passed_id:\r\n index_matcher = re.search(r'.*?/?(\\d+)$', passed_id)\r\n if index_matcher:\r\n return int(index_matcher.group(1))\r\n\r\n # return 0 if no index found\r\n return 0",
"def global_start_index(self):\n return self._global_start_index",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def use_local_search(self) -> bool:\n return self.__use_local_search",
"def fetchindexed(ad):\n\n # Add the macro to the list of recognized macros.\n ad.AddMacro('.fetchindexed', 3, [ ['','symbol'] ]);\n\n # Define the macro functionality.\n def emitFunction(ad,fp,argument):\n (addr,ixBank,bankName) = ad.Emit_GetAddrAndBank(argument[0]);\n ad.EmitPush(fp,addr,ad.Emit_String(argument[0]['value']),argument[0]['loc']);\n ad.EmitOpcode(fp,ad.InstructionOpcode('+'),'+');\n ad.EmitOpcode(fp,ad.specialInstructions['fetch'] | ixBank,'fetch '+bankName);\n\n ad.EmitFunction['.fetchindexed'] = emitFunction;",
"def _get_ea_index():\n ea_index_temp = {'Address': 5, 'Agency': 10, 'City': 4, 'Country': 3,\n 'Datacenter': 7, 'Division': 8, 'Interface Name': 13,\n 'Region_List': 2, 'Requester Email': 9, 'Site': 6,\n 'VLAN Description': 11, 'IPR Designation': 16}\n return ea_index_temp",
"def dummy_search(query):\n ii = InvertedIndex()\n return ii.lookup_query(query)",
"def _index(self):\n annotations = IAnnotations(self.portal)\n # create the error reference storage\n if annotations.get(INDEX_KEY) is None:\n annotations[INDEX_KEY] = OOBTree()\n return annotations[INDEX_KEY]",
"def cell_regular(self, active_index = None , global_index = None , ijk = None):\n gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)\n return self._cell_regular( gi )"
]
| [
"0.81544656",
"0.78202045",
"0.73211944",
"0.705059",
"0.64130604",
"0.6121929",
"0.59489614",
"0.5895438",
"0.58456194",
"0.5773087",
"0.56557363",
"0.5576186",
"0.5565344",
"0.55652833",
"0.552736",
"0.5520127",
"0.5494459",
"0.5486436",
"0.54677886",
"0.54393417",
"0.5429441",
"0.54156405",
"0.5369826",
"0.53559846",
"0.5340176",
"0.5332741",
"0.53267735",
"0.53224933",
"0.52994204",
"0.52929866"
]
| 0.8171045 | 0 |
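A hedged round-trip between the three index schemes referenced in this row; the class name, import path and file name are assumptions, while `get_global_index()`, `get_active_index()` and `get_ijk()` are taken from the code snippets shown above (the related snippets suggest the active index is negative for inactive cells).

```python
# Hypothetical sketch -- EclGrid, the import and the file name are assumptions.
from ecl.grid import EclGrid  # assumed import path

grid = EclGrid("CASE.EGRID")  # assumed grid file

gi = grid.get_global_index(ijk=(4, 7, 2))     # (i, j, k) -> global index
ai = grid.get_active_index(global_index=gi)   # global -> active index (negative if inactive)
ijk = grid.get_ijk(global_index=gi)           # global index back to (i, j, k)
print(gi, ai, ijk)
```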
Will return the (x,y,z) for the node given by (i,j,k). Observe that this method does not consider cells, but the nodes in the grid. This means that the valid input ranges for i, j and k are upper end inclusive. To get the four | def getNodePos(self , i , j , k):
if not 0 <= i <= self.getNX():
raise IndexError("Invalid I value:%d - valid range: [0,%d]" % (i , self.getNX()))
if not 0 <= j <= self.getNY():
raise IndexError("Invalid J value:%d - valid range: [0,%d]" % (j , self.getNY()))
if not 0 <= k <= self.getNZ():
raise IndexError("Invalid K value:%d - valid range: [0,%d]" % (k , self.getNZ()))
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_corner_xyz( i,j,k , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z))
return (x.value , y.value , z.value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getNodeXYZ(self , i,j,k):\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n\n corner = 0\n \n if i == nx:\n i -= 1\n corner += 1\n\n if j == ny:\n j -= 1\n corner += 2\n\n if k == nz:\n k -= 1\n corner += 4\n\n if self._ijk_valid( i , j , k):\n return self.get_corner_xyz( corner , global_index = i + j*nx + k*nx*ny )\n else:\n raise IndexError(\"Invalid coordinates: (%d,%d,%d) \" % (i,j,k))",
"def graph_3d_grid(xyz, k=18):\n if np.size(xyz) == 0:\n return None\n lxyz = xyz - xyz.min(0)\n m = 3 * lxyz.max(0).sum() + 2\n\n # six neighbours\n n6 = [np.array([1, m, m ** 2]), np.array([m ** 2, 1, m]),\n np.array([m, m ** 2, 1])]\n\n # eighteen neighbours\n n18 = [np.array([1 + m, 1 - m, m ** 2]),\n np.array([1 + m, m - 1, m ** 2]),\n np.array([m ** 2, 1 + m, 1 - m]),\n np.array([m ** 2, 1 + m, m - 1]),\n np.array([1 - m, m ** 2, 1 + m]),\n np.array([m - 1, m ** 2, 1 + m])]\n\n # twenty-six neighbours\n n26 = [np.array([1 + m + m ** 2, 1 - m, 1 - m ** 2]),\n np.array([1 + m + m ** 2, m - 1, 1 - m ** 2]),\n np.array([1 + m + m ** 2, 1 - m, m ** 2 - 1]),\n np.array([1 + m + m ** 2, m - 1, m ** 2 - 1])]\n\n # compute the edges in each possible direction\n def create_edges(lxyz, nn, l1dist=1, left=np.array([]), right=np.array([]),\n weights=np.array([])):\n q = 0\n for nn_row in nn:\n v1 = np.dot(lxyz, nn_row)\n o1 = np.argsort(v1)\n sv1 = v1[o1]\n nz = np.squeeze(np.nonzero(sv1[: - 1] - sv1[1:] == - l1dist))\n o1z, o1z1 = o1[nz], o1[nz + 1]\n left = np.hstack((left, o1z, o1z1))\n right = np.hstack((right, o1z1, o1z))\n q += 2 * np.size(nz)\n weights = np.hstack((weights, np.sqrt(l1dist) * np.ones(q)))\n return left, right, weights\n\n i, j, d = create_edges(lxyz, n6, 1.)\n if k >= 18:\n i, j, d = create_edges(lxyz, n18, 2, i, j, d)\n if k == 26:\n i, j, d = create_edges(lxyz, n26, 3, i, j, d)\n i, j = i.astype(np.int_), j.astype(np.int_)\n\n # reorder the edges to have a more standard order\n order = np.argsort(i + j * (len(i) + 1))\n i, j, d = i[order], j[order], d[order]\n return i, j, d",
"def findCellCornerXY(self , x, y , k):\n i,j = self.findCellXY(x,y,k)\n if k == self.getNZ():\n k -= 1\n corner_shift = 4\n else:\n corner_shift = 0\n \n nx = self.getNX()\n x0,y0,z0 = self.getCellCorner( corner_shift , ijk = (i,j,k))\n d0 = math.sqrt( (x0 - x)*(x0 - x) + (y0 - y)*(y0 - y))\n c0 = i + j*(nx + 1)\n\n x1,y1,z1 = self.getCellCorner( 1 + corner_shift , ijk = (i,j,k))\n d1 = math.sqrt( (x1 - x)*(x1 - x) + (y1 - y)*(y1 - y))\n c1 = i + 1 + j*(nx + 1)\n\n x2,y2,z2 = self.getCellCorner( 2 + corner_shift , ijk = (i,j,k))\n d2 = math.sqrt( (x2 - x)*(x2 - x) + (y2 - y)*(y2 - y))\n c2 = i + (j + 1)*(nx + 1)\n\n x3,y3,z3 = self.getCellCorner( 3 + corner_shift , ijk = (i,j,k))\n d3 = math.sqrt( (x3 - x)*(x3 - x) + (y3 - y)*(y3 - y))\n c3 = i + 1 + (j + 1)*(nx + 1)\n\n l = [(d0 , c0) , (d1,c1) , (d2 , c2) , (d3,c3)]\n l.sort( EclGrid.d_cmp )\n return l[0][1]",
"def get_coord(path, n_i, n_k):\n fnm = \"%s/coord_mpi%02i%02i.nc\" % (path, n_i, n_k)\n fnc = netcdf.netcdf_file(fnm, 'r')\n x = fnc.variables['x'][:, :]\n z = fnc.variables['z'][:, :]\n return x, z",
"def _get_coordinates(x,y,z):\n\t\ttemp = Board.board\n\t\ttemp1 = temp=='M'\n\t\tfor i in range(6,x):\n\t\t\tfor j in range(y,z):\n\t\t\t\tif(temp1[i][j]==True):\n\t\t\t\t\tcurrent_x = i\n\t\t\t\t\tcurrent_y = j\n\n\t\treturn current_x,current_y",
"def find_cell( self , x , y , z , start_ijk = None):\n\n if start_ijk:\n start_index = self.__global_index( ijk = start_ijk )\n else:\n start_index = 0\n global_index = self._get_ijk_xyz( x , y , z , start_index)\n if global_index >= 0:\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n self._get_ijk1( global_index , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k)) \n return (i.value , j.value , k.value)\n else:\n return None",
"def findCellXY(self , x, y , k):\n if 0 <= k <= self.getNZ():\n i = ctypes.c_int()\n j = ctypes.c_int()\n ok = self._get_ij_xy( x,y,k , ctypes.byref(i) , ctypes.byref(j))\n if ok:\n return (i.value , j.value)\n else:\n raise ValueError(\"Could not find the point:(%g,%g) in layer:%d\" % (x,y,k))\n else:\n raise IndexError(\"Invalid layer value:%d\" % k)",
"def four_neighbors(self, row, col):\n ans = []\n if row > 0:\n ans.append((row - 1, col))\n if row < self._grid_height - 1:\n ans.append((row + 1, col))\n if col > 0:\n ans.append((row, col - 1))\n if col < self._grid_width - 1:\n ans.append((row, col + 1))\n return ans",
"def getNode_xyz(self, nodeIdx):\n\t\tx = self.nodeDictList[nodeIdx]['x']\n\t\ty = self.nodeDictList[nodeIdx]['y']\n\t\tz = self.nodeDictList[nodeIdx]['z']\n\t\treturn (x,y,z)",
"def neighbor_list(i, j, k, nx):\n left_center = (i-1, j, k)\n right_center = (i+1, j, k)\n top_center = (i, j+1, k)\n bottom_center = (i, j-1, k)\n left_up = (i, j, k + 1)\n left_down = (i, j, k -1)\n return np.mod([left_center, right_center, top_center, bottom_center, left_up, left_down], nx)",
"def fetch_gridcell(x, y, z, vtk_volume):\n s = (vtk_volume.GetScalarComponentAsFloat(x, y, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y+1, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y+1, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y, z+1, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y, z+1, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y+1, z+1, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y+1, z+1, 0))\n return s",
"def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out",
"def three_dimensional(self, z): # Maybe I misunderstood the task. My method looks weird\n return (self.x, self.y, z)",
"def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)",
"def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)",
"def from_3d_grid(self, xyz, k=18):\n if xyz.shape[0] != self.V:\n raise ValueError('xyz should have shape n * 3, with n = self.V')\n\n if xyz.shape[1] != 3:\n raise ValueError('xyz should have shape n * 3')\n\n graph = graph_3d_grid(xyz, k)\n if graph is not None:\n i, j, d = graph\n else:\n raise TypeError('Creating graph from grid failed. '\\\n 'Maybe the grid is too big')\n self.E = np.size(i)\n self.edges = np.zeros((self.E, 2), np.int_)\n self.edges[:, 0] = i\n self.edges[:, 1] = j\n self.weights = np.array(d)\n return self.E",
"def idx_to_grid(n):\n\n x = n % MAX_Y\n y = int(n / MAX_X)\n return(x, y)",
"def get_neighbors(n):\n if n < 3:\n return ValueError(\"Integer must be greater than 3.\")\n p = generate()\n q = []\n l = 0\n g = 0\n while g <= n:\n q = next(p)\n g = q[-1]\n if q[-1] == n:\n l = q[0][-2]\n q = next(p)\n g = q[-1]\n elif q[-1] > n:\n l = q[0][-3]\n return l, g",
"def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg",
"def get_xy_grid(nx, ny):\n\tfor n in [nx, ny]:\n\t\tif not isodd(n):\n\t\t\traise Exception(\"[get_xy_grid] only accept odd number\")\n\n\tx, y = np.mgrid[-(nx-1)/2:(nx+1)/2, -(ny-1)/2:(ny+1)/2]\n\n\treturn x, y",
"def grid_value( self , kw , i , j , k):\n return self._grid_value( kw , i , j , k)",
"def index ( self, x, y, z ):\n if x < self.x_min or x > self.x_max:\n raise ValueError(\"x-value out of range %f (%f, %f)\" % \\\n (x,self.x_min,self.x_max))\n if y < self.y_min or y > self.y_max:\n raise ValueError(\"y-value out of range %f (%f, %f)\" % \\\n (y,self.y_min,self.y_max))\n if z < self.z_min or z > self.z_max:\n raise ValueError(\"z-value out of range %f (%f, %f)\" % \\\n (z,self.z_min,self.z_max))\n xi = int(round((x-self.x_min)/self.increment3D[0]))\n yi = int(round((y-self.y_min)/self.increment3D[1]))\n zi = int(round((z-self.z_min)/self.increment3D[2]))\n return xi, yi, zi",
"def getNeighborNodes(self, signature):\n x, y, z = signature[0], signature[1], signature[2]\n return [(x+1, y+1, z+1), (x+1, y, z+1), (x+1, y-1, z+1),\n (x, y+1, z+1), (x, y, z+1), (x, y-1, z+1),\n (x-1, y+1, z+1), (x-1, y, z+1), (x-1, y-1, z+1),\n (x+1, y+1, z-1), (x+1, y, z-1), (x+1, y-1, z-1),\n (x, y+1, z-1), (x, y, z-1), (x, y-1, z-1),\n (x-1, y+1, z-1), (x-1, y, z-1), (x-1, y-1, z-1),\n (x+1, y+1, z), (x+1, y, z), (x+1, y-1, z),\n (x, y+1, z), (x, y, z), (x, y-1, z),\n (x-1, y+1, z), (x-1, y, z), (x-1, y-1, z)]",
"def get_cell_edge_coordinates(self):\n import numpy as np\n x1, x2, x3 = np.ix_(*self.cell_edge_coordinates)\n if self.geometry == 'cartesian':\n x, y, z = x1, x2, x3\n elif self.geometry == 'spherical':\n x = x1 * np.sin(x2) * np.cos(x3)\n y = x1 * np.sin(x2) * np.sin(x3)\n z = x1 * np.cos(x2)\n return x, y, z",
"def coord (i, j):\r\n return j, i",
"def fetch_voxel_neighbors(x, y, z, vtk_volume):\n s = (vtk_volume.GetScalarComponentAsFloat(x-1, y, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y-1, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y+1, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y, z-1, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y, z+1, 0))\n return s",
"def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]",
"def fn(i, j):\n if i == 0 and j == 0: return grid[0][0], grid[0][0]\n if i < 0 or j < 0: return -inf, inf\n if grid[i][j] == 0: return 0, 0\n mx1, mn1 = fn(i-1, j) # from top\n mx2, mn2 = fn(i, j-1) # from left \n mx, mn = max(mx1, mx2)*grid[i][j], min(mn1, mn2)*grid[i][j]\n return (mx, mn) if grid[i][j] > 0 else (mn, mx)",
"def get_grid(self, mol_data):\n\n if self.grid_info is None:\n\n try:\n\n x = mol_data['grid_points/x'][()]\n y = mol_data['grid_points/y'][()]\n z = mol_data['grid_points/z'][()]\n\n except BaseException:\n\n raise ValueError(\n \"Grid points not found in the data file\")\n\n else:\n\n center = mol_data['grid_points/center'][()]\n npts = np.array(self.grid_info['number_of_points'])\n res = np.array(self.grid_info['resolution'])\n\n halfdim = 0.5 * (npts * res)\n\n low_lim = center - halfdim\n hgh_lim = low_lim + res * (npts - 1)\n\n x = np.linspace(low_lim[0], hgh_lim[0], npts[0])\n y = np.linspace(low_lim[1], hgh_lim[1], npts[1])\n z = np.linspace(low_lim[2], hgh_lim[2], npts[2])\n\n # there is stil something strange\n # with the ordering of the grid\n # also noted in GridTools define_grid_points()\n y, x, z = np.meshgrid(y, x, z)\n grid = (x, y, z)\n npts = (len(x), len(y), len(z))\n return grid, npts",
"def get_coord(self,x,y,z):\n a = 0\n b = 0\n c = 0\n \n distance = 0\n \n while (distance <= x):\n distance += SQUARE_SIZE\n if ( (x - distance) > - (SQUARE_SIZE / 2) ):\n a += 1\n distance = 0\n \n while (distance <= y):\n distance += SQUARE_SIZE\n if ( (y - distance) > - (SQUARE_SIZE / 2) ):\n b += 1\n distance = 0\n \n while (distance <= z):\n distance += SQUARE_SIZE\n if ( (z - distance) > - (SQUARE_SIZE / 2) ):\n c += 1\n distance = 0\n \n return(a,b,c)"
]
| [
"0.73686534",
"0.65047204",
"0.63919705",
"0.6296044",
"0.6275556",
"0.6220592",
"0.6207953",
"0.61805415",
"0.6143953",
"0.61251754",
"0.60409844",
"0.6003493",
"0.5980634",
"0.5960125",
"0.59550786",
"0.5921654",
"0.5883512",
"0.58813345",
"0.58503973",
"0.5846972",
"0.58274204",
"0.5771887",
"0.57591426",
"0.57589656",
"0.5752958",
"0.57341105",
"0.5731233",
"0.5713384",
"0.5702084",
"0.56562734"
]
| 0.73610574 | 1 |
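A sketch of reading node (not cell) positions for the row above; per the docstring, node indices run over the inclusive ranges [0,nx] x [0,ny] x [0,nz]. The class name, import path and file name are assumptions; `getNodePos()` comes from the document cell.

```python
# Hypothetical sketch -- EclGrid, the import and the file name are assumptions.
from ecl.grid import EclGrid  # assumed import path

grid = EclGrid("CASE.EGRID")  # assumed grid file
i, j, k = 0, 0, 0

# The eight grid nodes surrounding cell (0, 0, 0):
for di in (0, 1):
    for dj in (0, 1):
        for dk in (0, 1):
            x, y, z = grid.getNodePos(i + di, j + dj, k + dk)
            print((i + di, j + dj, k + dk), "->", (x, y, z))
```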
This function returns the position of Vertex (i,j,k). The coordinates are in the inclusive interval [0,nx] x [0,ny] x [0,nz]. | def getNodeXYZ(self , i,j,k):
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
corner = 0
if i == nx:
i -= 1
corner += 1
if j == ny:
j -= 1
corner += 2
if k == nz:
k -= 1
corner += 4
if self._ijk_valid( i , j , k):
return self.get_corner_xyz( corner , global_index = i + j*nx + k*nx*ny )
else:
raise IndexError("Invalid coordinates: (%d,%d,%d) " % (i,j,k)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getNodePos(self , i , j , k):\n if not 0 <= i <= self.getNX():\n raise IndexError(\"Invalid I value:%d - valid range: [0,%d]\" % (i , self.getNX()))\n\n if not 0 <= j <= self.getNY():\n raise IndexError(\"Invalid J value:%d - valid range: [0,%d]\" % (j , self.getNY()))\n\n if not 0 <= k <= self.getNZ():\n raise IndexError(\"Invalid K value:%d - valid range: [0,%d]\" % (k , self.getNZ()))\n \n x = ctypes.c_double()\n y = ctypes.c_double()\n z = ctypes.c_double()\n self._get_corner_xyz( i,j,k , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z))\n return (x.value , y.value , z.value)",
"def getVerticePosition(self):\n #def getvoxelpos(model,scale,dims,translate,i,j,k): #centroid!\n return(self.X,self.Y,self.Z)",
"def findCellXY(self , x, y , k):\n if 0 <= k <= self.getNZ():\n i = ctypes.c_int()\n j = ctypes.c_int()\n ok = self._get_ij_xy( x,y,k , ctypes.byref(i) , ctypes.byref(j))\n if ok:\n return (i.value , j.value)\n else:\n raise ValueError(\"Could not find the point:(%g,%g) in layer:%d\" % (x,y,k))\n else:\n raise IndexError(\"Invalid layer value:%d\" % k)",
"def get_position(k):\r\n l = get_level(k)\r\n return (l, k - 2**l)",
"def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)",
"def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)",
"def coord (i, j):\r\n return j, i",
"def get_coord(path, n_i, n_k):\n fnm = \"%s/coord_mpi%02i%02i.nc\" % (path, n_i, n_k)\n fnc = netcdf.netcdf_file(fnm, 'r')\n x = fnc.variables['x'][:, :]\n z = fnc.variables['z'][:, :]\n return x, z",
"def get_position_coords(cls):\n row = math.floor(cls.position / cls.size)\n col = cls.position - row * cls.size\n return row, col",
"def get_position(self):\n return self._find_gnx_node(self.gnx)",
"def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])",
"def vertex_position(indexed_triangle, vertex_index):\n for i in range(3):\n if indexed_triangle[i] == vertex_index:\n return i\n return None",
"def get_points(self):\r\n return self.nx*self.ny*self.nz",
"def get_pos(self):\n return (self.x, self.y)",
"def get_ind(self,*q):\n try:\n if( len(q) == 1 ):\n x = q[0][:,0]\n y = q[0][:,1]\n z = q[0][:,2]\n else:\n x = q[0]\n y = q[1]\n z = q[2]\n try:\n cx = (x+0.5).astype(na.int32)\n cy = (y+0.5).astype(na.int32)\n cz = (z+0.5).astype(na.int32)\n except:\n cx = int(x+0.5)\n cy = int(y+0.5)\n cz = int(z+0.5)\n ind = cx + cy*self.dim[0]+cz*self.dim[0]*self.dim[1]\n return ind\n except Exception as error:\n print(error)\n return None",
"def get_position(self):\n return (self.x_pos, self.y_pos)",
"def __getitem__(self, k):\n return self._coords[k]",
"def get_position(self):\n return parsegeometry(self.geometry())[2:]",
"def get_vertex(self):\n V = circumcenter(self.Cents)\n return V",
"def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)",
"def pixel_coords_to_pos(i, j, maze_size):\n maze_border = ((maze_size - 1) // 2) * BLOCK_PIXEL_SIZE\n pos_x = (i + maze_border) // BLOCK_PIXEL_SIZE\n pos_y = (maze_border - j) // BLOCK_PIXEL_SIZE\n\n return int(pos_x), int(pos_y)",
"def kxyz(self, k: int) -> Point:\n x = self._read_inline(f\"kx({k})\")\n y = self._read_inline(f\"ky({k})\")\n z = self._read_inline(f\"kz({k})\")\n return Point(x, y, z)",
"def getCoords( self, i : int ):\n return enumerate(self._Vals[self._layout.dims_order[i]] \\\n [self._layout.starts[i]:self._layout.ends[i]])",
"def position(self) -> Tuple[int, int]:\n return self.row, self.col",
"def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y",
"def cell_verts(self, idx):\r\n coords = []\r\n xyz = [0, 0, 0]\r\n cell = self.grid.GetCell(idx[0] - 1, idx[1] - 1, idx[2] - 1)\r\n p_ids = cell.GetPointIds()\r\n n_ids = pointIds.GetNumberOfIds()\r\n for n in range(n_ids):\r\n p = p_ids.GetId(n)\r\n self.grid.GetPoint(p, xyz)\r\n coords.append(copy.deepcopy(xyz))\r\n return coords",
"def get_cell_vertices(self, i, j):\n self._copy_cache = False\n cell_verts = [(self.xvertices[i, j], self.yvertices[i, j]),\n (self.xvertices[i, j+1], self.yvertices[i, j+1]),\n (self.xvertices[i+1, j+1], self.yvertices[i+1, j+1]),\n (self.xvertices[i+1, j], self.yvertices[i+1, j]),]\n self._copy_cache = True\n return cell_verts",
"def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)",
"def get_position(self):\n ret = _pal.Vec3()\n _pal.lib.geometry_get_position(self._geometry, ret)\n return [x for x in ret]",
"def __pos(self, i, j):\n return i * (i - 1) / 2 + j"
]
| [
"0.75148904",
"0.6588608",
"0.6550321",
"0.6368179",
"0.63060415",
"0.623356",
"0.6167894",
"0.6144407",
"0.61222583",
"0.61099994",
"0.61073494",
"0.60824805",
"0.6025701",
"0.6015853",
"0.59869",
"0.59733015",
"0.5970571",
"0.59663826",
"0.595034",
"0.5938405",
"0.59281737",
"0.59215045",
"0.5911868",
"0.58906525",
"0.58878416",
"0.58861494",
"0.58825207",
"0.5879407",
"0.5868498",
"0.58621466"
]
| 0.7257748 | 1 |
Bottom of the reservoir in the column (i, j). | def bottom( self , i , j ):
return self._get_bottom( i , j ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_replica_bottom(self):\n return self.dummy_row_insts[0].uy()",
"def bottom(self):\n return self.__b",
"def bottom(self):\n # type: () -> float\n return self._bottom",
"def bottom(self):\n return self._bottom",
"def move_bottom ( self ):\n list, index = self.get_info()\n self.value = list[:index] + list[index+1:] + [ list[index] ]",
"def bottom(self, value):\n\n pass",
"def bottom_left_tile_value(self):\n\t\treturn self.expected_cols * (self.expected_rows - 1) + 1",
"def bottom_right_tile_value(self):\n\t\treturn self.expected_rows * self.expected_cols",
"def bottom(self):\n\n return self._bottom",
"def bottom(self):\n return self.top + self.height",
"def get_main_array_bottom(self):\n return self.bitcell_array_inst.by()",
"def bottom(self, bottom):\n self.ptr.bottom(bottom)",
"def bottom_y(self):\r\n return self.position.y - self.size.y - self.bulk",
"def bb_bottom(self) -> float:\n return self._bb_bottom",
"def bottom(self):\n return self.points['bottomRight'].y",
"def bottom(self):\n return self.points['bottomRight'].y",
"def bottomright(self):\n return (self.right, self.bottom)",
"def bottom_offset(self):\n raise NotImplementedError",
"def _rect_bottom(self):\n\treturn min(self.y, self.y + self.h)",
"def bottom(self):\n return max(0, len(self.content) - self.visible_height)",
"def _generate_end_position(self):\n end_position = []\n new_row = []\n\n for i in range(1, self.PUZZLE_NUM_ROWS * self.PUZZLE_NUM_COLUMNS + 1):\n new_row.append(i)\n if len(new_row) == self.PUZZLE_NUM_COLUMNS:\n end_position.append(new_row)\n new_row = []\n\n end_position[-1][-1] = 0\n return end_position",
"def bottom_distance(self):\n return self.board.height - 1 - self.y",
"def bottom_right(self):\n return Point(self.right, self.bottom)",
"def get_end_cell(self):\n return (self.end_row, self.end_col)",
"def bottom(self, x):\n raise NotImplementedError(\"Abstract Method\")",
"def test_slice_last(self):\n self.table.append(['Tom', 26])\n self.table.append(['Chantelle', 24])\n self.assertEqual(self.table[-1], ['Chantelle', 24])",
"def _findBottom(self,col):\n min = GAME_HEIGHT\n mpos = 0\n for x in range(self.getLengthAlien()):\n if self._aliens[x][col] != None and self._aliens[x][col].y < min:\n min = self._aliens[x][col].y\n mpos = x\n return mpos",
"def visible_bottom(self):\n if self.bottom < self.visible_height:\n return self.bottom\n return len(self.visible_content) - 1",
"def getContainerBottom(self):\n return self.containerBottom",
"def midbottom(self):\n return (self.centerx, self.bottom)"
]
| [
"0.68143",
"0.65203387",
"0.63712764",
"0.6278976",
"0.6226127",
"0.6201487",
"0.6165377",
"0.61245996",
"0.61081696",
"0.610627",
"0.5982431",
"0.5977994",
"0.5907894",
"0.5894845",
"0.58603686",
"0.58603686",
"0.5837352",
"0.58325255",
"0.57657677",
"0.5724831",
"0.5713004",
"0.57012933",
"0.56731576",
"0.56492424",
"0.55632025",
"0.5507959",
"0.5504602",
"0.54909855",
"0.54901206",
"0.5480832"
]
| 0.66192627 | 1 |
Will locate the k value of the cell containing a specified depth. Will scan through the grid column specified by the input arguments i and j and search for a cell containing the depth given by the input argument depth. The return value is the k value of the cell containing depth. If depth is above the top of the reservoir the function will return -1, and if depth is below the bottom of the reservoir the function will return nz. | def locate_depth( self , depth , i , j ):
return self._locate_depth( depth , i , j) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def df_search(grid, level):\n states_we_have_seen_before = Set(grid)\n\n def recur(inner_grid, itter, level):\n counter = 0\n next_states = Set()\n\n for gg in legal_moves(inner_grid):\n if gg not in states_we_have_seen_before:\n states_we_have_seen_before.add(gg)\n next_states.add(gg)\n\n for t in next_states:\n if match_level(t, level):\n return (size * size * size - itter, t)\n\n if itter > 0:\n for t in next_states:\n r = recur(t, itter - 1, level)\n if r:\n return r\n return None\n\n return recur(grid, size * size * size, level)",
"def search(board):\n depth = 0\n while True:\n result = depth_first(board, depth)\n if result:\n return result\n else:\n depth += 1",
"def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result",
"def query(self, z):\n def KDSearch(current, nearest, d_star):\n \"\"\"Recurse through the tree as if searching for target z.\n\n Parameters:\n current (KDTNode): the node we are currently examining.\n nearest (KDTNode): the closest known node to z.\n d_star (int): the distance from nearest to target.\n Returns:\n nearest (KDTNode): the node closest to the target z.\n d_star (int): the distance from nearest to target.\n \"\"\"\n #Base case: dead end.\n if current is None:\n return nearest, d_star\n #set x to location of node we are examining\n x = current.value\n #set i to the pivot of node we are examining\n i = current.pivot\n #distance from x to z\n d_x_z = la.norm(x - z)\n #check if current is closer to z than nearest\n if d_x_z < d_star:\n nearest = current\n d_star = d_x_z\n #Search to the left\n if z[i] < x[i]:\n nearest, d_star = KDSearch(current.left, nearest, d_star)\n #Search to the right if needed\n if (z[i] + d_star) >= x[i]:\n nearest, d_star = KDSearch(current.right, nearest, d_star)\n #Search to the right\n else:\n nearest, d_star = KDSearch(current.right, nearest, d_star)\n #Search to the left if needed\n if (z[i] - d_star) <= x[i]:\n nearest, d_star = KDSearch(current.left, nearest, d_star)\n return nearest, d_star\n #If tree is empty, raise error\n if (self.root == None):\n raise ValueError(\"Tree is empty!!!\")\n nearest, d_star = KDSearch(self.root, self.root, la.norm(self.root.value - z))\n return nearest.value, d_star",
"def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit",
"def findChildB(T,k):\r\n for i in range(len(T.data)):\r\n if k < T.data[i].word:\r\n return i\r\n return len(T.data)",
"def knear(self, k: int) -> int:\n result = self._read_inline(f\"knear({k})\")\n return int(result)",
"def search(self, depth, board, current_player, opposite_player):\n legal_boards = []\n for column in range(7):\n if board.get_hole(column, 0) is 0:\n temp = deepcopy(board)\n temp.set_column(column, current_player)\n legal_boards.append(temp)\n \n if depth == 0 or len(legal_boards) == 0 or self.game_is_over(board, current_player, opposite_player):\n return self.value(board, current_player, opposite_player)\n\n alpha = -99999999\n\n for legal_board in legal_boards:\n alpha = max(alpha, -self.search(depth-1, legal_board, opposite_player, current_player))\n return alpha",
"def find_layer(z, params):\r\n N = len(params['d_list'])\r\n for i in range(N):\r\n if z <= params['layer_bottom_list'][i]:\r\n return i-1\r\n return N-1",
"def find_cell( self , x , y , z , start_ijk = None):\n\n if start_ijk:\n start_index = self.__global_index( ijk = start_ijk )\n else:\n start_index = 0\n global_index = self._get_ijk_xyz( x , y , z , start_index)\n if global_index >= 0:\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n self._get_ijk1( global_index , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k)) \n return (i.value , j.value , k.value)\n else:\n return None",
"def find_new_kbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n\n #---> j loop\n for j in range(Ly):\n self.kbl[j] = N #initialize search at top\n\n # in fortran k=N-1,1,-1\n for k in range(N-1,0,-1):\n #INDEX MAP\n k_w = k\n k_r = k-1\n \n for j in range(Ly):\n if z_u_w[j,k_w] > z_u_w[j,N] - self.hbls[j]:\n self.kbl[j] = k_w",
"def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index",
"def KDSearch(current, nearest, d_star):\n #Base case: dead end.\n if current is None:\n return nearest, d_star\n #set x to location of node we are examining\n x = current.value\n #set i to the pivot of node we are examining\n i = current.pivot\n #distance from x to z\n d_x_z = la.norm(x - z)\n #check if current is closer to z than nearest\n if d_x_z < d_star:\n nearest = current\n d_star = d_x_z\n #Search to the left\n if z[i] < x[i]:\n nearest, d_star = KDSearch(current.left, nearest, d_star)\n #Search to the right if needed\n if (z[i] + d_star) >= x[i]:\n nearest, d_star = KDSearch(current.right, nearest, d_star)\n #Search to the right\n else:\n nearest, d_star = KDSearch(current.right, nearest, d_star)\n #Search to the left if needed\n if (z[i] - d_star) <= x[i]:\n nearest, d_star = KDSearch(current.left, nearest, d_star)\n return nearest, d_star",
"def dpsearch(points,k):\n\t#M = k\n\tpoints = np.sort(points,axis=0)\n\tL = len(points)\n\tM = k\n\tT = list(np.zeros(M+1,dtype='int'))\n\tT[0] = 0\t#first threshold is by default always set to index 0 in trellis graph.\n\tT[M] = L \t#last threshold is by default always set to last number in input points.\n\ttrellis_value = np.full((M+1,L+1),np.inf)\n\ttrellis_backpointer = np.full((M+1,L+1),np.inf)\n\n\t# Stage 1: m=1\t\n\tfor l in range(1,L-M+2):\n\t\ttrellis_value[1][l] = ((l-0)/float(L))*np.var(points[0:l])\n\t\ttrellis_backpointer[1][l] = 0\n\n\t\n\tif(M>2):\n\t\t# Stage 2: m=2 to m=M-1\n\t\tfor m in range(2,M):\n\t\t\tfor l in range(m,L-M+m+1):\n\t\t\t\t#finding optimal path\n\t\t\t\tJ_min = np.inf\n\t\t\t\tJ_temp = np.inf\n\t\t\t\tfor i in range(m-1,l):\n\t\t\t\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\t\t\t\tif J_temp < J_min:\n\t\t\t\t\t\tJ_min = J_temp\n\t\t\t\t\t\tptr = i\n\t\t\t\t\n\t\t\t\ttrellis_value[m][l],trellis_backpointer[m][l] = J_min,ptr\n\t\t\t\t\n\n\t# Stage 3: m=M\n\tm = M\n\tl = L\n\t#finding optimal path\n\tJ_min = np.inf\n\tJ_temp = np.inf\n\tfor i in range(m-1,l):\n\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\tif J_temp < J_min:\n\t\t\tJ_min = J_temp\n\t\t\tptr = i\n\n\t\n\ttrellis_value[M][L] = J_min\n\ttrellis_backpointer[M][L] = ptr\n\t\n\t\n\t# Backtracking\n\tl = L\n\tm = M\n\twhile m>=2:\n\t\tT[m-1] = int(trellis_backpointer[m][l])\n\t\tl = int(trellis_backpointer[m][l])\n\t\tm = m - 1\n\n\t#Assign cluster labels\n\tlabels = np.full(len(points),0)\n\tj = T[0]\n\tcounter = 0\n\tfor i in range(1,k+1):\n\t\tlabels[j:T[i]] = counter\n\t\tj = T[i]\n\t\tcounter += 1\n\n\n\treturn labels,T",
"def find_element(grid, target):\n \n # First, iterate over the row indices\n for row_number in range(len(grid)):\n \n# print(\"Checking row\", row_number)\n \n for col_number in range(len(grid[row_number])):\n \n# print(\"Checking column\", col_number)\n \n if grid[row_number][col_number] == target:\n return (row_number, col_number)\n \n return None",
"def linearSearch(A, k):\n\n #TODO: Implement without using python's in-built function\n if isinstance(A, list) == False or isinstance(k, int) == False:\n return -1\n else:\n for i in range(len(A)):\n if A[i] == k:\n return i\n return -1",
"def findChildA(T,k): \r\n for i in range(len(T.data)):\r\n if k.word < T.data[i].word:\r\n return i\r\n return len(T.data)",
"def k_nearest_neighbor(self, k, target, current_root, k_nearest_heap): # 1 step\r\n iter_list = [] # a stack to store iteration path # 1 step\r\n # step1: find the 'nearest' leaf\r\n nearest_leaf = current_root # 1 step\r\n while nearest_leaf is not None: # 2 steps: while, is not\r\n iter_list.append(nearest_leaf) # store the path # 1 step\r\n tt = nearest_leaf.point # 2 steps: nearest_leaf.point, tt = \r\n if target[nearest_leaf.axis] < nearest_leaf.point[nearest_leaf.axis]: # 6 steps: if, <, nearest_leaf.axis, nearest_leaf.point, nearest_leaf.point[],target[]\r\n if nearest_leaf.left is not None: # then go to the left child # 3 steps: if, is not, nearest_leaf.left\r\n nearest_leaf = nearest_leaf.left # 2 steps: nearest_leaf.left, nearest_leaf = \r\n else:\r\n break\r\n else:\r\n if nearest_leaf.right is not None: # else, go to the right child\r\n nearest_leaf = nearest_leaf.right\r\n else:\r\n break\r\n while nearest_leaf.left is not None or nearest_leaf.right is not None: # 6 steps: while, is not, or, is not, nearest_leaf.left, nearest_leaf.right\r\n if nearest_leaf.left is not None: # 3 steps: if, is not, nearest_leaf.left\r\n nearest_leaf = nearest_leaf.left # 2 steps: nearest_leaf.left, = \r\n iter_list.append(nearest_leaf) # 1 step\r\n if nearest_leaf.right is not None: # 3 steps: if, is not, nearest_leaf.right\r\n nearest_leaf = nearest_leaf.right # 2 steps: nearest_leaf.right, = \r\n iter_list.append(nearest_leaf) # 1 step\r\n tt = nearest_leaf.point # 2 steps: nearest_leaf.point, tt = \r\n \"\"\"\r\n step2: find the k nearest by backtracking upside\r\n Two situations to add the point into the heap k_nearest_heap\r\n A. when len(k_nearest_heap) < k\r\n B. when dis(point, target) < current_max_dis\r\n \"\"\"\r\n # k_nearest_heap = LargeHeap() # the large heap to store the current 'nearest' neighbors\r\n # the max distance is actually the distance between target and the top of the heap\r\n '''\r\n current_max_dis = self.distance(target, nearest_leaf.point[:self.n_dim])\r\n k_nearest_heap.add(nearest_leaf, current_max_dis)\r\n tmp = iter_list.pop()\r\n '''\r\n former_node = nearest_leaf # the former 'current_node', to indicate whether go through this child\r\n # 1 step\r\n while iter_list != []: # 2 steps: while, !=\r\n if k_nearest_heap.len > 0: # 3 steps: if, k_nearest_heap.len, >\r\n current_max_dis = k_nearest_heap.heaplist[0][1] # 4 steps: k_nearest_heap.heaplist, k_nearest_heap.heaplist[0], k_nearest_heap.heaplist[0][1], current_max_dis =\r\n else:\r\n current_max_dis = -1\r\n current_pointer = iter_list.pop() # 1+38 steps: 1 step - current_pointer = ; 38 steps - iter_list.pop()\r\n tt = current_pointer.point # 2 steps: current_pointer.point, tt=\r\n dis = self.distance(current_pointer.point[:self.n_dim], target) \r\n # 1+11 steps: 1 step - dis=, 11 steps - self.distance()\r\n if k_nearest_heap.len < k:\r\n k_nearest_heap.add(current_pointer, dis)\r\n elif dis < current_max_dis: # 2 steps: elif, <\r\n k_nearest_heap.pop() # 38 steps: k_nearest_heap.pop()\r\n k_nearest_heap.add(current_pointer, dis) # 30 steps: k_nearest_heap.add()\r\n # current_max_dis = self.distance(k_nearest_heap.heaplist[0][0].point[:self.n_dim], target)\r\n current_max_dis = k_nearest_heap.heaplist[0][1] # 4 steps: k_nearest_heap.heaplist, k_nearest_heap.heaplist[],k_nearest_heap.heaplist[][], current_max_dis =\r\n axis = current_pointer.axis # 2 steps: current_pointer.axis, axis = \r\n if abs(target[axis] - current_pointer.point[axis]) >= current_max_dis:\r\n # 6 steps: if, >=, target[axis], - , 
current_pointer.point[], abs()\r\n former_node = current_pointer # 1 step\r\n # if not intersect with\r\n continue # 1 step\r\n if current_pointer.left is not None and current_pointer.left != former_node:\r\n # 5 steps: if, is not, and, current_pointer.left, !=\r\n tt = current_pointer.left # 2 steps: current_pointer.left, tt =\r\n # iter_list.append(current_pointer.left)\r\n self.k_nearest_neighbor(k, target, current_pointer.left, k_nearest_heap)\r\n # T(n/2) steps: self.k_nearest_neighbor()\r\n if current_pointer.right is not None and current_pointer.right != former_node:\r\n # 5 steps: if, is not, and, current_pointer.left, !=\r\n tt = current_pointer.right # 2 steps: current_pointer.left, tt =\r\n # iter_list.append(current_pointer.righat)\r\n self.k_nearest_neighbor(k, target, current_pointer.right, k_nearest_heap)\r\n # T(n/2) steps: self.k_nearest_neighbor()\r\n former_node = current_pointer # 1 step\r\n rlist = [] # 1 step\r\n rdis = [] # 1 step\r\n for ele in k_nearest_heap.heaplist: # 2 steps: for, in \r\n rlist.append(ele[0].point) # 3 steps: append(), ele[0], ele[0].point\r\n rdis.append(ele[1]) # 2 steps: append(), ele[1]\r\n return rdis, rlist # 1 step\r",
"def pure_monte_carlo_tree_search(self, key, game_score_alg, depth=None):\n\n test_stuck_move_p1 = copy.deepcopy(self.matrix)\n\n # First movement in given direction\n self.key_down_char(key)\n\n # Tests if board has changed after first move\n # -> THE GIVEN DIRECTION CAN'T MOVE BOARD = RETURN LOWEST SCORE (0)\n if test_stuck_move_p1 == self.matrix:\n return 0\n\n # Random moves until no moves can be made\n while Logic.game_state(self.matrix) == 'not over':\n self.key_down_char(random.choice(self.array_keys_to_shuffle))\n\n # Returning gotten score\n return getattr(Logic, game_score_alg)(self.matrix)",
"def cuckoo_search(n=None, nd=None, Lb=None, Ub=None, pa=None):\n\tif n is None:\n\t\tn =25\n\n\tif nd is None:\n\t\tnd=21\n\n\tif Lb is None:\n\t\tLb = np.ones(nd)*0\n\tif Ub is None:\n\t\tUb = np.ones(nd)*5\n\n\tif pa is None:\n\t\tpa = 0.25\n\n\t# creation of the list for parameter pairs \n\t\n\tstep = 1\n\n # initialization of the nests\n\tnests = np.zeros((n,nd))\n\tfor i in range(n):\n\t\tnests[i,:] = Lb + (Ub-Lb)*np.random.rand(len(Lb))\n\n\tfitness = 10**10 * np.ones((n,1))\n\tbest_nest, fmin, nest, fitness, N_iter = single_cuckoo_search(nests,fitness,Lb,Ub,pa,step) \n\n\treturn best_nest, fmin, nest, fitness, N_iter",
"def find(self, k):\n if k == self.key:\n return self\n elif k < self.key:\n if self.left is None:\n return None\n else:\n return self.left.find(k)\n else:\n if self.right is None: \n return None\n else:\n return self.right.find(k)",
"def get_ray_index_for_grid_point(ray, grid_idx, n_depth_pts):\n if ray.mu < 0:\n return (grid_idx)\n else:\n return (n_depth_pts - (grid_idx + 1))",
"def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0",
"def search(T,k):\r\n for t in T.data:\r\n if k == t.word:\r\n return t\r\n if T.isLeaf:\r\n return None\r\n return search(T.child[findChildB(T,k)],k)",
"def query(self, z):\n root = self.root\n def KDSearch(current, nearest, dp):\n if current == None:\n return(nearest, dp)\n x = current.value\n i = current.pivot\n if la.norm(x-z) < dp:\n nearest = current\n dp = la.norm(x-z)\n if z[i] < x[i]: \n nearest, dp = KDSearch(current.left, nearest, dp)\n if z[i] + dp >= x[i]:\n nearest, dp = KDSearch(current.right, nearest, dp) #checks the sphere of radius to see if it should check the right subtree\n else:\n nearest, dp = KDSearch(current.right, nearest, dp)\n if z[i] - dp <= x[i]: #checks the sphere of radius to see if it should check the left subtree\n nearest, dp = KDSearch(current.left, nearest, dp) \n return(nearest, dp)\n node, dp = KDSearch(root, root, la.norm(root.value-z))\n return (node.value, dp)",
"def get_position(k):\r\n l = get_level(k)\r\n return (l, k - 2**l)",
"def minimax_searcher(depth, evaluate):\n def strategy(player, board):\n return minimax(player, board, depth, evaluate)[1]\n return strategy",
"def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n ## YOUR CODE HERE\n root_node = Node(problem.getStartState(), [], 0, None, 0)\n frontier = util.Stack()\n frontier.push(root_node)\n explored = []\n\n while not(frontier.isEmpty()):\n node_to_explore = frontier.pop()\n\n if problem.isGoalState(node_to_explore.state):\n return node_to_explore.state\n else:\n copy_state = node_to_explore.state.copy()\n \n if convertStateToHash(copy_state) not in explored:\n\t explored.append(convertStateToHash(copy_state))\n\t successors_state = problem.getSuccessors(copy_state)\n\t if len(successors_state) > 0:\n\t\t for state_action_cost in successors_state:\n\t\t if convertStateToHash(state_action_cost[0]) in explored:\n\t\t continue\n\t\t else:\n\t\t frontier.push(Node(state_action_cost[0], state_action_cost[1], node_to_explore.path_cost + 1, node_to_explore, node_to_explore.depth + 1))\n\n return False\n # util.raiseNotDefined()",
"def search(self, depth, state, curr_player):\n \n # enumerate all legal moves from this state\n legal_moves = []\n for i in range(7):\n # if column i is a legal move...\n if self.isLegalMove(i, state):\n # make the move in column i for curr_player\n temp = self.makeMove(state, i, curr_player)\n legal_moves.append(temp)\n \n # if this node (state) is a terminal node or depth == 0...\n if depth == 0 or len(legal_moves) == 0 or self.gameIsOver(state):\n # return the heuristic value of node\n return self.value(state, curr_player)\n \n # determine opponent's color\n if curr_player == self.colors[0]:\n opp_player = self.colors[1]\n else:\n opp_player = self.colors[0]\n\n alpha = -99999999\n for child in legal_moves:\n if child == None:\n print(\"child == None (search)\")\n alpha = max(alpha, -self.search(depth-1, child, opp_player))\n return alpha",
"def iterative_depth_search(self, board, player, t_max=30, min_depth=4, stop_at_depth=False):\n\n\t\tt_elapsed = 0.0\n\t\tbest_move, max_depth = None, 1\n\t\talpha, beta = -float('inf'), float('inf')\n\n\t\twhile max_depth <= min_depth or t_elapsed <= t_max:\n\t\t\tif stop_at_depth and max_depth > min_depth:\n\t\t\t\tbreak\n\n\t\t\tstart = time.time()\n\t\t\tbest_moves, best_val = self.alpha_beta_search(board, alpha, beta, player, 0, max_depth)\n\t\t\tt_elapsed += time.time() - start\n\t\t\tmax_depth += 1\n\t\t\tself.update()\n\n\t\t\t# Checkmate found.\n\t\t\tif abs(best_val) == float('inf'):\n\t\t\t\tself.moves_til_checkmate = len(best_moves)\n\t\t\t\tbreak\n\n\t\tbest_move = best_moves[0]\n\n\t\treturn best_move, best_val"
]
| [
"0.66241956",
"0.6500184",
"0.6004492",
"0.58622545",
"0.58384705",
"0.5737128",
"0.5730612",
"0.56912434",
"0.5666911",
"0.5647384",
"0.56222653",
"0.5618303",
"0.5603912",
"0.5595387",
"0.5587486",
"0.55810344",
"0.55666375",
"0.55655825",
"0.55468816",
"0.5525049",
"0.5520699",
"0.5512292",
"0.5511948",
"0.55079496",
"0.55021846",
"0.5496992",
"0.54942095",
"0.5492917",
"0.5492007",
"0.5481572"
]
| 0.653387 | 1 |
Lookup the cell containing the true position (x,y,z). Will locate the cell in the grid which contains the true position (x,y,z); the return value is a triplet (i,j,k). The underlying C implementation is not very efficient, and can potentially take quite a long time. If you provide a good initial guess with the start_ijk parameter (a tuple (i,j,k)) things can speed up quite substantially. If the location (x,y,z) cannot be found in the grid, the method will return None. | def find_cell( self , x , y , z , start_ijk = None):
if start_ijk:
start_index = self.__global_index( ijk = start_ijk )
else:
start_index = 0
global_index = self._get_ijk_xyz( x , y , z , start_index)
if global_index >= 0:
i = ctypes.c_int()
j = ctypes.c_int()
k = ctypes.c_int()
self._get_ijk1( global_index , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k))
return (i.value , j.value , k.value)
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findCellXY(self , x, y , k):\n if 0 <= k <= self.getNZ():\n i = ctypes.c_int()\n j = ctypes.c_int()\n ok = self._get_ij_xy( x,y,k , ctypes.byref(i) , ctypes.byref(j))\n if ok:\n return (i.value , j.value)\n else:\n raise ValueError(\"Could not find the point:(%g,%g) in layer:%d\" % (x,y,k))\n else:\n raise IndexError(\"Invalid layer value:%d\" % k)",
"def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit",
"def current_position(self, solved_row, solved_col):\r\n solved_value = (solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"",
"def current_position(self, solved_row, solved_col):\r\n solved_value = (solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"",
"def current_position(self, solved_row, solved_col):\r\n solved_value = (solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"",
"def current_position(self, solved_row, solved_col):\n solved_value = (solved_col + self._width * solved_row)\n\n for row in range(self._height):\n for col in range(self._width):\n if self._grid[row][col] == solved_value:\n return (row, col)\n assert False, \"Value \" + str(solved_value) + \" not found\"",
"def current_position(self, solved_row, solved_col):\n solved_value = (solved_col + self._width * solved_row)\n\n for row in range(self._height):\n for col in range(self._width):\n if self._grid[row][col] == solved_value:\n return (row, col)\n assert False, \"Value \" + str(solved_value) + \" not found\"",
"def current_position(self, solved_row, solved_col):\n solved_value = (solved_col + self._width * solved_row)\n\n for row in range(self._height):\n for col in range(self._width):\n if self._grid[row][col] == solved_value:\n return (row, col)\n assert False, \"Value \" + str(solved_value) + \" not found\"",
"def current_position(self, solved_row, solved_col):\n solved_value = (solved_col + self._width * solved_row)\n\n for row in range(self._height):\n for col in range(self._width):\n if self._grid[row][col] == solved_value:\n return (row, col)\n assert False, \"Value \" + str(solved_value) + \" not found\"",
"def _get_coordinates(x,y,z):\n\t\ttemp = Board.board\n\t\ttemp1 = temp=='M'\n\t\tfor i in range(6,x):\n\t\t\tfor j in range(y,z):\n\t\t\t\tif(temp1[i][j]==True):\n\t\t\t\t\tcurrent_x = i\n\t\t\t\t\tcurrent_y = j\n\n\t\treturn current_x,current_y",
"def _get_coordinates(self, tile, position=None):\n if not position:\n position = self.position\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if position[i][j] == tile:\n return i, j\n\n return RuntimeError('Invalid tile value')",
"def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn",
"def getCellFromPosition(self, xPos, yPos):\n for cell in self.cells:\n if(xPos == cell.x and yPos == cell.y):\n return cell\n return False",
"def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None",
"def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None",
"def find_value(self, x, y):\n for cell in self.cells:\n if cell.coordinates == (x,y):\n return cell.value\n else:\n return None",
"def find_element(grid, target):\n \n # First, iterate over the row indices\n for row_number in range(len(grid)):\n \n# print(\"Checking row\", row_number)\n \n for col_number in range(len(grid[row_number])):\n \n# print(\"Checking column\", col_number)\n \n if grid[row_number][col_number] == target:\n return (row_number, col_number)\n \n return None",
"def point_to_cell(self,xy):\n pnt = geometry.Point(xy)\n\n for c in self.select_cells_nearest(xy,10):\n if self.cell_polygon(c).intersects(pnt):\n return c\n return None",
"def findCellCornerXY(self , x, y , k):\n i,j = self.findCellXY(x,y,k)\n if k == self.getNZ():\n k -= 1\n corner_shift = 4\n else:\n corner_shift = 0\n \n nx = self.getNX()\n x0,y0,z0 = self.getCellCorner( corner_shift , ijk = (i,j,k))\n d0 = math.sqrt( (x0 - x)*(x0 - x) + (y0 - y)*(y0 - y))\n c0 = i + j*(nx + 1)\n\n x1,y1,z1 = self.getCellCorner( 1 + corner_shift , ijk = (i,j,k))\n d1 = math.sqrt( (x1 - x)*(x1 - x) + (y1 - y)*(y1 - y))\n c1 = i + 1 + j*(nx + 1)\n\n x2,y2,z2 = self.getCellCorner( 2 + corner_shift , ijk = (i,j,k))\n d2 = math.sqrt( (x2 - x)*(x2 - x) + (y2 - y)*(y2 - y))\n c2 = i + (j + 1)*(nx + 1)\n\n x3,y3,z3 = self.getCellCorner( 3 + corner_shift , ijk = (i,j,k))\n d3 = math.sqrt( (x3 - x)*(x3 - x) + (y3 - y)*(y3 - y))\n c3 = i + 1 + (j + 1)*(nx + 1)\n\n l = [(d0 , c0) , (d1,c1) , (d2 , c2) , (d3,c3)]\n l.sort( EclGrid.d_cmp )\n return l[0][1]",
"def get_cell(self, x, y):\n if y < 0 or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]",
"def location_of(self, c: str) -> tuple:\n\n c = c.upper()\n if c == 'J': c = 'I'\n\n row = 0\n while row < 5:\n col = self.key[row].find(c)\n\n if col != -1:\n return (row, col)\n\n row += 1\n\n raise ValueError(\"couldn't find letter %r in matrix %r\" % (c, self.key))",
"def find(self, value):\n for row in range(self.getHeight()):\n for column in range(self.getWidth()):\n if self[row][column] == value:\n return (row, column)\n return None",
"def coordLookup_l(i, j, k, I, J):\n return i + j*I + k*J*I",
"def find_next_empty_cell(grid):\n for i, row in enumerate(grid):\n for j, col in enumerate(row):\n if col == 0:\n return (i, j)\n return None",
"def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0",
"def get_cell_by_coords(self, coords):\n try:\n cell = GameCell.objects.get(row=coords[0], col=coords[1], game=self)\n return cell\n except GameCell.DoesNotExist:\n return None",
"def get_position(self, cell) -> tuple:\n for i, row in enumerate(self.cells):\n if cell in row:\n return row.index(cell), i\n if not isinstance(cell, Cell):\n raise TypeError(f\"Argument should be of type 'Cell', not '{cell.__class__.__name__}'.\")\n raise ValueError(\"The given cell is not a part of the grid.\")",
"def get_0_pos(grid):\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == 0:\n return i, j\n return -1, -1",
"def locate_point(self, coord):\n lowest_lat = self.lower_left[0]\n leftmost_lng = self.lower_left[1]\n dist_lat = utils.haversine((coord[0], leftmost_lng), self.lower_left)*1000 # in meters\n dist_lng = utils.haversine((lowest_lat, coord[1]), self.lower_left)*1000 # in meters\n grid_coord = (floor(dist_lng/self.distance), floor(dist_lat/self.distance))\n if grid_coord in self.cells:\n return grid_coord\n return None",
"def locate_faces(points, grid):\n\n points = np.asarray(points, dtype=np.float64)\n just_one = (points.ndim == 1)\n points = points.reshape(-1, 2)\n\n tree = build_celltree(grid)\n indices = tree.locate(points)\n lon, lat = self._get_grid_vars(grid)\n x = indices % (lat.shape[1] - 1)\n y = indices // (lat.shape[1] - 1)\n ind = np.column_stack((y, x))\n ind[ind[:, 0] == -1] = [-1, -1]\n if just_one:\n res = ind[0]\n return res\n else:\n res = np.ma.masked_less(ind, 0)\n return res"
]
| [
"0.69080573",
"0.6779657",
"0.66930765",
"0.66930765",
"0.66930765",
"0.6678192",
"0.6678192",
"0.6678192",
"0.6678192",
"0.66224736",
"0.6613548",
"0.639877",
"0.6369274",
"0.6351354",
"0.6351354",
"0.6344353",
"0.6325002",
"0.63212687",
"0.63165545",
"0.6294562",
"0.6286179",
"0.6249105",
"0.6230479",
"0.62070054",
"0.61944795",
"0.6180894",
"0.6140109",
"0.6122447",
"0.6120158",
"0.61080015"
]
| 0.7881949 | 0 |
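A minimal usage sketch for the find_cell() lookup documented in the record above. The import path (ecl.grid), the CASE.EGRID file name and the coordinates are illustrative assumptions; the method name and the start_ijk parameter are taken from the code shown in the record.

from ecl.grid import EclGrid   # import path assumed; older releases exposed ert.ecl.EclGrid

grid = EclGrid("CASE.EGRID")   # hypothetical grid file

# Locate the cell containing a world coordinate, then reuse the result as
# start_ijk to speed up the lookup for a nearby point.
ijk = grid.find_cell(1000.0, 2000.0, 2500.0)
if ijk is None:
    print("The point is outside the grid")
else:
    i, j, k = ijk
    print("Point falls in cell (%d,%d,%d)" % (i, j, k))
    nearby = grid.find_cell(1005.0, 2000.0, 2500.0, start_ijk=ijk)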
Will check if the cell contains the point given by world coordinates (x,y,z). See method get_xyz() for documentation of active_index, global_index and ijk. | def cell_contains( self , x , y , z , active_index = None , global_index = None , ijk = None):
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
return self._cell_contains( gi , x,y,z) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def contains(self, point):\n if in_range(point[0], self.xrange) and in_range(point[0], self.yrange) and in_range(point[0], self.zrange):\n return True\n return False",
"def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1",
"def ContainsPoint(*args, **kwargs):\n return _gdi_.Region_ContainsPoint(*args, **kwargs)",
"def check_by_xyz(cls, x=None, y=None, z=None):\n\n location_obj = cls.query.filter(cls.x_coord == x,\n cls.y_coord == y,\n cls.z_coord == z).first()\n return location_obj",
"def is_point_inside_hypercube(point: List[float], c: List[float], r: float) -> bool:\n diff = np.subtract(point, c)\n return np.all(np.absolute(diff) <= r)",
"def contains_xy(geom, x, y=None, **kwargs):\n if y is None:\n coords = np.asarray(x)\n x, y = coords[:, 0], coords[:, 1]\n return lib.contains_xy(geom, x, y, **kwargs)",
"def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval",
"def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls",
"def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False",
"def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit",
"def __cell_is_in_map(self, x, y) -> bool:\n return x >= 0 and y >= 0 and x < self.occupancy_map.info.width and y < self.occupancy_map.info.height",
"def contains_point(self, point) -> bool:\n return (self.pos.x <= point[0] <= self.pos.x + self.size.x and\n self.pos.y <= point[1] <= self.pos.y + self.size.y and\n self.visible)",
"def xy_occupied(xy, board):\n return True if board[xy[0]][xy[1]] else False",
"def inside(self, x: int, y: int, z: int, chunk: bool=False) -> bool:\n factor = 32 if chunk else 512\n rx = x // factor\n rz = z // factor\n return not (rx != self.x or rz != self.z or y < 0 or y > 255)",
"def valid_coordinate(self,row,column):\r\n if row >= 0 and row < len(self.wordsearch):\r\n if column >= 0 and column < len(self.wordsearch[0]):\r\n return True\r\n return False",
"def check_point(point,points):\n if point in points:\n return True\n else:\n return False",
"def contains(self, xy):\n if np.ndim(xy) == 2:\n xp = xy[:, 0]\n yp = xy[:, 1]\n elif (np.ndim(xy) == 1) and (len(xy) == 2):\n xp = xy[0]\n yp = xy[1]\n else:\n raise ValueError(\"crazy\")\n\n xinside = (self.x0 <= xp) & (xp <= self.x1)\n yinside = (self.y0 <= yp) & (yp <= self.y1)\n return xinside & yinside",
"def contains(self, point):\n return super().contains((point[0] - self.x, point[1] - self.y))",
"def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2",
"def __contains__(self, pos):\n if pos in self._coordinates:\n return True\n return False",
"def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True",
"def __contains__(self, point, e=10e-10):\n v1 = self.vector\n v2 = Vector.createFromTwoPoints(self.point, point)\n return v1.colinear(v2, e)",
"def _get_coordinates(x,y,z):\n\t\ttemp = Board.board\n\t\ttemp1 = temp=='M'\n\t\tfor i in range(6,x):\n\t\t\tfor j in range(y,z):\n\t\t\t\tif(temp1[i][j]==True):\n\t\t\t\t\tcurrent_x = i\n\t\t\t\t\tcurrent_y = j\n\n\t\treturn current_x,current_y",
"def contains(self, loc): \n return loc.distance(self.center) <= self.radius",
"def inrange(cc, point):\n return point.row in range(cc.top, cc.bottom+1) and point.col in range(cc.left, cc.right+1)",
"def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0",
"def hit(bx, by, r, px, py,h):\n if bx >= px:\n distance = bx - px\n else:\n distance = px - bx\n if py<=by and by<=py+h and distance <= r:\n return True\n else:\n return False",
"def values_in_world(self, x, y, z, interpolation=None):\n raise NotImplementedError",
"def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True",
"def exposed(self, position):\r\n x, y, z = position\r\n for dx, dy, dz in FACES:\r\n if (x + dx, y + dy, z + dz) not in self.world:\r\n return True\r\n return False"
]
| [
"0.7148915",
"0.6899895",
"0.6835324",
"0.67269105",
"0.66799223",
"0.6678204",
"0.66715646",
"0.6637392",
"0.66056424",
"0.660049",
"0.6567728",
"0.6531672",
"0.6508277",
"0.6455558",
"0.64402044",
"0.6436551",
"0.64294076",
"0.64017344",
"0.6400132",
"0.63888776",
"0.6327097",
"0.63234806",
"0.6317243",
"0.63153565",
"0.6291851",
"0.6283056",
"0.62443835",
"0.6243794",
"0.6240933",
"0.62393343"
]
| 0.7042436 | 1 |
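A hedged sketch combining find_cell() and cell_contains() from the records above. The grid file name and the coordinates are assumptions for illustration only; the method signatures come from the records themselves.

from ecl.grid import EclGrid   # import path assumed

grid = EclGrid("CASE.EGRID")   # hypothetical grid file
x, y, z = 1000.0, 2000.0, 2500.0

ijk = grid.find_cell(x, y, z)   # candidate cell, as in the previous record
if ijk is not None and grid.cell_contains(x, y, z, ijk=ijk):
    # cell_contains() confirms the world coordinate lies inside the cell at ijk.
    print("Cell %s contains (%g,%g,%g)" % (str(ijk), x, y, z))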
Will find the i,j of the cell with utm coordinates x,y. The input k is the layer you are interested in; the allowed values for k are [0,nz]. If the coordinates (x,y) are found to be outside the grid a ValueError exception is raised. | def findCellXY(self , x, y , k):
if 0 <= k <= self.getNZ():
i = ctypes.c_int()
j = ctypes.c_int()
ok = self._get_ij_xy( x,y,k , ctypes.byref(i) , ctypes.byref(j))
if ok:
return (i.value , j.value)
else:
raise ValueError("Could not find the point:(%g,%g) in layer:%d" % (x,y,k))
else:
raise IndexError("Invalid layer value:%d" % k) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_cell( self , x , y , z , start_ijk = None):\n\n if start_ijk:\n start_index = self.__global_index( ijk = start_ijk )\n else:\n start_index = 0\n global_index = self._get_ijk_xyz( x , y , z , start_index)\n if global_index >= 0:\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n self._get_ijk1( global_index , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k)) \n return (i.value , j.value , k.value)\n else:\n return None",
"def findCellCornerXY(self , x, y , k):\n i,j = self.findCellXY(x,y,k)\n if k == self.getNZ():\n k -= 1\n corner_shift = 4\n else:\n corner_shift = 0\n \n nx = self.getNX()\n x0,y0,z0 = self.getCellCorner( corner_shift , ijk = (i,j,k))\n d0 = math.sqrt( (x0 - x)*(x0 - x) + (y0 - y)*(y0 - y))\n c0 = i + j*(nx + 1)\n\n x1,y1,z1 = self.getCellCorner( 1 + corner_shift , ijk = (i,j,k))\n d1 = math.sqrt( (x1 - x)*(x1 - x) + (y1 - y)*(y1 - y))\n c1 = i + 1 + j*(nx + 1)\n\n x2,y2,z2 = self.getCellCorner( 2 + corner_shift , ijk = (i,j,k))\n d2 = math.sqrt( (x2 - x)*(x2 - x) + (y2 - y)*(y2 - y))\n c2 = i + (j + 1)*(nx + 1)\n\n x3,y3,z3 = self.getCellCorner( 3 + corner_shift , ijk = (i,j,k))\n d3 = math.sqrt( (x3 - x)*(x3 - x) + (y3 - y)*(y3 - y))\n c3 = i + 1 + (j + 1)*(nx + 1)\n\n l = [(d0 , c0) , (d1,c1) , (d2 , c2) , (d3,c3)]\n l.sort( EclGrid.d_cmp )\n return l[0][1]",
"def getNodeXYZ(self , i,j,k):\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n\n corner = 0\n \n if i == nx:\n i -= 1\n corner += 1\n\n if j == ny:\n j -= 1\n corner += 2\n\n if k == nz:\n k -= 1\n corner += 4\n\n if self._ijk_valid( i , j , k):\n return self.get_corner_xyz( corner , global_index = i + j*nx + k*nx*ny )\n else:\n raise IndexError(\"Invalid coordinates: (%d,%d,%d) \" % (i,j,k))",
"def getNodePos(self , i , j , k):\n if not 0 <= i <= self.getNX():\n raise IndexError(\"Invalid I value:%d - valid range: [0,%d]\" % (i , self.getNX()))\n\n if not 0 <= j <= self.getNY():\n raise IndexError(\"Invalid J value:%d - valid range: [0,%d]\" % (j , self.getNY()))\n\n if not 0 <= k <= self.getNZ():\n raise IndexError(\"Invalid K value:%d - valid range: [0,%d]\" % (k , self.getNZ()))\n \n x = ctypes.c_double()\n y = ctypes.c_double()\n z = ctypes.c_double()\n self._get_corner_xyz( i,j,k , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z))\n return (x.value , y.value , z.value)",
"def grid_value( self , kw , i , j , k):\n return self._grid_value( kw , i , j , k)",
"def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]",
"def _get_coordinates(self, tile, position=None):\n if not position:\n position = self.position\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if position[i][j] == tile:\n return i, j\n\n return RuntimeError('Invalid tile value')",
"def fetch_gridcell(x, y, z, vtk_volume):\n s = (vtk_volume.GetScalarComponentAsFloat(x, y, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y+1, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y+1, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y, z+1, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y, z+1, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y+1, z+1, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y+1, z+1, 0))\n return s",
"def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]",
"def two_dim_index(self, k):\n ind_x = k % self.nx\n ind_y = (k - ind_x) / self.nx\n return (int(ind_y), int(ind_x))",
"def get_coord(path, n_i, n_k):\n fnm = \"%s/coord_mpi%02i%02i.nc\" % (path, n_i, n_k)\n fnc = netcdf.netcdf_file(fnm, 'r')\n x = fnc.variables['x'][:, :]\n z = fnc.variables['z'][:, :]\n return x, z",
"def get_tile(self, row, col):\r\n # replace with your code\r\n return self._cells[row][col]",
"def test_get_cell(self):\n self.assertEqual(5, self.sudoku.get_cell((6, 3)))\n self.assertEqual(1, self.sudoku.get_cell((3, 7)))\n self.assertEqual(2, self.sudoku.get_cell((8, 8)))",
"def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]",
"def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]",
"def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]",
"def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index",
"def coordLookup_l(i, j, k, I, J):\n return i + j*I + k*J*I",
"def get_tile(self, row, col):\n # replace with your code\n return self._cells[row][col]",
"def get_tile(self, row, col):\n # replace with your code\n return 0",
"def _get_coordinates(x,y,z):\n\t\ttemp = Board.board\n\t\ttemp1 = temp=='M'\n\t\tfor i in range(6,x):\n\t\t\tfor j in range(y,z):\n\t\t\t\tif(temp1[i][j]==True):\n\t\t\t\t\tcurrent_x = i\n\t\t\t\t\tcurrent_y = j\n\n\t\treturn current_x,current_y",
"def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right",
"def get_tile(self, row, col):\r\n # replace with your code\r\n return self._grid_tile[row][col]",
"def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn",
"def cell_at(self, x, y):\n\n return self.maze_map[x][y]",
"def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit",
"def get_cell(self, x, y):\n if y < 0 or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]",
"def find_element(grid, target):\n \n # First, iterate over the row indices\n for row_number in range(len(grid)):\n \n# print(\"Checking row\", row_number)\n \n for col_number in range(len(grid[row_number])):\n \n# print(\"Checking column\", col_number)\n \n if grid[row_number][col_number] == target:\n return (row_number, col_number)\n \n return None",
"def get_tile(self, row, col):\n # replace with your code\n if row < self._grid_height and col < self._grid_width:\n return self._grid_2048[row][col]",
"def get_tile(self, row, col):\n if row >= 0 and row < self.get_grid_height():\n if col >= 0 and col < self.get_grid_width():\n # only return if the row and column are ok\n return self._grid[row][col]"
]
| [
"0.63163114",
"0.6309335",
"0.61713743",
"0.6017499",
"0.5768573",
"0.57222867",
"0.5700856",
"0.56826043",
"0.5657158",
"0.5632853",
"0.5626586",
"0.56039524",
"0.5587968",
"0.5584937",
"0.5584937",
"0.5584937",
"0.5544519",
"0.55267566",
"0.55147874",
"0.5498938",
"0.5486063",
"0.5478522",
"0.5477876",
"0.5468816",
"0.5446553",
"0.54419553",
"0.54227895",
"0.5421063",
"0.5411813",
"0.5407578"
]
| 0.7422864 | 0 |
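A small usage sketch for findCellXY() as documented above. The grid file, the coordinates and the layer value are assumptions; the ValueError raised for points outside the grid is taken from the record.

from ecl.grid import EclGrid   # import path assumed

grid = EclGrid("CASE.EGRID")   # hypothetical grid file

k = 0                          # top layer; valid layers are 0 .. grid.getNZ()
try:
    i, j = grid.findCellXY(1000.0, 2000.0, k)
    print("Column (%d,%d) holds the point in layer %d" % (i, j, k))
except ValueError:
    print("The (x,y) position is outside the grid")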
Will find the corner number of the corner closest to utm coordinates x,y. The input k is the layer you are interested in; the allowed values for k are [0,nz]. If the coordinates (x,y) are found to be outside the grid a ValueError exception is raised. | def findCellCornerXY(self , x, y , k):
i,j = self.findCellXY(x,y,k)
if k == self.getNZ():
k -= 1
corner_shift = 4
else:
corner_shift = 0
nx = self.getNX()
x0,y0,z0 = self.getCellCorner( corner_shift , ijk = (i,j,k))
d0 = math.sqrt( (x0 - x)*(x0 - x) + (y0 - y)*(y0 - y))
c0 = i + j*(nx + 1)
x1,y1,z1 = self.getCellCorner( 1 + corner_shift , ijk = (i,j,k))
d1 = math.sqrt( (x1 - x)*(x1 - x) + (y1 - y)*(y1 - y))
c1 = i + 1 + j*(nx + 1)
x2,y2,z2 = self.getCellCorner( 2 + corner_shift , ijk = (i,j,k))
d2 = math.sqrt( (x2 - x)*(x2 - x) + (y2 - y)*(y2 - y))
c2 = i + (j + 1)*(nx + 1)
x3,y3,z3 = self.getCellCorner( 3 + corner_shift , ijk = (i,j,k))
d3 = math.sqrt( (x3 - x)*(x3 - x) + (y3 - y)*(y3 - y))
c3 = i + 1 + (j + 1)*(nx + 1)
l = [(d0 , c0) , (d1,c1) , (d2 , c2) , (d3,c3)]
l.sort( EclGrid.d_cmp )
return l[0][1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findCellXY(self , x, y , k):\n if 0 <= k <= self.getNZ():\n i = ctypes.c_int()\n j = ctypes.c_int()\n ok = self._get_ij_xy( x,y,k , ctypes.byref(i) , ctypes.byref(j))\n if ok:\n return (i.value , j.value)\n else:\n raise ValueError(\"Could not find the point:(%g,%g) in layer:%d\" % (x,y,k))\n else:\n raise IndexError(\"Invalid layer value:%d\" % k)",
"def knear(self, k: int) -> int:\n result = self._read_inline(f\"knear({k})\")\n return int(result)",
"def getNodeXYZ(self , i,j,k):\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n\n corner = 0\n \n if i == nx:\n i -= 1\n corner += 1\n\n if j == ny:\n j -= 1\n corner += 2\n\n if k == nz:\n k -= 1\n corner += 4\n\n if self._ijk_valid( i , j , k):\n return self.get_corner_xyz( corner , global_index = i + j*nx + k*nx*ny )\n else:\n raise IndexError(\"Invalid coordinates: (%d,%d,%d) \" % (i,j,k))",
"def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result",
"def take_corner():\n if board[0] == '_':\n return 0\n elif board[2] == '_':\n return 2\n elif board[6] == '_':\n return 6\n elif board[8] == '_':\n return 8\n else:\n return -1",
"def getNodePos(self , i , j , k):\n if not 0 <= i <= self.getNX():\n raise IndexError(\"Invalid I value:%d - valid range: [0,%d]\" % (i , self.getNX()))\n\n if not 0 <= j <= self.getNY():\n raise IndexError(\"Invalid J value:%d - valid range: [0,%d]\" % (j , self.getNY()))\n\n if not 0 <= k <= self.getNZ():\n raise IndexError(\"Invalid K value:%d - valid range: [0,%d]\" % (k , self.getNZ()))\n \n x = ctypes.c_double()\n y = ctypes.c_double()\n z = ctypes.c_double()\n self._get_corner_xyz( i,j,k , ctypes.byref(x) , ctypes.byref(y) , ctypes.byref(z))\n return (x.value , y.value , z.value)",
"def _get_closest_control_zone(x: float, y: float, hp_info: pd.DataFrame) -> int:\n\n min_dist = CONTROL_ZONE_RADIUS\n min_ind = 0\n\n for ind in hp_info.index:\n hp_x = hp_info[0][ind]\n hp_y = hp_info[1][ind]\n\n dist = np.sqrt((x - hp_x) ** 2 + (y - hp_y) ** 2)\n\n if dist < min_dist:\n min_dist = dist\n min_ind = ind\n\n return min_ind",
"def get_index(corners, i, jk):\n if type(jk) != list:\n jk = list(jk)\n assert corners.shape[1] == 3\n sol = np.where(np.bitwise_or(np.all(corners == [i] + jk, axis=1), \n np.all(corners == [i] + jk[::-1], axis=1)))[0]\n if len(sol) > 0: \n return sol[0]",
"def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index",
"def getNearestPointInLayer(self, data, delta, locn):\n\n# TODO: speed this up - kdtree?\n\n (cx, cy) = locn\n res = None\n dist = None\n for p in data:\n x = p[0]\n y = p[1]\n d = (x-cx)*(x-cx) + (y-cy)*(y-cy)\n if dist:\n if d < dist:\n dist = d\n res = (x, y)\n else:\n dist = d\n res = (x, y)\n\n if dist <= delta:\n return res\n return None",
"def find_k(self, kpt):\n kpt = np.array(kpt)\n ns = np.linalg.norm(self.kpts - kpt[None, :], axis=1)\n ik = np.argmin(ns)\n return ik",
"def neighbor(self, m, k, func):\n data = np.random.random((m, k))\n target = np.random.random(k)\n tree = KDTree(data)\n dist, index = tree.query(target)\n point = tree.data[index]\n spoint, sdist = func(data, target) # func solves the problem\n p1 = self.numTest(point, spoint,\n \"\\n\\t\"+func.__name__+\"() failed: incorrect nearest neighbor\")\n p2 = self.numTest(dist, sdist, \n \"\\n\\t\"+func.__name__+\"() failed: incorrect minimum distance\")\n return p1 + p2",
"def find_layer(z, params):\r\n N = len(params['d_list'])\r\n for i in range(N):\r\n if z <= params['layer_bottom_list'][i]:\r\n return i-1\r\n return N-1",
"def resolve_k(k: float or Callable[[int], float] or None, n: int, k_max: int = None):\n if k_max is None:\n k_max = n\n if callable(k):\n k = k(n)\n elif k is None:\n k = k_max\n elif not 1 <= k <= k_max:\n raise ValueError(f'{k} is too many nearest neighbours, number has to be between 1 and {k_max}.')\n return min(max(1, round(k)), k_max)",
"def calc_nearest_ind(self, robot_pose):\n pass",
"def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj",
"def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass",
"def findKthNumber(self, m: int, n: int, k: int) -> int:\n l, r = 1, m * n\n while l < r:\n mid = l + ((r - l) >> 1)\n\n # Check if there are k or more values that are less than mid.\n # For each row, its elements look like 1*i, 2*i, ... n*i, so the\n # largest number that is less than x will be x // i. But if x is\n # too large for the current row, the total count for this row\n # will be n instead.\n if sum(min(mid // r, n) for r in range(1, m + 1)) >= k:\n # mid is our candidate.\n r = mid\n else:\n l = mid + 1\n\n return l",
"def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]",
"def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit",
"def find_knn(self, k, coordinate, threshold=0):\n def r_square(c1, c2):\n return (c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2\n\n h = []\n for sno in self._coordinates:\n heapq.heappush(\n h, (r_square(coordinate, self._coordinates[sno]), sno))\n\n knn = []\n for unused_i in range(k):\n knn.append(self._stations[heapq.heappop(h)[1]])\n\n min_dist = r_square((float(knn[0]['lat']), float(knn[0]['lng'])),\n coordinate)\n if threshold and min_dist > threshold ** 2:\n return []\n\n return knn",
"def findNearestUnstructNode(xFRF, yFRF, ugridDict):\n\n assert 'xFRF' in list(ugridDict.keys()), 'Error: xFRF is a required key in ugridDict'\n assert 'yFRF' in list(ugridDict.keys()), 'Error: yFRF is a required key in ugridDict'\n\n points = np.column_stack((ugridDict['xFRF'], ugridDict['yFRF']))\n qPt = np.column_stack((xFRF, yFRF))\n\n # compute nearest neighbor\n kdt = scipy.spatial.cKDTree(points)\n dist, ind = kdt.query(qPt, 1)\n\n return ind, dist",
"def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists",
"def find_nearest_neighbors(p, points, k):\n\timport numpy as np\n\tdistances = np.zeros(points.shape[0])\n\tfor i in range(len(distances)):\n\t\tdistances[i] = distance(p,points[i])\n\tind = np.argsort(distances)\n\treturn ind[0:k]",
"def solution(n, m, r, c, k) -> int:\n xs = []\n # Add all the non-zero room widths to xs\n last_column_wall = None\n for col in c:\n if last_column_wall is not None and col - last_column_wall - 1 > 0:\n xs.append(col - last_column_wall - 1)\n last_column_wall = col\n ys = []\n # Add all the non-zero room heights to ys\n last_row_wall = None\n for row in r:\n if last_row_wall is not None and row - last_row_wall - 1 > 0:\n ys.append(row - last_row_wall - 1)\n last_row_wall = row\n return aux(xs, ys, k)",
"def corners_player(self, board):\n valid_moves = self.game.find_valid_moves(self.computer_color, board, self.board_size)\n rows, columns = np.where(valid_moves == 1)\n max_corners = -200\n location = (-2, -2)\n for i in range(len(rows)):\n temp_board = np.copy(board)\n temp_board = self.game.flip_opponent_stones((rows[i], columns[i]), temp_board, self.board_size,\n self.computer_num, self.opponent_num)\n corners_value = self.stone_parity(temp_board)\n if corners_value > max_corners:\n max_corners = corners_value\n location = (rows[i], columns[i])\n return location",
"def get_position(k):\r\n l = get_level(k)\r\n return (l, k - 2**l)",
"def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right",
"def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 10)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size",
"def nearest_input_pts(\n in_latlons: ndarray, out_latlons: ndarray, k: int\n) -> Tuple[ndarray, ndarray]:\n # Convert input latitude and longitude to XYZ coordinates, then create KDtree\n in_x, in_y, in_z = ecef_coords(in_latlons[:, 0].flat, in_latlons[:, 1].flat)\n in_coords = np.c_[in_x, in_y, in_z]\n in_kdtree = KDTree(in_coords)\n # Convert output to XYZ and query the KDtree for nearby input points\n out_x, out_y, out_z = ecef_coords(out_latlons[:, 0].flat, out_latlons[:, 1].flat)\n out_coords = np.c_[out_x, out_y, out_z]\n distances, indexes = in_kdtree.query(out_coords, k)\n # Avoid single dimension output for k=1 case\n if distances.ndim == 1:\n distances = np.expand_dims(distances, axis=1)\n if indexes.ndim == 1:\n indexes = np.expand_dims(indexes, axis=1)\n return distances, indexes"
]
| [
"0.6126895",
"0.60942805",
"0.59902865",
"0.5941996",
"0.57641095",
"0.559557",
"0.55891967",
"0.55502415",
"0.5548755",
"0.55165607",
"0.5490814",
"0.5472218",
"0.5468806",
"0.5456647",
"0.5424942",
"0.5324604",
"0.5322794",
"0.52776706",
"0.5259948",
"0.52301705",
"0.522965",
"0.5227914",
"0.5223359",
"0.5203932",
"0.51700693",
"0.5139214",
"0.51338255",
"0.51321065",
"0.5131217",
"0.5112359"
]
| 0.699358 | 0 |
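A sketch of findCellCornerXY() based on the record above; the returned corner index follows the i + j*(nx+1) node numbering used in that code. The grid file name and coordinates are assumptions.

from ecl.grid import EclGrid   # import path assumed

grid = EclGrid("CASE.EGRID")   # hypothetical grid file

corner = grid.findCellCornerXY(1000.0, 2000.0, 0)   # closest corner in layer 0
nx = grid.getNX()
# Recover the (i,j) node coordinates from the flat corner index.
i, j = corner % (nx + 1), corner // (nx + 1)
print("Closest corner: node (%d,%d) -> index %d" % (i, j, corner))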
Will return a tuple (dx,dy,dz) for the cell dimensions. The dx and dy values are best effort estimates of the cell size along the i and j directions respectively. The three values | def getCellDims(self , active_index = None , global_index = None , ijk = None):
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index )
dx = self._get_cell_dx( gi )
dy = self._get_cell_dy( gi )
dz = self._get_cell_thickness( gi )
return (dx,dy,dz) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cellsize_2d(self):\t\r\n return self.dx * self.dy",
"def cell_dz( self , active_index = None , global_index = None , ijk = None):\n gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index )\n return self._get_cell_thickness( gi )",
"def dz(self):\n if self._uniform_cell_size[2] == gxapi.rDUMMY:\n return None\n return self._uniform_cell_size[2]",
"def calculate_dimensions(self):\n x_coordinates = np.sort(self.grid['x'][:, 0]) # first x node\n self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size\n self.nr_elements_x = self.elements.shape[0] / (self.nr_nodes_z - 1)\n self.nr_nodes_x = self.nr_elements_x + 1\n self.nr_elements_z = self.nr_nodes_z - 1",
"def dim(self) -> Tuple[Tuple[int, int], Tuple[int, int]]:",
"def getDimensions():",
"def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)",
"def getDim(self):\n return \"%dx%d\" % (self.rows, self.cols)",
"def green_meshes_case_D(nz, nx, dz, dx, rho=None, beta=None, lamb=None):\n rho_sign = 1 if rho>=0 else -1\n \n # Change to internal coordinates\n dx = dx/rho\n dz = dz/(2*abs(rho))\n \n # Double-sized array for convolution with the density\n zvec2 = np.arange(-nz+1,nz+1,1)*dz # center = 0 is at [nz-1]\n xvec2 = np.arange(-nx+1,nx+1,1)*dx # center = 0 is at [nx-1]\n \n \n zm2, xm2 = np.meshgrid(zvec2, xvec2, indexing=\"ij\")\n \n Es_case_D_grid = Es_case_D(zm2, xm2, beta, lamb)\n \n return Es_case_D_grid, zvec2*2*rho, xvec2*rho",
"def dimension(self):\n\t\treturn self.d",
"def dimensions():",
"def dim(self):\n\t\treturn self.D",
"def get_dimension(pts):\n return pts[3][0] - pts[0][0], pts[3][1] -",
"def _faceToCellDistanceRatio(self):\n XYdis = numerix.zeros((self.nx, self.ny, self.nz + 1), 'd')\n XYdis[:] = 0.5\n XYdis[..., 0] = 1\n XYdis[..., -1] = 1\n\n XZdis = numerix.zeros((self.nx, self.ny + 1, self.nz), 'd')\n XZdis[:] = 0.5\n XZdis[:, 0,:] = 1\n XZdis[:, -1,:] = 1\n\n YZdis = numerix.zeros((self.nx + 1, self.ny, self.nz), 'd')\n YZdis[:] = 0.5\n YZdis[ 0, ...] = 1\n YZdis[-1, ...] = 1\n\n return numerix.concatenate((numerix.ravel(XYdis.swapaxes(0, 2)),\n numerix.ravel(XZdis.swapaxes(0, 2)),\n numerix.ravel(YZdis.swapaxes(0, 2))))",
"def calc_dimension(your_mesh):\n minx = your_mesh.x.min()\n maxx = your_mesh.x.max()\n miny = your_mesh.y.min()\n maxy = your_mesh.y.max()\n minz = your_mesh.z.min()\n maxz = your_mesh.z.max()\n return minx, maxx, miny, maxy, minz, maxz",
"def dx(self):\n if self._uniform_cell_size[0] == gxapi.rDUMMY:\n return None\n return self._uniform_cell_size[0]",
"def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)",
"def get_dim():\n return (Settings.width, Settings.height)",
"def getdim(self):\n return round(self.w() / self.c)",
"def shape(self):\r\n\r\n if self.gtype == 'points':\r\n out = (self.nx, self.ny, self.nz)\r\n elif self.gtype == 'cells':\r\n # By default, nx, ny and nz contains the size of the grid\r\n # in terms of points. When one dimension is 1, then along\r\n # that direction number of cells == number of points.\r\n out = (self.nx-1 if self.nx > 1 else 1,\r\n self.ny-1 if self.ny > 1 else 1,\r\n self.nz-1 if self.nz > 1 else 1)\r\n return out",
"def d(self):\r\n return self.size.z",
"def dimension(self):\n return 3*self.genus - 3 + self.n",
"def get_distance(self, x, y, z):\n\n up = [abs((p % self.unitcell) - self.unitcell/2) for p in (x,y,z)]\n dmin = 9999999.\n for l in self.types[self.type]:\n sp = [self.pointlist[l[0]][i] * self.unitcell for i in range(3)]\n ep = [self.pointlist[l[1]][i] * self.unitcell for i in range(3)]\n v = [ep[i]-sp[i] for i in range(3)]\n d = [up[i]-sp[i] for i in range(3)]\n # dot products\n c2 = sum([i*j for (i, j) in zip(v, v)])\n c1 = sum([i*j for (i, j) in zip(d, v)])\n\n b = c1/c2\n p = [sp[i] + b * v[i] for i in range(3)]\n dmin = min(dmin, sum([(up[i]-p[i])**2 for i in range(3)]))\n return math.sqrt(dmin) - self.thickness/2.0",
"def dim(self):\n return self._d",
"def _estimateDepth(self, size, neighbourRadius):\n neighbourRadius *= 1.5\n for i in xrange(100):\n j = 2**i\n spacings = [c/j for c in size]\n maxSpace = max(spacings)\n if maxSpace < neighbourRadius:\n return i+1",
"def _mpo_get_d(self, W):\n din = W.shape[3]\n dout = W.shape[1]\n return dout, din",
"def getDimensions(self):\n\t\tprint \"Returning\",self.x,self.y,self.slicesPerTimepoint\n\t\treturn (self.x, self.y, self.slicesPerTimepoint)",
"def get_grid_dimensions(current_problem_size, params, grid_div, block_size_names):\n def get_dimension_divisor(divisor_list, default, params):\n if divisor_list is None:\n if default in params:\n divisor_list = [default]\n else:\n return 1\n return numpy.prod([int(eval(replace_param_occurrences(s, params))) for s in divisor_list])\n divisors = [get_dimension_divisor(d, block_size_names[i], params) for i, d in enumerate(grid_div)]\n return tuple(int(numpy.ceil(float(current_problem_size[i]) / float(d))) for i, d in enumerate(divisors))",
"def dimensions(self) -> typing.Tuple[int, int]:\n dimensions = self.data[2]\n dimensions = re.findall(r'(\\d+)\\s+x\\s+(\\d+)\\s+M', dimensions.replace('-', '0'))\n return dimensions[0] if dimensions else (0, 0)",
"def dims(self):\n return (self.startx, self.starty, self.endx, self.endy)"
]
| [
"0.69722414",
"0.6508007",
"0.64366984",
"0.60504264",
"0.6036826",
"0.600399",
"0.5995232",
"0.59840685",
"0.5981503",
"0.5959597",
"0.5954673",
"0.5930613",
"0.5919213",
"0.58766544",
"0.5867574",
"0.58416903",
"0.5830659",
"0.58156824",
"0.58141303",
"0.5770785",
"0.5738006",
"0.57125837",
"0.5708999",
"0.5694614",
"0.5685204",
"0.5684852",
"0.56792194",
"0.5649578",
"0.5609882",
"0.5609705"
]
| 0.7103264 | 0 |
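A short sketch for getCellDims() from the record above, showing two of the addressing alternatives in the signature. The grid file and the cell indices are illustrative assumptions.

from ecl.grid import EclGrid   # import path assumed

grid = EclGrid("CASE.EGRID")   # hypothetical grid file

# The cell can be addressed by ijk, by global index or by active index.
dx, dy, dz = grid.getCellDims(ijk=(0, 0, 0))
print("Cell (0,0,0): dx=%g dy=%g dz=%g" % (dx, dy, dz))

dx, dy, dz = grid.getCellDims(global_index=0)   # same cell via its global index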
Query if the grid has an LGR with name lgr_name. | def has_lgr( self , lgr_name ):
if self._has_lgr( lgr_name ):
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_occupied(\n grid: List[List[str]], row: int, col: int, dx: int, dy: int) -> bool:\n while 0 <= (row + dy) < len(grid) and 0 <= (col + dx) < len(grid[0]):\n row += dy\n col += dx\n if grid[row][col] == 'L':\n return False\n if grid[row][col] == '#':\n return True\n return False",
"def get_lgr( self , lgr_name ):\n if self._has_lgr( lgr_name ):\n lgr = self._get_lgr( name )\n lgr.setParent( self )\n return lgr\n else:\n raise KeyError(\"No such LGR:%s\" % lgr_name)",
"def check_gs_name(self, name):\n if name in self.currentconfig.list_greyscales():\n QtWidgets.QMessageBox.warning(self, \"Name error\", \"Greyscale name\" + name + \" clashes with existing one\")\n return True\n return False",
"def is_grid_search(self) -> bool:\r\n return os.path.exists(self._grid_search_path)",
"def is_in_retina_graders_group(user):\n return user.groups.filter(name=settings.RETINA_GRADERS_GROUP_NAME).exists()",
"def is_non_reccuring_charge_populated_in_the_grid(self, charge_name):\n status = None\n try:\n charge_name_locator = (By.XPATH, \"//div[contains(@id, 'divTrafficGrid_') and @data-grid-name='NonUsageChargesGrid']/descendant::span[text()=%s]\" %(charge_name))\n self.wait().until(EC.presence_of_element_located(charge_name_locator))\n status = True\n except:\n status = False\n finally:\n return status",
"def load_grid(world_state):\n while world_state.is_mission_running:\n #sys.stdout.write(\".\")\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n if len(world_state.errors) > 0:\n raise AssertionError('Could not load grid.')\n\n if world_state.number_of_observations_since_last_state > 0:\n msg = world_state.observations[-1].text\n observations = json.loads(msg)\n grid = observations.get(u'floorAll', 0)\n break\n return grid",
"def load_grid(world_state):\n while world_state.is_mission_running:\n #sys.stdout.write(\".\")\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n if len(world_state.errors) > 0:\n raise AssertionError('Could not load grid.')\n\n if world_state.number_of_observations_since_last_state > 0:\n msg = world_state.observations[-1].text\n observations = json.loads(msg)\n grid = observations.get(u'floorAll', 0)\n break\n return grid",
"def check_grue(self, tile):\n if tile[2] == 'grue':\n if self.lab.inventory > 0:\n self.lab.fire()\n print 'Lighted match'",
"def check_groupname_existance(group_name):\n query=\"SELECT * FROM groups WHERE group_name='{}'\".format(group_name)\n cur.execute(query)\n return cur.fetchone()",
"def exists_type(self, type):\n for i in range(1, self.grid_size - 1):\n for j in range(1, self.grid_size - 1):\n obj = self.grid.get(i, j)\n if obj and obj.type == type:\n return True\n return False",
"def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]",
"async def is_garage_name_exit(self, garage_name:str):\r\n async with self._db.acquire() as conn:\r\n result= [dict(row.items()) async for row in await conn.execute(\r\n Garage.select().where((Garage.c.garage_name == garage_name)))\r\n ]\r\n return len(result) >0 and True or False",
"def pl_exists(self, name):\n for i, d in enumerate(self.ui.leftwin.data):\n if d.data.name == name:\n return i\n\n return -1",
"def available(name):\n return name in get_all()",
"def __contains__(self, arg):\r\n\r\n return arg in self.grfx[0]",
"def is_ligand(self):\n if any(LigandComponentAdaptor().fetch_by_residue_id(r.residue_id) for r in self.Residues):\n return True\n else:\n return False",
"def getNumLGR(self):\n return self._num_lgr( )",
"def test_get_grid_type(flopy_dis_mf6):\n flopy_dis, mf6 = flopy_dis_mf6\n mf6.initialize()\n\n # Getting the grid id from the model, requires specifying one variable\n k11_tag = mf6.get_var_address(\"K11\", flopy_dis.model_name, \"NPF\")\n grid_id = mf6.get_var_grid(k11_tag)\n assert mf6.get_grid_type(grid_id) == \"rectilinear\"",
"def _get_grid_coord(wl, bl):\n row = None\n col = None\n\n for i, (l, h) in QLDbEntry.wl_map.items():\n if wl >= l and wl <= h:\n row = i\n break\n\n for i, (l, h) in QLDbEntry.bl_map.items():\n if bl >= l and bl <= h:\n col = i\n break\n\n return col, row",
"def check_grid(gridname,modeldirs=None):\n\n chgriddir(gridname)\n \n # guess the model directories \n modeldirs = get_modeldirs(modeldirs)\n \n for modeldir in modeldirs:\n if not os.path.isfile(modeldir+\"/finished.out\"):\n print(\"Model \"+modeldir+\" failed:\")",
"def check_general(self, gb, gr):\n gb = General(\"BLUE\")\n gr = General(\"RED\")\n # Look to see if the generals are in the same column\n \n gr_row = self.ind(new_pos)[0]\n gr_col = self.ind(new_pos)[1]\n gb_row = self.ind(cur_pos)[0]\n gb_col = self.ind(cur_pos)[1]",
"def see_occupant(self, x, y, dx, dy):\r\n if dx == 0 and dy == 0: # Makes looping easier\r\n return False\r\n x += dx\r\n y += dy\r\n while 0 <= x < self.width and 0 <= y < self.height:\r\n if self.grid[y][x] == '#':\r\n return True\r\n if self.grid[y][x] == 'L':\r\n return False\r\n x += dx\r\n y += dy\r\n return False",
"def check_layer_name(field):\n \n hygienize = field.replace(\"\\\"\", \"\")\n layer_name = (hygienize.split(\".\"))[0]\n \n if layer_name in layer_names:\n return True\n return False",
"def has_group(self,groupname):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" WHERE $groupname_field$='$groupname$'\",{'groupname':groupname,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: has_group: %s\" % (query,))\n\n cursor.execute(query)\n for row in cursor:\n return True\n return False",
"def test_grdimage_global_subset(grid_360):\n # Get a slice of South America and Africa only (lat=-90:31, lon=-180:41)\n sliced_grid = grid_360[0:121, 0:221]\n assert sliced_grid.gmt.registration == 0 # gridline registration\n assert sliced_grid.gmt.gtype == 0 # Cartesian coordinate system\n\n fig = Figure()\n fig.grdimage(\n grid=sliced_grid, cmap=\"vik\", region=\"g\", projection=\"W0/3.5c\", frame=True\n )\n return fig",
"def is_roi_col(col_name):\n return re.match(r\"[L|R][0-9]+$\", col_name)",
"def getGlovesFromName(self, idx):\n\t\tfile = self.all_file_names[idx]\n\t\tgloveIndex = file.find(\"GLOVES\") + 7\n\t\tif file[sleeveIndex] == 'Y':\n\t\t\treturn True\n\t\treturn False",
"def has_grid(obj, is_json=False):\n if is_json:\n try:\n # this is the case where we're in a pure dict\n tv_grid = obj.get(constants.TV_GRID)\n except:\n # Thi is the case where we're in the data object\n tv_grid = obj.tv_grid\n\n return not is_empty_grid(tv_grid)\n else:\n tv_grid = obj.tv_grid\n if tv_grid and tv_grid.width != 0 or tv_grid.height != 0 or tv_grid.position != 0 or tv_grid.device_ids:\n return True\n return False",
"def in_row(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x != col and n == grid[row][x]:\n return True\n return False"
]
| [
"0.61112547",
"0.57133603",
"0.56065786",
"0.5493625",
"0.54852134",
"0.5471643",
"0.5407397",
"0.5407397",
"0.53794944",
"0.53548443",
"0.5354632",
"0.53347063",
"0.52872145",
"0.52827424",
"0.5275375",
"0.5262673",
"0.52566737",
"0.5246809",
"0.5244999",
"0.52418375",
"0.52278805",
"0.5208467",
"0.51639336",
"0.5125663",
"0.5102378",
"0.51015276",
"0.5101217",
"0.5096176",
"0.5089898",
"0.50882727"
]
| 0.7519792 | 0 |
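A guarded usage sketch for has_lgr(), paired with get_lgr() from the following record. The LGR name and the grid file are assumptions for illustration.

from ecl.grid import EclGrid   # import path assumed

grid = EclGrid("CASE.EGRID")   # hypothetical grid file
lgr_name = "LGR1"              # hypothetical LGR name

if grid.has_lgr(lgr_name):
    lgr = grid.get_lgr(lgr_name)   # get_lgr() is documented in the next record
    print("Grid contains LGR %s" % lgr_name)
else:
    print("No LGR named %s in this grid" % lgr_name)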
Get EclGrid instance with LGR content. Return an EclGrid instance based on the LGR named lgr_name. The LGR grid instance is in most respects like an ordinary grid instance; the only difference is that it cannot be used for further queries about LGRs. If the grid does not contain an LGR with this name the method will return None. | def get_lgr( self , lgr_name ):
if self._has_lgr( lgr_name ):
            lgr = self._get_lgr( lgr_name )
lgr.setParent( self )
return lgr
else:
raise KeyError("No such LGR:%s" % lgr_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getGrid(self):\n\n\t\t\treturn self._logic.getGrid()",
"def get_cell_lgr( self, active_index = None , global_index = None , ijk = None):\n gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)\n lgr = self._get_cell_lgr( gi )\n if lgr:\n lgr.setParent( self )\n return lgr\n else:\n raise IndexError(\"No LGR defined for this cell\")",
"def get_grid(self):\r\n return self.grid",
"def getGrid(self):\n\n\t\treturn self._grid",
"def get_grid( self ):\n\n return self.__grid",
"def get_grid(self):\n return self._grid",
"def get_grid(grid_url, raw=False):\n fid = parse_grid_id_args(None, grid_url)\n response = v2.grids.content(fid)\n parsed_content = response.json()\n\n if raw:\n return parsed_content\n return Grid(parsed_content, fid)",
"def grid(self):\n if hasattr(self.cls, \"grid\"):\n return self.cls.grid",
"def loadFromGrdecl(cls , filename):\n\n if os.path.isfile(filename):\n with open(filename) as f:\n specgrid = EclKW.read_grdecl(f, \"SPECGRID\", ecl_type=EclTypeEnum.ECL_INT_TYPE, strict=False)\n zcorn = EclKW.read_grdecl(f, \"ZCORN\")\n coord = EclKW.read_grdecl(f, \"COORD\")\n try:\n actnum = EclKW.read_grdecl(f, \"ACTNUM\", ecl_type=EclTypeEnum.ECL_INT_TYPE)\n except ValueError:\n actnum = None\n\n try:\n mapaxes = EclKW.read_grdecl(f, \"MAPAXES\")\n except ValueError:\n mapaxes = None\n\n return EclGrid.create( specgrid , zcorn , coord , actnum , mapaxes )\n else:\n raise IOError(\"No such file:%s\" % filename)",
"def get_grid(loc=GRID_LOC):\n\n sref = ncepgrib2.Grib2Decode(loc, gribmsg=False)\n lats, lons = sref[0].grid()\n\n return lats, lons",
"def load_grid(world_state):\n while world_state.is_mission_running:\n #sys.stdout.write(\".\")\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n if len(world_state.errors) > 0:\n raise AssertionError('Could not load grid.')\n\n if world_state.number_of_observations_since_last_state > 0:\n msg = world_state.observations[-1].text\n observations = json.loads(msg)\n grid = observations.get(u'floorAll', 0)\n break\n return grid",
"def load_grid(world_state):\n while world_state.is_mission_running:\n #sys.stdout.write(\".\")\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n if len(world_state.errors) > 0:\n raise AssertionError('Could not load grid.')\n\n if world_state.number_of_observations_since_last_state > 0:\n msg = world_state.observations[-1].text\n observations = json.loads(msg)\n grid = observations.get(u'floorAll', 0)\n break\n return grid",
"def loadFromFile(cls , filename):\n if FortIO.isFortranFile( filename ):\n return EclGrid( filename )\n else:\n return EclGrid.loadFromGrdecl( filename )",
"def import_grid(file_name):\n\n return FileReader(file_name=file_name).grid",
"def grid(self):\n return self._grid",
"def get_grid(self, mol_data):\n\n if self.grid_info is None:\n\n try:\n\n x = mol_data['grid_points/x'][()]\n y = mol_data['grid_points/y'][()]\n z = mol_data['grid_points/z'][()]\n\n except BaseException:\n\n raise ValueError(\n \"Grid points not found in the data file\")\n\n else:\n\n center = mol_data['grid_points/center'][()]\n npts = np.array(self.grid_info['number_of_points'])\n res = np.array(self.grid_info['resolution'])\n\n halfdim = 0.5 * (npts * res)\n\n low_lim = center - halfdim\n hgh_lim = low_lim + res * (npts - 1)\n\n x = np.linspace(low_lim[0], hgh_lim[0], npts[0])\n y = np.linspace(low_lim[1], hgh_lim[1], npts[1])\n z = np.linspace(low_lim[2], hgh_lim[2], npts[2])\n\n # there is stil something strange\n # with the ordering of the grid\n # also noted in GridTools define_grid_points()\n y, x, z = np.meshgrid(y, x, z)\n grid = (x, y, z)\n npts = (len(x), len(y), len(z))\n return grid, npts",
"def grid(self):\n return self.__grid",
"def get_grid_line_collection(self, **kwargs):\n return self.__cls.get_grid_line_collection(**kwargs)",
"def get_nc_BGrid_GFDL(grdfile, name='GFDL_CM2.1_North_Pacific', area='regional', \\\n xrange=(60,175), yrange=(120, 190), ystart=235):\n\n nc = pyroms.io.Dataset(grdfile)\n\n lon_t = nc.variables['geolon_t'][:]\n lat_t = nc.variables['geolat_t'][:]\n lon_uv = nc.variables['geolon_c'][:]\n lat_uv = nc.variables['geolat_c'][:]\n\n h = nc.variables['ht'][:]\n\n f = nc.variables['coriolis_param'][:]\n\n kmt = nc.variables['kmt'][:]\n z_t = nc.variables['st_ocean'][:]\n z_t_edges = nc.variables['st_edges_ocean'][:]\n\n kmu = nc.variables['kmu'][:]\n z_uv = nc.variables['sw_ocean'][:]\n z_uv_edges = nc.variables['sw_edges_ocean'][:]\n\n # compute mask at t-point\n M_t, L_t = kmt.shape\n N_t = z_t.shape[0]\n mask_t = np.zeros((N_t, M_t, L_t))\n for j in range(M_t):\n for i in range(L_t):\n try:\n mask_t[0:int(kmt[j,i]), j,i] = 1\n except:\n mask_t[:, j,i] = 0\n\n # compute mask at uv-point\n M_uv, L_uv = kmu.shape\n N_uv = z_uv.shape[0]\n mask_uv = np.zeros((N_uv, M_uv, L_uv))\n for j in range(M_uv):\n for i in range(L_uv):\n try:\n mask_uv[0:int(kmu[j,i]), j,i] = 1\n except:\n mask_uv[:, j,i] = 0\n\n if area == 'npolar':\n #add two rows in the north and the south\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = 90\n lat_t[-1,:] = 91\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = 90\n lat_uv[-1,:] = 91\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n if area == 'tripole':\n #add two rows in the north and the south\n fold1 = L_t//2\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t[-2,:fold1] = lon_t[-3,L_t:fold1-1:-1]\n lon_t[-2,L_t:fold1-1:-1] = lon_t[-3,:fold1]\n lon_t[-1,:fold1] = lon_t[-4,L_t:fold1-1:-1]\n lon_t[-1,L_t:fold1-1:-1] = lon_t[-4,:fold1]\n\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = lat_t[-3,:]\n lat_t[-1,:] = lat_t[-4,:]\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n\n lon_uv[-2,:fold1] = lon_uv[-4,L_t:fold1-1:-1]\n lon_uv[-2,L_t:fold1-1:-1] = lon_uv[-4,:fold1]\n lon_uv[-1,:fold1] = lon_uv[-5,L_t:fold1-1:-1]\n lon_uv[-1,L_t:fold1-1:-1] = 
lon_uv[-5,:fold1]\n\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = lat_uv[-3,:]\n lat_uv[-1,:] = lat_uv[-4,:]\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv, \\\n mask_t, mask_uv, h, z_t, z_t_edges, \\\n z_uv, z_uv_edges, f, \\\n name, xrange=xrange, yrange=yrange)",
"def has_lgr( self , lgr_name ):\n if self._has_lgr( lgr_name ):\n return True\n else:\n return False",
"def read_grid(filename_grid, dim=2, slc=None):\n ## get shape and slice\n fid = h5py.File(filename_grid, 'r')\n if dim==2:\n varnames = ['x', 'y', 'ep']\n if slc is None: slc = np.s_[0,:,:]\n if dim==3:\n varnames = ['x', 'y', 'z', 'ep']\n if slc is None: slc = np.s_[:,:,:]\n\n dset = fid.get(varnames[0])\n shape = dset[slc].shape\n (nx,ny,nz) = dset.shape\n ## read variables\n grid = {}\n for varname in varnames:\n try:\n dset = fid.get(varname)\n grid[varname] = np.zeros(shape)\n dset.read_direct(grid[varname], source_sel=slc)\n grid[varname] = grid[varname].transpose()\n except:\n pass\n fid.close()\n return grid, nx, ny, nz",
"def get_grid(self, gribfile, using_pygrib_derived_coords=False):\n gr = pygrib.open(gribfile)\n\n g = gr[1]\n\n latdim = g.Nj\n londim = g.Ni\n\n if not using_pygrib_derived_coords:\n try:\n latFirst = g.latitudeOfFirstGridPointInDegrees\n lonFirst = g.longitudeOfFirstGridPointInDegrees\n latLast = g.latitudeOfLastGridPointInDegrees\n lonLast = g.longitudeOfLastGridPointInDegrees\n dy = g.jDirectionIncrementInDegrees\n dx = g.iDirectionIncrementInDegrees\n latPole = g.latitudeOfSouthernPoleInDegrees\n lonPole = g.longitudeOfSouthernPoleInDegrees\n\n lons, lats = np.meshgrid(np.linspace(\n lonFirst, lonLast, londim), np.linspace(latFirst, latLast, latdim))\n\n if not latPole==0 and not lonPole==0:\n log.info('Found rotated coordinates - converting to regular coordinates')\n lons, lats = regrot.rot_to_reg(lonPole,latPole,lons,lats)\n\n except RuntimeError:\n using_pygrib_derived_coords = True\n warnings.warn('Falling back to pygrib derived coordinates')\n lats, lons = g.latlons()\n using_pygrib_derived_coords=True\n if using_pygrib_derived_coords:\n lats, lons = g.latlons()\n\n data_date = g.dataDate\n data_time = g.dataTime\n\n starttime = dt.datetime.strptime(('%i-%.2i')%(data_date,data_time),'%Y%m%d-%H%M')\n\n gr.close()\n\n return lats.flatten(), lons.flatten(), latdim, londim",
"def getGrid(self):\n\n return self.board",
"def pull_rhds_group(self, name):\n return self.ldap_connection.search_s(\"ou=managedGroups,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE,\"cn={0}\".format(name))",
"def create_lower_level_grid(self):\n if self.level == 1:\n return False\n else:\n return LowerLevelGrid(level=self.level - 1, parent_hypercubes_number=self.hypercubes_number, parent_hypercubes=self.hypercubes, dims=self.dims)",
"def build_grid_generator(cfg, input_shape):\n grid_generator = cfg.MODEL.GRID_GENERATOR.NAME\n return GRID_GENERATOR_REGISTRY.get(grid_generator)(cfg, input_shape)",
"def load_grid(world_state):\n grid = list()\n while world_state.is_mission_running:\n sys.stdout.write(\".\")\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n if len(world_state.errors) > 0:\n raise AssertionError('Could not load grid.')\n\n if world_state.number_of_observations_since_last_state > 0:\n msg = world_state.observations[-1].text\n observations = json.loads(msg)\n grid = observations.get(u'floorAll', 0)\n break\n return grid",
"def grid(self) -> aa.Grid2D:\r\n return self.analysis.dataset.grid",
"def gridrealm(self):\n if self._gridrealm_cache:\n return self._gridrealm_cache\n gr_defaults = Section.gridrealm.value\n opt_dict = dict([(obj.name, obj.value) for obj in gr_defaults])\n config_vals = self._section_as_dict(Section.gridrealm.name)\n opt_dict.update(config_vals)\n self._gridrealm_cache = AttrDict(**opt_dict)\n return self._gridrealm_cache",
"def __init__(self, name, grid):\n self.name = name\n self.space_dimensions = grid.dimensions\n self.step_dimension = grid.stepping_dim\n self.dtype = grid.dtype\n\n # All known solutions and grids in this context\n self.solutions = []\n self.grids = {}"
]
| [
"0.6133424",
"0.5980499",
"0.5965584",
"0.59655136",
"0.5924115",
"0.5908138",
"0.5881674",
"0.5713956",
"0.5583135",
"0.54097676",
"0.52644074",
"0.52644074",
"0.5248038",
"0.52336603",
"0.52278453",
"0.52225953",
"0.515064",
"0.5137618",
"0.51305234",
"0.5071367",
"0.5060548",
"0.5019512",
"0.50044525",
"0.50034285",
"0.4999076",
"0.49834728",
"0.49819362",
"0.49488586",
"0.4927596",
"0.4917461"
]
| 0.6739804 | 0 |
Get EclGrid instance located in cell. Will query the current grid instance if the cell given by active_index, global_index or ijk has been refined with an LGR. Will return None if the cell in question has not been refined; otherwise the return value can be used for further queries. See get_xyz() for documentation of the input parameters. | def get_cell_lgr( self, active_index = None , global_index = None , ijk = None):
gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)
lgr = self._get_cell_lgr( gi )
if lgr:
lgr.setParent( self )
return lgr
else:
raise IndexError("No LGR defined for this cell") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cell(self, cell, dbg=False):\n a = b = 0\n try:\n a, b = self.__ret_cell(cell)\n if dbg:\n return self._grid[a][b], a, b\n else:\n return self._grid[a][b]\n except IndexError as e:\n self.perror(\"Error: '%s'.\" % e, cell, a, b, 5)\n sys.exit()",
"def get_grid(self):\r\n return self.grid",
"def get_grid(loc=GRID_LOC):\n\n sref = ncepgrib2.Grib2Decode(loc, gribmsg=False)\n lats, lons = sref[0].grid()\n\n return lats, lons",
"def get_grid(self):\n return self._grid",
"def getGrid(self):\n\n\t\t\treturn self._logic.getGrid()",
"def get_grid( self ):\n\n return self.__grid",
"def getGrid(self):\n\n\t\treturn self._grid",
"def get_game_cell(self, row, col):\n try:\n return GameCell.objects.get(game=self, row=row, col=col)\n except GameCell.DoesNotExist:\n return None",
"def get_cell(self, x, y):\n if y < 0 or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]",
"def get_cell(self, point):\n return self._grid[point.x][point.y]",
"def find_cell( self , x , y , z , start_ijk = None):\n\n if start_ijk:\n start_index = self.__global_index( ijk = start_ijk )\n else:\n start_index = 0\n global_index = self._get_ijk_xyz( x , y , z , start_index)\n if global_index >= 0:\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n self._get_ijk1( global_index , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k)) \n return (i.value , j.value , k.value)\n else:\n return None",
"def _get_grid_coord(wl, bl):\n row = None\n col = None\n\n for i, (l, h) in QLDbEntry.wl_map.items():\n if wl >= l and wl <= h:\n row = i\n break\n\n for i, (l, h) in QLDbEntry.bl_map.items():\n if bl >= l and bl <= h:\n col = i\n break\n\n return col, row",
"def FindGrid(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...",
"def get_grid(self, mol_data):\n\n if self.grid_info is None:\n\n try:\n\n x = mol_data['grid_points/x'][()]\n y = mol_data['grid_points/y'][()]\n z = mol_data['grid_points/z'][()]\n\n except BaseException:\n\n raise ValueError(\n \"Grid points not found in the data file\")\n\n else:\n\n center = mol_data['grid_points/center'][()]\n npts = np.array(self.grid_info['number_of_points'])\n res = np.array(self.grid_info['resolution'])\n\n halfdim = 0.5 * (npts * res)\n\n low_lim = center - halfdim\n hgh_lim = low_lim + res * (npts - 1)\n\n x = np.linspace(low_lim[0], hgh_lim[0], npts[0])\n y = np.linspace(low_lim[1], hgh_lim[1], npts[1])\n z = np.linspace(low_lim[2], hgh_lim[2], npts[2])\n\n # there is stil something strange\n # with the ordering of the grid\n # also noted in GridTools define_grid_points()\n y, x, z = np.meshgrid(y, x, z)\n grid = (x, y, z)\n npts = (len(x), len(y), len(z))\n return grid, npts",
"def grid(self):\n if hasattr(self.cls, \"grid\"):\n return self.cls.grid",
"def locate_point(self, coord):\n lowest_lat = self.lower_left[0]\n leftmost_lng = self.lower_left[1]\n dist_lat = utils.haversine((coord[0], leftmost_lng), self.lower_left)*1000 # in meters\n dist_lng = utils.haversine((lowest_lat, coord[1]), self.lower_left)*1000 # in meters\n grid_coord = (floor(dist_lng/self.distance), floor(dist_lat/self.distance))\n if grid_coord in self.cells:\n return grid_coord\n return None",
"def get_cell(self, location: Hashable) -> Cell:\n\t\treturn self._location_to_cell_map.get(location, None)",
"def get_cell(self, cell_id: str) -> Optional[Cell]:\n\n for cell in self.cells:\n if cell.id == cell_id:\n return cell\n return None",
"def get_cell(self, x, y):\n x1, y1 = self.transpose_coordinates(x, y)\n if self.is_in_field(x1, y1):\n return self._cells[y1][x1]\n return None",
"def __getitem__(self, pos):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._getitem_1d(pos)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._getitem_2d(pos)\n else:\n return None",
"def __getitem__(self, pos):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._getitem_1d(pos)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._getitem_2d(pos)\n else:\n return None",
"def query(self, cell: Tuple[int, int]):\n return self._board[cell[0]][cell[1]]",
"def get_bl_cell(self):\n return self._bl_cell",
"def mouse_on_grid(self):\n if self.mousePos[0] < grid_pos[0] or self.mousePos[1] < grid_pos[1]:\n return None\n if self.mousePos[0] > grid_pos[0] + grid_size or self.mousePos[1] > grid_pos[1] + grid_size:\n return None\n temp = (self.mousePos[0] - grid_pos[0]\n ) // cell_size, (self.mousePos[1] - grid_pos[1]) // cell_size\n if not self.check_if_locked(temp):\n return temp",
"def getObject(self, row, column, gameGrid=None):\n if not gameGrid:\n gameGrid = self.gameGrid\n return gameGrid.getItem(row, column)",
"def get(self, point):\n\t\treturn self._grid.get(point)",
"def return_cell(self):\n\n pos = pygame.mouse.get_pos()\n\n x = pos[1] // (self.cell_size+1)\n y = pos[0] // (self.cell_size+1)\n\n return self.grid[x][y]",
"def get_cell_by_coords(self, coords):\n try:\n cell = GameCell.objects.get(row=coords[0], col=coords[1], game=self)\n return cell\n except GameCell.DoesNotExist:\n return None",
"def grid(self):\n return self._grid",
"def get(self, **cell):\n cellobj = Cell(**cell)\n if not cellobj.is_terrestial:\n raise Exception(\"Priogrid only has data for terrestial cells, you are trying to get data for a non-terrestial cell\")\n\n if not self.name in CACHED_VARS:\n CACHED_VARS[self.name] = dict()\n CACHED_VARS[self.name][\"data\"] = get_unknown_data(self.name, startyr=self.startyr, endyr=self.endyr, interpolated=self.interpolated)\n\n # TODO: Test that doesnt slow down...\n## if not all((yr in CACHED_VARS[self.name][\"data\"][cellobj.gid][\"data\"] for yr in range(self.startyr, self.endyr+1))):\n## newdata = get_unknown_data(self.name, startyr=self.startyr, endyr=self.endyr, interpolated=self.interpolated)\n## yrdict = CACHED_VARS[self.name][\"data\"][cellobj.gid][\"data\"]\n## yrdict.update( newdata[cellobj.gid][\"data\"] )\n \n value = CACHED_VARS[self.name][\"data\"][cellobj.gid][\"data\"]\n return value"
]
| [
"0.6090308",
"0.60243547",
"0.59928375",
"0.59226567",
"0.5912492",
"0.58661693",
"0.58586776",
"0.5803431",
"0.5781136",
"0.57725286",
"0.5739436",
"0.5710825",
"0.56304365",
"0.5583593",
"0.5551934",
"0.5551149",
"0.55327576",
"0.5495326",
"0.54909706",
"0.54815984",
"0.54815984",
"0.54772186",
"0.5472963",
"0.5425122",
"0.5411652",
"0.54086375",
"0.5406256",
"0.53935885",
"0.53532237",
"0.53205144"
]
| 0.6312405 | 0 |
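
Editor's illustration (not part of the dataset row above): a minimal usage sketch for get_cell_lgr(). The import path ecl.grid.EclGrid, the file name CASE.EGRID and the cell indices are assumptions; only get_cell_lgr() itself, the further-query methods getNX()/getNY()/getNZ(), and the IndexError raised for an unrefined cell are taken from the rows in this dump.

    from ecl.grid import EclGrid   # assumed import path for the bindings

    grid = EclGrid("CASE.EGRID")   # hypothetical case with an LGR
    try:
        lgr = grid.get_cell_lgr(ijk=(10, 20, 5))
        # The returned LGR is itself an EclGrid and can be queried further.
        print("Refined cell; LGR dimensions:", lgr.getNX(), lgr.getNY(), lgr.getNZ())
    except IndexError:
        print("Cell (10, 20, 5) is not refined")
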
Will evaluate kw in location (i,j,k). The ECLIPSE properties and solution vectors are stored in restart and init files as 1D vectors of length nx*ny*nz or nactive. The grid_value() method is a minor convenience function to convert the (i,j,k) input values to an appropriate 1D index. Depending on the length of kw the input arguments are converted either to an active index or to a global index. If the length of kw does not fit with either the global size of the grid or the active size of the grid things will fail hard. | def grid_value( self , kw , i , j , k):
return self._grid_value( kw , i , j , k) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve(grid):\n #setting the units\n values = grid_values(grid)\n #display(values)\n sol = search(values)\n return sol",
"def solve(grid):\n values = grid_values(grid)\n \n values = search(values)\n\n return values",
"def grid_values(*args):\n return GridValues(args)",
"def evaluate(params, grid):\n return np.zeros(grid.shape)",
"def solve(grid):\n values = grid_values(grid)\n values = search(values)\n return values",
"def solve(grid):\n values = grid_values(grid)\n values = search(values)\n\n return values",
"def solve(grid):\n values = grid_values(grid)\n search(values)\n\n return values",
"def grid_search(self):\n\t\t''' common settings without grid-search '''\n\t\tvali_k, cutoffs = 5, [1, 3, 5, 10, 20, 50]\n\n\t\tdo_log = False if self.debug else True\n\t\tcommon_eval_dict = dict(debug=self.debug, grid_search=True, dir_output=self.dir_output,\n\t\t\t\t\t\t vali_k=vali_k, cutoffs=cutoffs, do_log=do_log, log_step=2, do_summary=False, loss_guided=False)\n\n\t\t''' some settings for grid-search '''\n\t\tchoice_validation = [False] if self.debug else [True] # True, False\n\t\tchoice_epoch = [20] if self.debug else [100]\n\t\tchoice_mask_label = [False] if self.debug else [False]\n\t\tchoice_mask_ratios = [0.2] if self.debug else [0.2, 0.4, 0.6, 0.8] # 0.5, 1.0\n\t\tchoice_mask_type = ['rand_mask_rele'] if self.debug else ['rand_mask_rele']\n\n\t\tfor do_validation, num_epochs, mask_label in product(choice_validation, choice_epoch, choice_mask_label):\n\t\t\tif mask_label:\n\t\t\t\tfor mask_ratio, mask_type in product(choice_mask_ratios, choice_mask_type):\n\t\t\t\t\tself.eval_dict = dict(do_validation=do_validation, epochs=num_epochs, mask_label=mask_label,\n\t\t\t\t\t mask_ratio=mask_ratio, mask_type=mask_type)\n\t\t\t\t\tself.eval_dict.update(common_eval_dict)\n\t\t\t\t\tyield self.eval_dict\n\t\t\telse:\n\t\t\t\tself.eval_dict = dict(do_validation=do_validation, epochs=num_epochs, mask_label=mask_label)\n\t\t\t\tself.eval_dict.update(common_eval_dict)\n\t\t\t\tyield self.eval_dict",
"def grid_eval(self, gridaxes):\n assert len(gridaxes) == self.sdim, \"Input has wrong dimension\"\n # make sure axes are one-dimensional\n if not all(np.ndim(ax) == 1 for ax in gridaxes):\n gridaxes = tuple(np.squeeze(ax) for ax in gridaxes)\n assert all(ax.ndim == 1 for ax in gridaxes), \\\n \"Grid axes should be one-dimensional\"\n colloc = [collocation(self.kvs[i], gridaxes[i]) for i in range(self.sdim)]\n return apply_tprod(colloc, self.coeffs)",
"def solve(grid):\n\tvalues = grid2values(grid)\n\tvalues = search(values)\n\treturn values",
"def solve(grid):\n values = grid2values(grid)\n values = search(values)\n return values",
"def solve(grid):\n values = grid2values(grid)\n values = search(values)\n return values",
"def grid_evaluation(self, x, y, size=256):\n\t\t# Create matrix x and y coordinates\n\t\tL = self.L\n\t\t[xx, yy] = meshgrid(linspace(-L, L, size), linspace(-L, L, size))\n\t\tpts = np.array([xx, yy])\n\t\tux = batch_eval(x, pts)\n\t\tuy = batch_eval(y, pts)\n\t\treturn xx, yy, ux, uy",
"def value(self, grid):\n pass",
"def solve(grid):\n assignment = grid_values(grid)\n assignment = eliminate(assignment)\n return assignment",
"def value(self, grid):\n if not self._fix_val or self.val is None:\n # self.check_params()\n self._fix_val = self.cache_val\n self.val = self.evaluate(self.params, grid)\n # centralization\n # self.val -= np.mean(self.val)\n assert self.val.shape == grid.shape, \"Value must be the same shape as grid\"\n return self.val",
"def solve(grid):\n\n return search(grid_values(grid))",
"def solve(grid):\n\n return search(grid_values(grid))",
"def solve(grid):\n return search(grid_values(grid))",
"def solve(grid):\n return search(grid_values(grid))",
"def cell_params(x,y,**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n lookup_table = cloudy_library._restore_lookup_table()\n lookup_table['logG0s'] = lookup_table['logFUVs']\n if x == 'NH': \n x_cloudy,R_NIR_FUV_cl = aux.get_NH_from_cloudy()\n else:\n x_cloudy = np.unique(lookup_table['log'+x+'s'])\n if y == 'NH': \n y_cloudy,R_NIR_FUV_cl = aux.get_NH_from_cloudy()\n else:\n y_cloudy = np.unique(lookup_table['log'+y+'s'])\n\n if not p.ylim:\n p.ylim = [1e-3,30]\n if not p.xlim:\n p.xlim = [1e-7,1e3]\n \n # SELECT GALAXIES\n rand_gal_index = np.random.randint(0, GR.N_gal, size=(p.bins))\n if p.bins == GR.N_gal: rand_gal_index = np.arange(GR.N_gal)\n if p.gal_index: \n rand_gal_index = [p.gal_index]\n print(rand_gal_index)\n xs = np.array([])\n ys = np.array([])\n m_tot,m_encomp,m_y0 = 0,0,0\n for gal_index in rand_gal_index:\n print(gal_index)\n gal_ob = gal.galaxy(gal_index)\n df = gal_ob.cell_data.get_dataframe()\n df['nSFR'] = df.nSFR.values#/(0.2**3)\n #df['nSFR'] = df['SFR_density']\n #df['NH'] = 10.**df['NH']\n x1 = df[x].values\n y1 = df[y].values\n x1[x1 <= p.xlim[0]] = p.xlim[0]\n y1[y1 <= p.ylim[0]] = p.ylim[0]\n m_tot += np.sum(df.m.values)\n m_encomp += np.sum(df.m[(x1>=p.xlim[0]) & (y1>=p.ylim[0])].values)\n m_y0 += np.sum(df.m[(y1 == 0)].values)\n #print(x,x1.min(),x1.max())\n #print(y,y1.min(),y1.max())\n ys = np.append(ys,y1[(x1>=p.xlim[0]) & (y1>=p.ylim[0])])\n xs = np.append(xs,x1[(x1>=p.xlim[0]) & (y1>=p.ylim[0])])\n print('Min max of %s:' % x)\n print(xs.min(),xs.max())\n print('Min max of %s:' % y)\n print(ys.min(),ys.max())\n fig,ax = plt.subplots(figsize=(10,8))\n hb = ax.hexbin(xs,ys,xscale='log',yscale='log',bins='log',mincnt=1,lw=None,gridsize=50,cmap='inferno')\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label('Number of cells in %i galaxies' % len(rand_gal_index))\n ax.set_xlabel(getlabel(x))\n ax.set_ylabel(getlabel(y))\n print('Total gas mass fraction encompassed: %.4f%%' % (m_encomp/m_tot*100))\n print('Total gas mass fraction with y = 0: %.4f%%' % (m_y0/m_tot*100))\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n # Overplot Cloudy grid params\n print(x,x_cloudy)\n print(y,y_cloudy)\n for x1 in x_cloudy:\n ax.plot([10**x1,10**x1],ax.get_ylim(),'-',color='white',alpha=0.7)\n ax.plot([10**x1,10**x1],ax.get_ylim(),'--k',alpha=0.7)\n for y1 in y_cloudy:\n ax.plot(ax.get_xlim(),[10.**y1,10.**y1],'-',color='white',alpha=0.7)\n ax.plot(ax.get_xlim(),[10.**y1,10.**y1],'--k',alpha=0.7)\n\n if not os.path.isdir(p.d_plot + 'cell_data/'): os.mkdir(p.d_plot + 'cell_data/') \n plt.savefig('plots/cell_data/%s%s_cell_params_%s_%s_%s.png' % (p.sim_name,p.sim_run,p.z1,x,y),dpi=250, facecolor='w')",
"def solve(grid):\n #translate from string representation to dict to solve it further\n values = grid_values(grid)\n return search(values)",
"def solve(grid):\n values = grid_values(grid);\n values = search(values);\n if values is False:\n return False\n return values;",
"def solve(grid):\n puzzle_dict = grid_values(grid)\n return search(puzzle_dict)",
"def grid_vals(grid):\n\tletters = list(grid)\n\t#print \"---------------------------------\\n-------------------\"\n\t#print letters\n\t#print \"----------------------------------\\n-------------------\"\n\tassert len(letters) == 81\n\ttempdict = zip(squares, letters)\n\treturn dict(tempdict)",
"def solve(self, grid):\n return self.search(self.parse_grid(grid))",
"def eta_grid( self ):\n return self._Vals",
"def eval_model_on_grid(model, bbox, tx, voxel_grid_size, cell_vox_min=None, cell_vox_max=None, print_message=True):\n bbox_origin, bbox_size = bbox\n voxel_size = bbox_size / voxel_grid_size # size of a single voxel cell\n\n if cell_vox_min is None:\n cell_vox_min = torch.tensor([0, 0, 0], dtype=torch.int32)\n\n if cell_vox_max is None:\n cell_vox_max = voxel_grid_size\n\n if print_message:\n print(f\"Evaluating model on grid of size {[_.item() for _ in (cell_vox_max - cell_vox_min)]}.\")\n eval_start_time = time.time()\n\n xmin = bbox_origin + (cell_vox_min + 0.5) * voxel_size\n xmax = bbox_origin + (cell_vox_max - 0.5) * voxel_size\n\n xmin = affine_transform_pointcloud(xmin.unsqueeze(0), tx).squeeze()\n xmax = affine_transform_pointcloud(xmax.unsqueeze(0), tx).squeeze()\n\n xmin, xmax = xmin.numpy(), xmax.numpy()\n cell_vox_size = (cell_vox_max - cell_vox_min).numpy()\n\n xgrid = np.stack([_.ravel() for _ in np.mgrid[xmin[0]:xmax[0]:cell_vox_size[0] * 1j,\n xmin[1]:xmax[1]:cell_vox_size[1] * 1j,\n xmin[2]:xmax[2]:cell_vox_size[2] * 1j]], axis=-1)\n xgrid = torch.from_numpy(xgrid).to(model.alpha_.dtype)\n xgrid = torch.cat([xgrid, torch.ones(xgrid.shape[0], 1).to(xgrid)], dim=-1).to(model.alpha_.dtype)\n\n ygrid = model.predict(xgrid).reshape(tuple(cell_vox_size.astype(np.int))).detach().cpu()\n\n if print_message:\n print(f\"Evaluated model in {time.time() - eval_start_time}s.\")\n\n return ygrid",
"def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment",
"def value_iteration_on_grid_world() -> PolicyAndValueFunction:\n result = get_value_iteration(grid_world, 0.99, 0.01)\n export_to_json(result.pi, 'value_iteration_grid_world')\n return result"
]
| [
"0.58016324",
"0.5538702",
"0.5527784",
"0.55101055",
"0.54955435",
"0.5486036",
"0.5471686",
"0.544268",
"0.53674495",
"0.53422844",
"0.5341827",
"0.5341827",
"0.53167075",
"0.5303801",
"0.52493125",
"0.5226353",
"0.5225476",
"0.5225476",
"0.5201638",
"0.5201638",
"0.51946217",
"0.5129573",
"0.508311",
"0.5071131",
"0.5005809",
"0.49533904",
"0.4939215",
"0.49265283",
"0.49173492",
"0.4891915"
]
| 0.67332304 | 0 |
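
Editor's illustration (not part of the dataset row above): a sketch of how grid_value() might be used. The import path, file name, keyword name "PORO" and the cell indices are assumptions; the keyword is built with createKW(), which appears in a later row, so that only methods shown in this dump are used.

    import numpy as np
    from ecl.grid import EclGrid   # assumed import path for the bindings

    grid = EclGrid("CASE.EGRID")   # hypothetical case name
    nx, ny, nz = grid.getNX(), grid.getNY(), grid.getNZ()

    # Any keyword of length nactive or nx*ny*nz will do; here one is built
    # from a 3D numpy array (see the createKW row further down).
    poro = np.random.rand(nx, ny, nz).astype(np.float32)
    poro_kw = grid.createKW(poro, "PORO", False)   # global-size keyword

    # grid_value() converts (i, j, k) to the right 1D index based on len(poro_kw).
    print(grid.grid_value(poro_kw, 10, 20, 5))     # equals poro[10, 20, 5]
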
Creates an EclKW instance based on an existing 3D numpy object. The method create3D() does the inverse operation; creating a 3D numpy object from an EclKW instance. If the argument pack is true the resulting keyword will have length 'nactive', otherwise the keyword will have length nx*ny*nz. | def createKW( self , array , kw_name , pack):
if array.ndim == 3:
dims = array.shape
if dims[0] == self.getNX() and dims[1] == self.getNY() and dims[2] == self.getNZ():
dtype = array.dtype
if dtype == numpy.int32:
type = EclTypeEnum.ECL_INT_TYPE
elif dtype == numpy.float32:
type = EclTypeEnum.ECL_FLOAT_TYPE
elif dtype == numpy.float64:
type = EclTypeEnum.ECL_DOUBLE_TYPE
else:
sys.exit("Do not know how to create ecl_kw from type:%s" % dtype)
if pack:
size = self.getNumActive()
else:
size = self.getGlobalSize()
if len(kw_name) > 8:
# Silently truncate to length 8 - ECLIPSE has it's challenges.
kw_name = kw_name[0:8]
kw = EclKW( kw_name , size , type )
active_index = 0
global_index = 0
for k in range( self.nz ):
for j in range( self.ny ):
for i in range( self.nx ):
if pack:
if self.active( global_index = global_index ):
kw[active_index] = array[i,j,k]
active_index += 1
else:
if dtype == numpy.int32:
kw[global_index] = int( array[i,j,k] )
else:
kw[global_index] = array[i,j,k]
global_index += 1
return kw
raise ValueError("Wrong size / dimension on array") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create3D( self , ecl_kw , default = 0):\n if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():\n array = numpy.ones( [ self.getGlobalSize() ] , dtype = ecl_kw.dtype) * default\n kwa = ecl_kw.array\n if len(ecl_kw) == self.size:\n for i in range(kwa.size):\n array[i] = kwa[i]\n else:\n data_index = 0\n for global_index in range(self.getGlobalSize()):\n if self.active( global_index = global_index ):\n array[global_index] = kwa[data_index]\n data_index += 1\n \n array = array.reshape( [self.getNX() , self.getNY() , self.getNZ()] , order = 'F')\n return array\n else:\n raise ValueError(\"Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d\" % (ecl_kw.name , ecl_kw.size , self.nactive ,self.size))",
"def Has3d(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Has3d(self, *args)",
"def is3_d(self, is3_d):\n\n self.container['is3_d'] = is3_d",
"def is3_d(self):\n return self.container['is3_d']",
"def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def New(*args, **kargs):\n obj = itkThresholdSegmentationLevelSetImageFilterID3ID3D.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkCosImageFilterID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkBoundedReciprocalImageFilterID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def create_reverb_3d(self):\n r_ptr = c_void_p()\n ckresult(_dll.FMOD_System_CreateReverb3D(self._ptr, byref(r_ptr)))\n return get_class(\"Reverb3D\")(r_ptr)",
"def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)",
"def _setitem3d(self, index, value):\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iy, slice): sss[1:1] = [1]\n if not isinstance(iz, slice): sss[2:2] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value",
"def New(*args, **kargs):\n obj = itkShapePriorMAPCostFunctionID3D.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def get_3d_valid(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_valid['3d'][:, to_select, :][:, to_sort, :]",
"def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]",
"def fz3d_2_ndhwc_compute(self):\n tik_instance = self.set_tik_instance()\n branch = self.check_branch()\n\n if branch == \"c_align_small\":\n tik_instance = self.c_align_small(tik_instance)\n elif branch == \"c_align_split_n\":\n tik_instance = self.c_align_split_n(tik_instance)\n elif branch == \"c_not_align_small_fp16\":\n tik_instance = self.c_not_align_small_fp16(tik_instance)\n elif branch == \"c_not_align_split_n_fp32\":\n tik_instance = self.c_not_align_split_n_fp32(tik_instance)\n\n return tik_instance",
"def New(*args, **kargs):\n obj = itkShapePriorMAPCostFunctionBaseID3D.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def Deboucle3D(*args):\n return _BRepAlgo.BRepAlgo_Tool_Deboucle3D(*args)",
"def New(*args, **kargs):\n obj = itkContourDirectedMeanDistanceImageFilterID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkCosImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def create_3d_image_test_example(\n image_height: int,\n image_width: int,\n image_volume: int,\n image_channel: int,\n output_serialized_example: bool = False) -> tf.train.Example:\n image = fake_feature_generator.generate_image_np(image_height, image_width,\n image_channel)\n images = image[:, :, np.newaxis, :]\n images = np.tile(images, [1, 1, image_volume, 1]).astype(np.float32)\n\n shape = [image_height, image_width, image_volume, image_channel]\n labels = fake_feature_generator.generate_classes_np(\n 2, np.prod(shape)).reshape(shape).astype(np.float32)\n\n builder = tf_example_builder.TfExampleBuilder()\n example = builder.add_bytes_feature(IMAGE_KEY,\n images.tobytes()).add_bytes_feature(\n CLASSIFICATION_LABEL_KEY,\n labels.tobytes()).example\n if output_serialized_example:\n return example.SerializeToString()\n return example",
"def New(*args, **kargs):\n obj = itkScalarConnectedComponentImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def create_reference_array(self):\r\n self.active = True\r\n self.pxarray = pygame.surfarray.pixels3d(self.surface)\r\n pygame.display.flip()\r\n return",
"def New(*args, **kargs):\n obj = itkBinaryContourImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)",
"def tensor3(name=None, dtype=None):\r\n if dtype is None:\r\n dtype = config.floatX\r\n type = CudaNdarrayType(dtype=dtype, broadcastable=(False, False, False))\r\n return type(name)",
"def New(*args, **kargs):\n obj = itkTranslationTransformD3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def createVolumeKeyword(self , active_size = True): \n\n return self._create_volume_keyword( active_size )",
"def test_Pad3D1():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)"
]
| [
"0.7482991",
"0.59509605",
"0.5510952",
"0.5499722",
"0.54056096",
"0.52415323",
"0.51461595",
"0.51215535",
"0.5113031",
"0.51026446",
"0.50913775",
"0.5080292",
"0.506541",
"0.50582623",
"0.50578034",
"0.503252",
"0.50322217",
"0.50146544",
"0.49972647",
"0.49777192",
"0.49719587",
"0.49676168",
"0.4958173",
"0.49578187",
"0.49284363",
"0.49244615",
"0.49114287",
"0.49098635",
"0.4909749",
"0.49051857"
]
| 0.6924006 | 1 |
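
Editor's illustration (not part of the dataset row above): a sketch showing the effect of the pack argument to createKW(). The import path, file name and the "PORO" keyword name are assumptions; the dtype restriction (int32/float32/float64) and the length checks follow the implementation shown in the row.

    import numpy as np
    from ecl.grid import EclGrid   # assumed import path for the bindings

    grid = EclGrid("CASE.EGRID")   # hypothetical case name
    shape = (grid.getNX(), grid.getNY(), grid.getNZ())
    poro = np.full(shape, 0.25, dtype=np.float32)   # dtype maps to ECL_FLOAT_TYPE

    packed = grid.createKW(poro, "PORO", True)      # only active cells kept
    unpacked = grid.createKW(poro, "PORO", False)   # every global cell kept

    print(len(packed) == grid.getNumActive())       # True
    print(len(unpacked) == grid.getGlobalSize())    # True
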
Will return the number of coarse groups in this grid. | def coarse_groups(self):
return self._num_coarse_groups( ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ngroups(self):\n return self._ngroups",
"def getNumEnergyGroups(self):\n return self.lib.numGroups",
"def getNumGrids(self):\n c = list(self.gridVars.keys())\n return len(list(self.gridVars[c[0]].values()))",
"def getNumGroups(self):\n return len(np.unique(self._group_index))",
"def num_node_groups(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"num_node_groups\")",
"def num_nodes(self):\n return self._grid",
"def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups",
"def number_of_electrodes(self):\n return self._pre_kernel.shape[1]",
"def grid_point_count(self):\n return pytools.product(self.grid_point_counts())",
"def giniIndex(self, groups):\n n = sum([len(g) for g in groups])\n gini = 0.0\n for g in groups:\n if len(g) == 0: continue\n score = 0\n for c in self.classes:\n p = [r[-1] for r in g].count(c) / len(g)\n score += p * p\n gini += (1 - score) * len(g) / n\n return gini",
"def n_clusters(self):\n return self.model.n_clusters",
"def num_cells_global(self):\n return self.get_dim_attribute('num_cells')",
"def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed",
"def get_num_features(self, ndim: int) -> int:\n nb_features = 0\n for feature_group in self.features_group_list:\n nb_features += feature_group.num_features(ndim)\n return nb_features",
"def num_node_groups(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_node_groups\")",
"def num_node_groups(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_node_groups\")",
"def n_clusters(self):\n return len(self.clusters)",
"def voxel_count(self):\n return self.cols * self.rows * self.sections",
"def group_size(self):\n return self._gsize",
"def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)",
"def number_of_carnivores_island(self):\n return np.sum(self.carnivores_on_island)",
"def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)",
"def GetGlobalNumRows(self):\n return _hypre.HypreParMatrix_GetGlobalNumRows(self)",
"def get_n_splits(\n self,\n X: DataFrame,\n sections: Union[str, List[str]],\n y: Optional[Union[Series, ndarray]] = None,\n groups: Optional[Union[Series, ndarray]] = None,\n ) -> int:\n return X.groupby(sections).count().index.shape[0] * self.n_splits",
"def n_rounds(self) -> int:\n return self.y.shape[0]",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def group_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"group_count\")",
"def _find_max_number_of_grouping(cls, reserved_seats, k):\n # print(reserved_seats)\n n = len(reserved_seats)\n count_groups = 0\n count_empty_contigous_seats = 0\n i = 0\n while i < n:\n if reserved_seats[i] != 0:\n # print('continue', i)\n count_empty_contigous_seats = 0\n i += 1\n continue\n\n count_empty_contigous_seats += 1\n # print('empty', i, count_empty_contigous_seats)\n if count_empty_contigous_seats >= k:\n count_groups += 1\n # print('found', i, count_groups)\n\n if ((i + 1) % len(cls._PLANE_ROW)) == 0:\n # print('new row', i)\n count_empty_contigous_seats = 0\n\n i += 1\n\n return count_groups",
"def num_grna(self) -> int:\n return len(self.gRNAs)",
"def N(self):\n return len(self.cavity_grid.cavities) + 1"
]
| [
"0.6764765",
"0.65031046",
"0.6454099",
"0.64497554",
"0.62625027",
"0.6090639",
"0.608471",
"0.6063721",
"0.60628855",
"0.6060807",
"0.6049989",
"0.603716",
"0.59877247",
"0.59371305",
"0.5924305",
"0.5924305",
"0.5873342",
"0.5842059",
"0.5803679",
"0.58003265",
"0.57772416",
"0.5762055",
"0.57223344",
"0.57206094",
"0.5709886",
"0.5703129",
"0.5697776",
"0.56933904",
"0.56926036",
"0.5691657"
]
| 0.8727954 | 0 |
Will return True if the cell is part of a coarse group, otherwise False. | def in_coarse_group(self , global_index = None , ijk = None , active_index = None):
global_index = self.__global_index( active_index = active_index , ijk = ijk , global_index = global_index)
return self._in_coarse_group1( global_index ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_clayey(self):\n group_index = self._data[SoilProperty.GI]\n return group_index[0] not in ['S','G']",
"def coarse_groups(self):\n return self._num_coarse_groups( )",
"def is_crossed(self):\n left_boundary_clusters = np.extract(self.cluster[0] > 0,\n self.cluster[0])\n right_boundary_clusters = np.extract(self.cluster[-1] > 0,\n self.cluster[-1])\n return np.in1d(left_boundary_clusters, right_boundary_clusters).any()",
"def is_corridor_cell(cell: Cell) -> bool:\n open_walls = list(cell.open_walls)\n return len(open_walls) == 2 and open_walls[0].opposite == open_walls[1]",
"def is_percolates(self):\n return self._uf.connected(self._top_idx, self._bottom_idx)",
"def _is_in_grid(self, atom_coordinate):\n return c_is_in_grid(atom_coordinate, self._origin_crd, self._uper_most_corner_crd)",
"def valid_coverage_cell(self, row, column):\n\n if (row < self.cov_grid.shape[0] and\n row >= 0) and \\\n (column < self.cov_grid.shape[1] and\n column >= 0):\n return True\n else:\n return False",
"def is_center(self):\n if self.pupils_located:\n return self.is_right() is not True and self.is_left() is not True",
"def check_surroundings(x_coord, y_coord, value):\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\texamining = world[x_coord - 1 + i][y_coord - 1 + j]\n\t\t\t\tif examining.name == value:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\treturn False",
"def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]",
"def isSplittable(self,cutLoci):\n mirror = self.copy()\n # Make new random cuts\n mirror.cutNow(cutLoci)\n # Remove the CL concerning the cleavages\n if self.keepCL == False:\n mirror.removeCL() \n return mirror.isConnected()",
"def is_self_crossing(cursor, marker, grid):\n grid_point_wires = grid.get(cursor[0], cursor[1])\n\n for wire in grid_point_wires:\n if wire['marker'] == marker:\n return True\n return False",
"def isInCluster(self):\n logger.debug(\"Checking if %s is a part of cluster\" % self)\n role = self.getClusterRole()\n return role is not None and role != \"DISABLED\"",
"def __cell_is_occupied(self, x, y) -> bool:\n return self.occupancy_map.data[self.__get_cell_index(x, y)] != 0",
"def __cell_is_in_map(self, x, y) -> bool:\n return x >= 0 and y >= 0 and x < self.occupancy_map.info.width and y < self.occupancy_map.info.height",
"def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False",
"def IsGroup(self):\n return isinstance(self, TestGroup)",
"def is_perfect(self):\n if self._is_perfect is None:\n self._is_perfect = self.equals(self.derived_subgroup())\n return self._is_perfect",
"def covered_by(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoCoveredBy(self, right).to_expr()",
"def is_group(self):\n return self._is_group",
"def group_is_surrounded(group, board):\n if group_adjacents(group, board, filter_by=\"None\"):\n return False\n else:\n return True",
"def is_in_the_grid(self, row: int, col: int) -> bool:\n return 0 <= row < self.n_row and 0 <= col < self.n_col",
"def has_geom(self):\n return bool(self.give_geom())",
"def is_potential_group(self, player: int, row: int, col: int, row_diff: int, col_diff: int):\n opponent = 1 - player\n for _ in range(4):\n square = Square(row, col)\n if not self.is_valid(square):\n return False\n if self.state[opponent][row][col]:\n # If there is a token that belongs to the opponent in this group,\n # then this group is not a potential group that belongs to the given player.\n return False\n row, col = row + row_diff, col + col_diff\n return True",
"def is_boundary_cell(self,c):\n edges=self.cell_to_edges(c)\n return np.any( self.edge_to_cells()[edges] < 0 )",
"def is_possible_grid(self,row,col,user_value):\n start_row = row - (row % 3)\n start_col = col - (col % 3)\n for x in range(3):\n for y in range(3):\n if self.arr[x+start_row][y+start_col] == user_value:\n logging.debug(f\"is_posssible_grid(): (False) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} == {user_value}\")\n return False\n logging.debug(f\"is_posssible_grid(): (True) x: {x} y: {y} s_row: {start_row} s_col: {start_col} arr[x+start_row][y+start_col]: {self.arr[x+start_row][y+start_col]} != {user_value}\")\n return True",
"def is_cell(self, c, d):\n if (c[0] == self.nrows-1 and d == NORTH) or \\\n (c[1] == self.ncols-1 and d == EAST) or \\\n (c[0] == 0 and d == SOUTH) or \\\n (c[1] == 0 and d == WEST): return False\n else: return True",
"def check_visibility(self):\r\n\r\n for gs in self.ground_stations:\r\n if self.visible ^ (elevation_dot_product(self.r_ecef,self.ground_stations[gs][1],self.earth) > 0.0):\r\n self.visible ^= 1\r\n self.gs_id = self.ground_stations[gs][0]\r\n return True",
"def is_sum_component_fusion(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n sum_ob = GriddedPerm((1, 0), (scell, fcell))\n else:\n sum_ob = GriddedPerm((1, 0), (fcell, scell))\n return sum_ob in self._tiling.obstructions",
"def test(self, grid, flag):\n x = self.x+SPEED_X[flag]\n y = self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1"
]
| [
"0.63103396",
"0.629997",
"0.60771424",
"0.6011075",
"0.58738124",
"0.58659416",
"0.5851295",
"0.5821069",
"0.57979316",
"0.57953095",
"0.5793142",
"0.57072425",
"0.5687644",
"0.56837296",
"0.5677589",
"0.5670765",
"0.56575155",
"0.56542456",
"0.56453484",
"0.56305254",
"0.56174767",
"0.5603489",
"0.5596916",
"0.5595642",
"0.55886614",
"0.55790234",
"0.55740863",
"0.55713",
"0.5559233",
"0.5553929"
]
| 0.63103807 | 0 |
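
Editor's illustration (not part of the dataset rows above): a sketch combining coarse_groups() and in_coarse_group() from the two preceding rows. The import path, file name and the idea of scanning every global index are the editor's own; only the two coarse-group methods and getGlobalSize() come from this dump.

    from ecl.grid import EclGrid   # assumed import path for the bindings

    grid = EclGrid("CASE.EGRID")   # hypothetical case name
    n_groups = grid.coarse_groups()
    if n_groups:
        # Count how many global cells belong to any coarse group.
        in_group = sum(
            1
            for g in range(grid.getGlobalSize())
            if grid.in_coarse_group(global_index=g)
        )
        print("%d coarse groups cover %d cells" % (n_groups, in_group))
    else:
        print("No coarse groups in this grid")
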
Creates a 3D numpy array object with the data from ecl_kw. Observe that the 3D numpy object is a copy of the data in the EclKW instance, i.e. modifications to the numpy object will not be reflected in the ECLIPSE keyword. The method createKW() does the inverse operation; creating an EclKW instance from a 3D numpy object. | def create3D( self , ecl_kw , default = 0):
if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
array = numpy.ones( [ self.getGlobalSize() ] , dtype = ecl_kw.dtype) * default
kwa = ecl_kw.array
if len(ecl_kw) == self.size:
for i in range(kwa.size):
array[i] = kwa[i]
else:
data_index = 0
for global_index in range(self.getGlobalSize()):
if self.active( global_index = global_index ):
array[global_index] = kwa[data_index]
data_index += 1
array = array.reshape( [self.getNX() , self.getNY() , self.getNZ()] , order = 'F')
return array
else:
raise ValueError("Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d" % (ecl_kw.name , ecl_kw.size , self.nactive ,self.size)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createKW( self , array , kw_name , pack):\n if array.ndim == 3:\n dims = array.shape\n if dims[0] == self.getNX() and dims[1] == self.getNY() and dims[2] == self.getNZ():\n dtype = array.dtype\n if dtype == numpy.int32:\n type = EclTypeEnum.ECL_INT_TYPE\n elif dtype == numpy.float32:\n type = EclTypeEnum.ECL_FLOAT_TYPE\n elif dtype == numpy.float64:\n type = EclTypeEnum.ECL_DOUBLE_TYPE\n else:\n sys.exit(\"Do not know how to create ecl_kw from type:%s\" % dtype)\n \n if pack:\n size = self.getNumActive()\n else:\n size = self.getGlobalSize()\n \n if len(kw_name) > 8:\n # Silently truncate to length 8 - ECLIPSE has it's challenges.\n kw_name = kw_name[0:8] \n\n kw = EclKW( kw_name , size , type )\n active_index = 0\n global_index = 0\n for k in range( self.nz ):\n for j in range( self.ny ):\n for i in range( self.nx ):\n if pack:\n if self.active( global_index = global_index ):\n kw[active_index] = array[i,j,k]\n active_index += 1\n else:\n if dtype == numpy.int32:\n kw[global_index] = int( array[i,j,k] )\n else:\n kw[global_index] = array[i,j,k]\n \n global_index += 1\n return kw\n raise ValueError(\"Wrong size / dimension on array\")",
"def __getitem__(self, key: Tuple) -> np.array:\n # If the user has requested XYZ mode, the first thing to do is reverse\n # the array indices. Then you can continue this fn without any\n # additional changes.\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Next, we need to get the shape of the dataset. We do this currently\n # by getting the coordinate frame, which means that we need the\n # coordframe data and experiment data if we don't have it already. In\n # the future, we may also want to allow the user to specify general\n # shape information so that we can avoid calling the API.\n\n # Populate the experiment metadata if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Populate the coordinate frame metadata if not yet set:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n # Now we can begin. There is a wide variety of indexing options\n # available, including single-integer indexing, tuple-of-slices\n # indexing, tuple-of-int indexing...\n\n # First we'll address if the user presents a single integer.\n # ```\n # my_array[500]\n # ```\n # In this case, the user is asking for a single Z slice (or single X\n # slice if in XYZ order... But that's a far less common use case.)\n # We will get the full XY extents and download a single 2D array:\n if isinstance(key, int):\n # Get the full Z slice:\n xs = (0, self.shape[2])\n ys = (0, self.shape[1])\n zs = (key, key + 1)\n else:\n # We also support indexing with units. For example, you can ask for\n # ```\n # my_array[0:10, 0:10, 0:10, \"nanometers\"]\n # ```\n # which will download as many pixels as are required in order to\n # download 10nm in each dimension. We do this by storing a\n # \"normalized units\" measure which is a rescale factor for each\n # dimension (in the same order, e.g. ZYX, as the array).\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n # We will now do the following codeblock three times, for X,Y,Z:\n # First, we check to see if this index is a single integer. If so,\n # the user is requesting a 2D array with zero depth along this\n # dimension. 
For example, if the user asks for\n # ```\n # my_data[0:120, 0:120, 150]\n # ```\n # Then \"150\" suggests that the user just wants one single X slice.\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n # If the key is a Slice, then it has .start and .stop attrs.\n # (The user is requesting an array with more than one slice\n # in this dimension.)\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = int(start / _normalize_units[0])\n stop = int(stop / _normalize_units[0])\n\n # Cast the coords to integers (since Boss needs int coords)\n xs = (int(start), int(stop))\n\n # Do the same thing again for the next dimension: Either a single\n # integer, or a slice...\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n # Do the same thing again for the last dimension: Either a single\n # integer, or a slice...\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n # Finally, we can perform the cutout itself, using the x, y, and z\n # coordinates that we computed in the previous step.\n cutout = self.volume_provider.get_cutout(\n self._channel, self.resolution, xs, ys, zs\n )\n\n # Data are returned in ZYX order:\n if self.axis_order == AxisOrder.XYZ:\n data = np.rollaxis(np.rollaxis(cutout, 1), 2)\n elif self.axis_order == AxisOrder.ZYX:\n data = cutout\n\n # If any of the dimensions are of length 1, it's because the user\n # requested a single slice in their key; flatten the array in that\n # dimension. For example, if you request `[10, 0:10, 0:10]` then the\n # result should be 2D (no Z component).\n _shape = data.shape\n if _shape[0] == 1:\n data = data[0, :, :]\n if _shape[1] == 1:\n data = data[:, 0, :]\n if _shape[2] == 1:\n data = data[:, :, 0]\n return data",
"def CreateDataArray(name, shape, cDims, type):\n # Create a numpy array of ones to hold our data\n num_array = np.ndarray(shape, dtype=type, order=\"C\")\n\n z = np.asarray(num_array)\n if not z.flags.contiguous:\n z = np.ascontiguousarray(z)\n z.fill(0)\n\n shape = z.shape\n assert z.flags.contiguous, 'Only contiguous arrays are supported.'\n assert not np.issubdtype(z.dtype, np.complex128), \\\n \"Complex numpy arrays cannot be converted to vtk arrays.\"\\\n \"Use real() or imag() to get a component of the array before\"\\\n \" passing it to vtk.\"\n\n # Get the Pointer to the numpy array\n z_flat = np.ravel(z)\n \n #np.info(z)\n \n # Declare the number of components for the array\n if type == np.int8:\n array = simpl.Int8ArrayType(z_flat, cDims, name, False)\n elif type == np.uint8:\n array = simpl.UInt8ArrayType(z_flat, cDims, name, False)\n elif type == np.int16:\n array = simpl.Int16ArrayType(z_flat, cDims, name, False)\n elif type == np.uint16:\n array = simpl.UInt16ArrayType(z_flat, cDims, name, False)\n elif type == np.int32:\n array = simpl.Int32ArrayType(z_flat, cDims, name, False)\n elif type == np.uint32:\n array = simpl.UInt32ArrayType(z_flat, cDims, name, False)\n elif type == np.int64:\n array = simpl.Int64ArrayType(z_flat, cDims, name, False)\n elif type == np.uint64:\n array = simpl.UInt64ArrayType(z_flat, cDims, name, False)\n elif type == np.float32:\n array = simpl.FloatArrayType(z_flat, cDims, name, False)\n elif type == np.double:\n array = simpl.DoubleArrayType(z_flat, cDims, name, False) \n \n # we need to return the 'z' numpy array so it does not go out of scope.\n return (z, array)",
"def _setup_ndarrays(self) -> None:\n empty = self.ele_orig * 0\n # 2D arrays\n self.ele = np.copy(self.ele_orig) # Elevation including glaciers\n self.slp = np.copy(empty) # Slope with glacier geometry\n self.asp = np.copy(empty) # Classified aspect with glacier geometry\n self.h = np.copy(empty) # Local glacier height\n self.u = np.copy(empty) # Local glacier velocity\n self.hs = hillshade(\n self.ele_orig,\n self.PLOT_HILLSHADE_AZIMUTH,\n self.PLOT_HILLSHADE_ALTITUDE,\n ) # HS\n\n # Initialize array store\n self.store = ArrayStore()\n self.store.create(\"h\", self.MODEL_RECORD_SIZE)\n self.store.create(\"u\", self.MODEL_RECORD_SIZE)",
"def _build(self):\n ary = np.zeros( (3,3,3), float )\n ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.\n ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5\n ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5\n ary[2,2,0] = ary[2,2,1] = 0.25\n ary[2,2,2] = 0.5\n return ary",
"def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterISS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterISS3ISS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIUS3ISS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkCosImageFilterID3ID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIF3ISS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def init_three_d_array(dimens, val):\n w, x, y = dimens\n return [[[val for k in range(y)] for j in range(x)] for i in range(w)]",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIUC3ISS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkVTKPolyDataReaderMD3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def CreateDataContainerArray():\n dca = simpl.DataContainerArray.New()\n return dca",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterISS3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIF3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIUC3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def __init__(self, *args):\n _itkArray2DPython.itkArray2DD_swiginit(self, _itkArray2DPython.new_itkArray2DD(*args))",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIUS3IUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkMeshSourceMD3Q.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def split_3Darray(array2d, L_window):\n N, ch = array2d.shape\n n_windows = N//L_window\n array3d = np.zeros((n_windows, L_window, ch))\n for i in range(n_windows):\n array3d[i]=array2d[i*L_window: (i+1)*L_window,:] \n \n return array3d",
"def New(*args, **kargs):\n obj = itkThresholdSegmentationLevelSetImageFilterID3ID3D.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def _create_array(self, array_name, ndim=1, dtype=None, zeros=True, derived=False, shared=None):\n\n # Does this actually correspond to a slice into a 3D array?\n NDname = self._array_name_1D_to_ND(array_name)\n if NDname:\n self._create_array(\n NDname, ndim=3, dtype=dtype, zeros=zeros, derived=derived)\n return\n\n if ndim == 1:\n dims = self._num_particles\n else:\n dims = (self._num_particles, ndim)\n\n if shared is None:\n shared = self._shared_arrays\n\n new_array = array._array_factory(dims, dtype, zeros, shared)\n new_array._sim = weakref.ref(self)\n new_array._name = array_name\n new_array.family = None\n # new_array.set_default_units(quiet=True)\n self._arrays[array_name] = new_array\n\n if derived:\n if array_name not in self._derived_array_names:\n self._derived_array_names.append(array_name)\n\n if ndim == 3:\n array_name_1D = self._array_name_ND_to_1D(array_name)\n\n for i, a in enumerate(array_name_1D):\n self._arrays[a] = new_array[:, i]\n self._arrays[a]._name = a",
"def build_dataset(self):\n self.dataset = KITTIBEVDataset(self.dataset_config, self.transform)\n return self.dataset",
"def make_dataset(\n *,\n k_size: int = 5,\n j_size: int,\n i_size: int,\n time_size: int = 4,\n grid_type: Type[ShocGridGenerator] = DiagonalShocGrid,\n corner_size: int = 0,\n) -> xarray.Dataset:\n coordinate_centre_mask = numpy.full((j_size, i_size), True)\n # Cut a chunk out of the corner where the coordinates will not be defined.\n if corner_size > 1:\n coordinate_centre_mask[-(corner_size - 1):, :+(corner_size - 1)] = False\n\n # SHOC files have a 1-cell border around the outside where the cells have\n # coordinates, but no data.\n wet_centre_mask = numpy.full((j_size, i_size), True)\n if corner_size > 0:\n wet_centre_mask[-corner_size:, :+corner_size] = False\n wet_centre_mask[-corner_size:, -corner_size:] = False\n wet_centre_mask[:+1, :] = False\n wet_centre_mask[-1:, :] = False\n wet_centre_mask[:, :+1] = False\n wet_centre_mask[:, -1:] = False\n wet_mask = c_mask_from_centres(wet_centre_mask, {\n ArakawaCGridKind.face: ('j_centre', 'i_centre'),\n ArakawaCGridKind.back: ('j_back', 'i_back'),\n ArakawaCGridKind.left: ('j_left', 'i_left'),\n ArakawaCGridKind.node: ('j_node', 'i_node'),\n })\n\n # These DataArrays are the long/lats of the grid corners. The centres are\n # derived from these by averaging the surrounding four corners.\n grid = grid_type(j=j_size, i=i_size, face_mask=coordinate_centre_mask)\n layers = ShocLayerGenerator(k=k_size)\n\n t = xarray.DataArray(\n # Note: Using pandas.date_range() directly here will lead to strange\n # behaviours, where the `record` dimension becomes a data variable with\n # a datetime64 dtype. Using a list of datetimes instead seems to avoid\n # this, resulting in record simply being a dimension.\n data=list(pandas.date_range(\"2021-11-11\", periods=time_size)),\n dims=[\"record\"],\n attrs={\n \"long_name\": \"Time\",\n \"standard_name\": \"time\",\n \"coordinate_type\": \"time\",\n },\n )\n # Note: xarray will reformat this in to 1990-01-01T00:00:00+10:00, which\n # EMS fails to parse. 
There is no way around this using xarray natively,\n # you have to adjust it with nctool after saving it.\n t.encoding[\"units\"] = \"days since 1990-01-01 00:00:00 +10\"\n\n botz = xarray.DataArray(\n data=numpy.random.random((j_size, i_size)) * 10 + 50,\n dims=wet_mask[\"face_mask\"].dims,\n attrs={\n \"units\": \"metre\",\n \"long_name\": \"Z coordinate at sea-bed at cell centre\",\n \"standard_name\": \"depth\",\n \"positive\": \"down\",\n \"outside\": \"9999\",\n \"missing_value\": -99.,\n }\n ).where(wet_mask.data_vars[\"face_mask\"])\n botz.values[1, 1] = -99.\n\n eta = xarray.DataArray(\n data=numpy.random.normal(0, 0.2, (time_size, j_size, i_size)),\n dims=[\"record\", *wet_mask[\"face_mask\"].dims],\n attrs={\n \"units\": \"metre\",\n \"long_name\": \"Surface elevation\",\n \"standard_name\": \"sea_surface_height_above_geoid\",\n }\n ).where(wet_mask.data_vars[\"face_mask\"])\n temp = xarray.DataArray(\n data=numpy.random.normal(12, 0.5, (time_size, k_size, j_size, i_size)),\n dims=[\"record\", \"k_centre\", *wet_mask[\"face_mask\"].dims],\n attrs={\n \"units\": \"degrees C\",\n \"long_name\": \"Temperature\",\n },\n ).where(wet_mask.data_vars[\"face_mask\"])\n\n u1 = xarray.DataArray(\n data=numpy.random.normal(0, 2, (time_size, k_size, j_size, i_size + 1)),\n dims=[\"record\", \"k_centre\", *wet_mask.data_vars[\"left_mask\"].dims],\n attrs={\n \"units\": \"metre second-1\",\n \"long_name\": \"I component of current at left face\",\n }\n )\n u2 = xarray.DataArray(\n data=numpy.random.normal(0, 2, (time_size, k_size, j_size + 1, i_size)),\n dims=[\"record\", \"k_centre\", *wet_mask.data_vars[\"back_mask\"].dims],\n attrs={\n \"units\": \"metre per second\",\n \"long_name\": \"I component of current at back face\",\n }\n )\n flag = xarray.DataArray(\n data=numpy.random.randint(0, 256, (time_size, k_size, j_size + 1, i_size + 1)),\n dims=[\"record\", \"k_centre\", *wet_mask.data_vars[\"node_mask\"].dims],\n attrs={\"long_name\": \"SHOC masking flags\"},\n )\n\n dataset = xarray.Dataset(\n data_vars={\n **layers.standard_vars,\n **grid.standard_vars,\n \"botz\": botz,\n \"t\": t,\n \"eta\": eta,\n \"temp\": temp,\n \"u1\": u1,\n \"u2\": u2,\n \"flag\": flag,\n },\n attrs={\n \"title\": \"Example SHOC dataset\",\n \"ems_version\": \"v1.2.3 fake\",\n \"Conventions\": \"CMR/Timeseries/SHOC\",\n \"nce1\": j_size,\n \"nce2\": i_size,\n \"nfe1\": j_size + 1,\n \"nfe2\": i_size + 1,\n \"gridtype\": \"NUMERICAL\",\n },\n )\n dataset.encoding[\"unlimited_dims\"] = {\"record\"}\n return dataset",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIUC3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkCosImageFilterIF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIF3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkScalarImageKmeansImageFilterIUS3IUC3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj"
]
| [
"0.72853476",
"0.5536447",
"0.548136",
"0.5479699",
"0.54082584",
"0.53730893",
"0.5352577",
"0.5350692",
"0.53032625",
"0.5276474",
"0.52748704",
"0.5271365",
"0.52682966",
"0.5257674",
"0.5239569",
"0.520957",
"0.52051854",
"0.5195257",
"0.51888484",
"0.5175834",
"0.5165416",
"0.5144176",
"0.5137353",
"0.5122534",
"0.5122317",
"0.51112884",
"0.5109417",
"0.50666434",
"0.50534993",
"0.50479305"
]
| 0.7509103 | 0 |
Will save the current grid as an EGRID file. | def save_EGRID( self , filename , output_unit = EclUnitTypeEnum.ERT_ECL_METRIC_UNITS):
self._fwrite_EGRID2( filename, output_unit ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_GRID( self , filename ):\n self._fwrite_GRID( filename )",
"def save_grid(fname, grid):\n\twith open((\"%sGridFix\" % fname), 'w') as file_handler:\n\t for item in grid:\n\t file_handler.write(\"{}\\n\".format(item))\n\t pass\n # Return the name of the file\n\treturn (\"%sGridFix\" % fname)",
"def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()",
"def exportECL(self, fname):\r\n\r\n # TODO add consistency of dimensions across the inputs\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) - 1 # ECLIPSE\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid geometry and properties) (ASCII)\\n')\r\n # f.write('-- Exported by : Petrel 2013.7 (64-bit) Schlumberger\\n'\r\n f.write('-- Exported by : ReGrid v.' + version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write('SPECGRID -- Generated : ReGrid\\n')\r\n f.write(' %i %i %i 1 F /\\n\\n' % (self.ne, self.nn, self.nz))\r\n f.write('COORDSYS -- Generated : ReGrid\\n')\r\n f.write(' 1 4 /\\n\\n') # what is this line?\r\n\r\n f.write('COORD -- Generated : ReGrid\\n')\r\n nz = self.nz\r\n fstr = str(\" \")\r\n\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(0)\r\n fstr = self.printCOORDS(f, p0, fstr)\r\n p1 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(4)\r\n fstr = self.printCOORDS(f, p1, fstr)\r\n # outside edge on far x\r\n p2 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, p2, fstr)\r\n p3 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, p3, fstr)\r\n # outside edge on far y\r\n for ix in range(self.ne):\r\n p8 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(3)\r\n fstr = self.printCOORDS(f, p8, fstr)\r\n p9 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(7)\r\n fstr = self.printCOORDS(f, p9, fstr)\r\n # outside edge on far northeast\r\n p14 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, p14, fstr)\r\n p15 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, p15, fstr)\r\n f.write(fstr)\r\n fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n\r\n f.write('ZCORN -- Generated : ReGrid\\n')\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(0)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(3)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # bottom layer\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(4)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(7)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n f.write(fstr)\r\n 
fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write('ACTNUM -- Generated : ReGrid\\n')\r\n\r\n c = -999\r\n N = 0\r\n for iac in self.ActiveCells.flatten(order='F'):\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999:\r\n fstr = self.printAC(f, c, N, fstr)\r\n c = iac\r\n N = 1\r\n fstr = self.printAC(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n else:\r\n print(\"Only structured grids can be converted to ECLIPSE files\")",
"def save_to_file(grid, filepath):\n outfile = codecs.open(filepath, mode='w+', encoding='utf-8')\n outfile.writelines([((''.join(row)) + u'\\n') for row in grid])",
"def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})",
"def save(self,outPath=None):\n if (not self.canSave or self.skipObjRecords): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.changed = 1\n self.tes3.hedr.changed = 1\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Size Cell Records\n cntRecords = 0\n progress = self.progress\n progress.setMax(len(self.cells))\n progress(0.0,'Saving '+self.fileInfo.name)\n for record in self.cells:\n record.getSize()\n #--Progress\n cntRecords += 1\n progress(cntRecords)\n #--Other Records\n for record in self.records:\n record.getSize() #--Should already be done, but just in case.\n record.dump(out)\n out.close()",
"def dump(self, path, mode='standalone'):\n if mode == 'standalone':\n with open(path+\"/export_grid_standalone\"+str(self._id)+\".html\", 'w+') as f:\n f.write(self.export_html(build=True))\n elif mode == 'all':\n widget_export = self.export_html(build=False)\n with open(path+\"/export_scripts.html\", \"w+\") as f:\n f.write(widget_export['script_tags'])\n with open(path+\"/export_html_state.html\", \"w+\") as f:\n f.write(widget_export['html_state'])\n with open(path+\"/export_state_\"+str(self._id)+\".json\", \"w+\") as f:\n f.write(json.dumps(widget_export['manager_state']))\n with open(path+\"/export_grid_\"+str(self._id)+\".html\", \"w+\") as f:\n f.write(widget_export['grid_div'])",
"def write_grid(self, file_path, fmt='%0.16g'):\n with open(file_path, 'w') as outfile:\n if self.grid.size == 3:\n outfile.write('{}\\t{}\\t{}\\n'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))\n else:\n outfile.write('{}\\t{}\\n'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n with open(file_path, 'ab') as outfile:\n numpy.savetxt(outfile, numpy.c_[self.grid[0]], fmt=fmt)\n numpy.savetxt(outfile, numpy.c_[self.grid[1]], fmt=fmt)\n if self.grid.size == 3:\n numpy.savetxt(outfile, numpy.c_[self.grid[2]], fmt=fmt)",
"def exportTOUGH2(self, fname):\r\n STR = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) # - 1 #\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename, 'w', newline='\\r\\n') as f:\r\n f.write(\"ELEME\")\r\n # debug\r\n f.write(\r\n \"\"\"\r\n 1 10 20 30 40 50 60 70 80\r\n |--------|---------|---------|---------|---------|---------|---------|---------|\r\n 12345678901234567890123456789012345678901234567890123456789012345678901234567890\r\n \"\"\")\r\n\r\n ii = 0\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # f.write(str(iy)+str(ix)+\"\\n\")\r\n # first base\r\n b2 = ii // (len(STR) * len(STR))\r\n b1 = (ii - len(STR) * b2) // len(STR)\r\n b0 = ii % len(STR)\r\n\r\n f.write(STR[b2] + STR[b1] + STR[b0] + \"\\t\" + str(ii) + \"\\n\")\r\n ii += 1",
"def saveGridValues( self ):\n assert(self.hasSaveMemory)\n assert(self.notSaved)\n\n self._my_data[self._saveIdx][:self._layout.size] = self._f[:].flatten()\n self._savedLayout = self._current_layout_name\n\n self.notSaved = False",
"def save_enu(self, filename):\n x, y, z = self.get_coords_enu()\n coords = np.vstack([x, y, z]).T\n np.savetxt(filename, coords, fmt=b'%.12e')",
"def save(self, grid, output_dir, output_name):\n save_grayscale(grid, output_dir, output_name)",
"def save_mrc(grid_object, path):\n\n return MRC_Grid_Write(grid_object, path)",
"def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)",
"def save(self):\n # TODO: save the file",
"def save(self):\n filename = os.path.expanduser(\"~/\" + self.name)\n print(filename)\n np.savetxt(filename + \"_left.txt\", self.central)\n np.savetxt(filename + \"_right.txt\", self.boundaries)",
"def save(self, export_path: str):",
"def saveAs(self):\n self.saveFile()",
"def save(self, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n params = {\n 'model': self.__class__.__name__,\n 'elements': self.elements,\n 'r_cut': self.r_cut,\n 'fitted': self.gp.fitted,\n 'gp': {\n 'kernel': self.gp.kernel.kernel_name,\n 'n_train': self.gp.n_train,\n 'sigma': self.gp.kernel.theta[0],\n 'noise': self.gp.noise,\n 'r0': self.gp.kernel.theta[2]\n },\n 'grid': {\n 'r_min': self.grid_start,\n 'r_max': self.grid_end,\n 'r_num': self.grid_num,\n 'filename': {}\n } if self.grid else {}\n }\n\n gp_filename = \"GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy\".format(\n p=params)\n\n params['gp']['filename'] = gp_filename\n self.gp.save(path / gp_filename)\n\n for k, grid in self.grid.items():\n key = str(k)\n grid_filename = \"GRID_{}_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz\".format(\n key, p=params)\n params['grid']['filename'][key] = grid_filename\n grid.save(path / grid_filename)\n\n with open(path / \"MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params), 'w') as fp:\n json.dump(params, fp, indent=4, cls=NpEncoder)\n\n print(\"Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params))",
"def save(self, ofilename, oname, noisy_only = True):\n ofile = ROOT.TFile(ofilename, 'recreate')\n\n outhists = [h.Clone(oname % (i + 1)) for i, h in enumerate(self.modules)]\n for h, cells in zip(outhists, self.cells):\n if noisy_only: h.Reset()\n for cell in cells: h.SetBinContent(cell[0], cell[1], noisy_only * 1.)\n # h.Write()\n\n ofile.Write()\n ofile.Close()",
"def intf_MMSAVE(E):\n global SAVEFILE\n with open(SAVEFILE,'w') as f:\n f.write( MMEL.simplistic_mm_save_format() )\n print(\"Model script written to: %s\\n\" % SAVEFILE)",
"def pickle_MegaGrid(self):\n pickle.dump(self, open(join(self.out_dir, '{}_{}_MegaGrid.p'.format(self.prot_name, self.probe)), 'wb'))",
"def saveToFile():\n save_interface = Tk()\n save_interface.filename = filedialog.asksaveasfilename(initialdir = os.getcwd(), defaultextension=\".bti\", title = \"Save as\",filetypes = ((\"Bit Tune Image File\",\"*.bti\"),(\"All Files\",\"*.*\")))\n save_interface.destroy()\t\n\n with open (save_interface.filename,'w') as f:\n f.write(str(coordinates))",
"def export(**kwargs):\n\n import os\n\n interface = None # Holds the actual FileInterface for the specified data format\n vertex_index_to_file_key_map = None\n element_index_to_file_key_map = None\n\n if 'file_name' in kwargs:\n fname = kwargs['file_name']\n else:\n raise ValueError(\"file_name must be specified.\")\n \n extension = os.path.splitext(fname)[1].lower()\n\n if extension=='.msh':\n from bempp.api.file_interfaces import gmsh\n interface = gmsh.GmshInterface()\n \n if int('grid' in kwargs) + int('grid_function' in kwargs) != 1:\n raise ValueError(\"Exactly one of 'grid' or 'grid_function' must be specified\")\n\n if 'grid' in kwargs:\n grid = kwargs['grid']\n elif 'grid_function' in kwargs:\n grid = kwargs['grid_function'].grid\n\n number_of_vertices = grid.leaf_view.entity_count(2)\n number_of_elements = grid.leaf_view.entity_count(0)\n\n offset = interface.index_offset\n\n if 'vertex_index_to_file_key_map' in kwargs:\n vertex_index_to_file_key_map = kwargs['vertex_index_to_file_key_map']\n else:\n vertex_index_to_file_key_map = range(offset,number_of_vertices+offset)\n if 'element_index_to_file_key_map' in kwargs:\n element_index_to_file_key_map = kwargs['element_index_to_file_key_map']\n else:\n element_index_to_file_key_map = range(offset,number_of_elements+offset)\n\n # Create the vertex and element structure\n\n from collections import OrderedDict\n\n vertex_iterator = grid.leaf_view.entity_iterator(2)\n element_iterator = grid.leaf_view.entity_iterator(0)\n index_set = grid.leaf_view.index_set()\n\n vertices = OrderedDict([(vertex_index_to_file_key_map[index_set.entity_index(vertex)],vertex.geometry.corners[:,0])\n for vertex in vertex_iterator])\n elements = OrderedDict([(element_index_to_file_key_map[index_set.entity_index(element)],\n {'data':[vertex_index_to_file_key_map[index_set.sub_entity_index(element,n,2)] for n in range(3)],\n 'domain_index':element.domain}) for element in element_iterator])\n\n interface.add_grid_data(vertices,elements)\n\n # Evaluate data\n\n if 'grid_function' in kwargs:\n fun = kwargs['grid_function']\n data_type = kwargs.get('data_type',interface.default_data_type)\n\n if 'transformation' in kwargs:\n transformation = kwargs['transformation']\n else:\n transformation = lambda x: x\n\n index_set = grid.leaf_view.index_set()\n\n if data_type == 'element_node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates))\n interface.add_element_node_data(data,kwargs.get('label','element_node_data'))\n elif data_type == 'node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(vertex_index_to_file_key_map)\n for element in grid.leaf_view.entity_iterator(0):\n local_data = transformation(fun.evaluate(element,local_coordinates))\n for i in range(3):\n data[vertex_index_to_file_key_map[index_set.sub_entity_index(element,i,2)]] = local_data[:,i]\n interface.add_node_data(data,kwargs.get('label','node_data'))\n elif data_type == 'element':\n local_coordinates = _np.array([[1./3],[1./3]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates).ravel())\n 
interface.add_element_data(data,kwargs.get('label','element_data'))\n else:\n raise ValueError(\"data_type must be one of 'node', 'element', or 'element_node'\")\n\n interface.write(kwargs['file_name'])",
"def save(self):\n data = \"\"\n for y in xrange(0, BLOCK_NUM_HEIGHT):\n for x in xrange(0, BLOCK_NUM_WIDTH):\n data += self.blocks[y][x]\n data += '\\n'\n print data\n options = {'defaultextension': '.lvl',\n 'filetypes': [('Levels', '.lvl'), ('All files', '*')],\n 'initialdir': 'levels',\n 'initialfile': '',\n 'title': 'Save level'}\n # filename = tkFileDialog.asksaveasfile(**options)\n filename = asksaveasfilename(**options)\n if filename:\n with open(filename, \"w\") as level:\n level.write(data)",
"def save_grid(\n rho, psi, resol,\n save_options,\n npy, npz, hdf5,\n loc, ix, its_per_save,\n ):\n\n save_num = int((ix + 1) / its_per_save)\n\n if (save_options[0]):\n if (npy):\n file_name = \"rho_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n rho\n )\n if (npz):\n file_name = \"rho_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n rho\n )\n if (hdf5):\n file_name = \"rho_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=rho)\n f.close()\n if (save_options[2]):\n plane = rho[:, :, int(resol / 2)]\n if (npy):\n file_name = \"plane_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n plane\n )\n if (npz):\n file_name = \"plane_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n plane\n )\n if (hdf5):\n file_name = \"plane_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=plane)\n f.close()\n if (save_options[1]):\n if (npy):\n file_name = \"psi_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n psi\n )\n if (npz):\n file_name = \"psi_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n psi\n )\n if (hdf5):\n file_name = \"psi_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=psi)\n f.close()\n if (save_options[4]):\n line = rho[:, int(resol / 2), int(resol / 2)]\n file_name2 = \"line_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name2),\n line\n )",
"def save(self):\n return self.save_as(self.filename)",
"def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")",
"def exportVTK(self, fname):\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n sWrite = vtk.vtkXMLStructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vts\")\r\n sWrite.Write()\r\n elif self.GridType == \"vtkUnstructuredGrid\":\r\n sWrite = vtk.vtkXMLUnstructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vtu\")\r\n sWrite.Write()\r\n else:\r\n print(\"Grid type is not recognized\")"
]
| [
"0.8100519",
"0.7128202",
"0.6636067",
"0.65955096",
"0.6486249",
"0.6424892",
"0.6414643",
"0.635306",
"0.6349147",
"0.6248159",
"0.6176786",
"0.6169242",
"0.61596185",
"0.6142453",
"0.6125207",
"0.6119916",
"0.6115344",
"0.611224",
"0.6074905",
"0.6066688",
"0.60082483",
"0.5975167",
"0.59736925",
"0.59488094",
"0.5946152",
"0.59063935",
"0.59023464",
"0.58985007",
"0.5876776",
"0.58715314"
]
| 0.7662099 | 1 |
Will save the current grid as an EGRID file. | def save_GRID( self , filename ):
self._fwrite_GRID( filename ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_EGRID( self , filename , output_unit = EclUnitTypeEnum.ERT_ECL_METRIC_UNITS):\n self._fwrite_EGRID2( filename, output_unit )",
"def save_grid(fname, grid):\n\twith open((\"%sGridFix\" % fname), 'w') as file_handler:\n\t for item in grid:\n\t file_handler.write(\"{}\\n\".format(item))\n\t pass\n # Return the name of the file\n\treturn (\"%sGridFix\" % fname)",
"def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()",
"def exportECL(self, fname):\r\n\r\n # TODO add consistency of dimensions across the inputs\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) - 1 # ECLIPSE\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid geometry and properties) (ASCII)\\n')\r\n # f.write('-- Exported by : Petrel 2013.7 (64-bit) Schlumberger\\n'\r\n f.write('-- Exported by : ReGrid v.' + version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write('SPECGRID -- Generated : ReGrid\\n')\r\n f.write(' %i %i %i 1 F /\\n\\n' % (self.ne, self.nn, self.nz))\r\n f.write('COORDSYS -- Generated : ReGrid\\n')\r\n f.write(' 1 4 /\\n\\n') # what is this line?\r\n\r\n f.write('COORD -- Generated : ReGrid\\n')\r\n nz = self.nz\r\n fstr = str(\" \")\r\n\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(0)\r\n fstr = self.printCOORDS(f, p0, fstr)\r\n p1 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(4)\r\n fstr = self.printCOORDS(f, p1, fstr)\r\n # outside edge on far x\r\n p2 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, p2, fstr)\r\n p3 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, p3, fstr)\r\n # outside edge on far y\r\n for ix in range(self.ne):\r\n p8 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(3)\r\n fstr = self.printCOORDS(f, p8, fstr)\r\n p9 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(7)\r\n fstr = self.printCOORDS(f, p9, fstr)\r\n # outside edge on far northeast\r\n p14 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, p14, fstr)\r\n p15 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, p15, fstr)\r\n f.write(fstr)\r\n fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n\r\n f.write('ZCORN -- Generated : ReGrid\\n')\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(0)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(3)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # bottom layer\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(4)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(7)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n f.write(fstr)\r\n 
fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write('ACTNUM -- Generated : ReGrid\\n')\r\n\r\n c = -999\r\n N = 0\r\n for iac in self.ActiveCells.flatten(order='F'):\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999:\r\n fstr = self.printAC(f, c, N, fstr)\r\n c = iac\r\n N = 1\r\n fstr = self.printAC(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n else:\r\n print(\"Only structured grids can be converted to ECLIPSE files\")",
"def save_to_file(grid, filepath):\n outfile = codecs.open(filepath, mode='w+', encoding='utf-8')\n outfile.writelines([((''.join(row)) + u'\\n') for row in grid])",
"def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})",
"def save(self,outPath=None):\n if (not self.canSave or self.skipObjRecords): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.changed = 1\n self.tes3.hedr.changed = 1\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Size Cell Records\n cntRecords = 0\n progress = self.progress\n progress.setMax(len(self.cells))\n progress(0.0,'Saving '+self.fileInfo.name)\n for record in self.cells:\n record.getSize()\n #--Progress\n cntRecords += 1\n progress(cntRecords)\n #--Other Records\n for record in self.records:\n record.getSize() #--Should already be done, but just in case.\n record.dump(out)\n out.close()",
"def dump(self, path, mode='standalone'):\n if mode == 'standalone':\n with open(path+\"/export_grid_standalone\"+str(self._id)+\".html\", 'w+') as f:\n f.write(self.export_html(build=True))\n elif mode == 'all':\n widget_export = self.export_html(build=False)\n with open(path+\"/export_scripts.html\", \"w+\") as f:\n f.write(widget_export['script_tags'])\n with open(path+\"/export_html_state.html\", \"w+\") as f:\n f.write(widget_export['html_state'])\n with open(path+\"/export_state_\"+str(self._id)+\".json\", \"w+\") as f:\n f.write(json.dumps(widget_export['manager_state']))\n with open(path+\"/export_grid_\"+str(self._id)+\".html\", \"w+\") as f:\n f.write(widget_export['grid_div'])",
"def write_grid(self, file_path, fmt='%0.16g'):\n with open(file_path, 'w') as outfile:\n if self.grid.size == 3:\n outfile.write('{}\\t{}\\t{}\\n'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))\n else:\n outfile.write('{}\\t{}\\n'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n with open(file_path, 'ab') as outfile:\n numpy.savetxt(outfile, numpy.c_[self.grid[0]], fmt=fmt)\n numpy.savetxt(outfile, numpy.c_[self.grid[1]], fmt=fmt)\n if self.grid.size == 3:\n numpy.savetxt(outfile, numpy.c_[self.grid[2]], fmt=fmt)",
"def exportTOUGH2(self, fname):\r\n STR = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) # - 1 #\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename, 'w', newline='\\r\\n') as f:\r\n f.write(\"ELEME\")\r\n # debug\r\n f.write(\r\n \"\"\"\r\n 1 10 20 30 40 50 60 70 80\r\n |--------|---------|---------|---------|---------|---------|---------|---------|\r\n 12345678901234567890123456789012345678901234567890123456789012345678901234567890\r\n \"\"\")\r\n\r\n ii = 0\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # f.write(str(iy)+str(ix)+\"\\n\")\r\n # first base\r\n b2 = ii // (len(STR) * len(STR))\r\n b1 = (ii - len(STR) * b2) // len(STR)\r\n b0 = ii % len(STR)\r\n\r\n f.write(STR[b2] + STR[b1] + STR[b0] + \"\\t\" + str(ii) + \"\\n\")\r\n ii += 1",
"def saveGridValues( self ):\n assert(self.hasSaveMemory)\n assert(self.notSaved)\n\n self._my_data[self._saveIdx][:self._layout.size] = self._f[:].flatten()\n self._savedLayout = self._current_layout_name\n\n self.notSaved = False",
"def save_enu(self, filename):\n x, y, z = self.get_coords_enu()\n coords = np.vstack([x, y, z]).T\n np.savetxt(filename, coords, fmt=b'%.12e')",
"def save(self, grid, output_dir, output_name):\n save_grayscale(grid, output_dir, output_name)",
"def save_mrc(grid_object, path):\n\n return MRC_Grid_Write(grid_object, path)",
"def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)",
"def save(self):\n # TODO: save the file",
"def save(self):\n filename = os.path.expanduser(\"~/\" + self.name)\n print(filename)\n np.savetxt(filename + \"_left.txt\", self.central)\n np.savetxt(filename + \"_right.txt\", self.boundaries)",
"def save(self, export_path: str):",
"def saveAs(self):\n self.saveFile()",
"def save(self, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n params = {\n 'model': self.__class__.__name__,\n 'elements': self.elements,\n 'r_cut': self.r_cut,\n 'fitted': self.gp.fitted,\n 'gp': {\n 'kernel': self.gp.kernel.kernel_name,\n 'n_train': self.gp.n_train,\n 'sigma': self.gp.kernel.theta[0],\n 'noise': self.gp.noise,\n 'r0': self.gp.kernel.theta[2]\n },\n 'grid': {\n 'r_min': self.grid_start,\n 'r_max': self.grid_end,\n 'r_num': self.grid_num,\n 'filename': {}\n } if self.grid else {}\n }\n\n gp_filename = \"GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy\".format(\n p=params)\n\n params['gp']['filename'] = gp_filename\n self.gp.save(path / gp_filename)\n\n for k, grid in self.grid.items():\n key = str(k)\n grid_filename = \"GRID_{}_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz\".format(\n key, p=params)\n params['grid']['filename'][key] = grid_filename\n grid.save(path / grid_filename)\n\n with open(path / \"MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params), 'w') as fp:\n json.dump(params, fp, indent=4, cls=NpEncoder)\n\n print(\"Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params))",
"def save(self, ofilename, oname, noisy_only = True):\n ofile = ROOT.TFile(ofilename, 'recreate')\n\n outhists = [h.Clone(oname % (i + 1)) for i, h in enumerate(self.modules)]\n for h, cells in zip(outhists, self.cells):\n if noisy_only: h.Reset()\n for cell in cells: h.SetBinContent(cell[0], cell[1], noisy_only * 1.)\n # h.Write()\n\n ofile.Write()\n ofile.Close()",
"def intf_MMSAVE(E):\n global SAVEFILE\n with open(SAVEFILE,'w') as f:\n f.write( MMEL.simplistic_mm_save_format() )\n print(\"Model script written to: %s\\n\" % SAVEFILE)",
"def pickle_MegaGrid(self):\n pickle.dump(self, open(join(self.out_dir, '{}_{}_MegaGrid.p'.format(self.prot_name, self.probe)), 'wb'))",
"def saveToFile():\n save_interface = Tk()\n save_interface.filename = filedialog.asksaveasfilename(initialdir = os.getcwd(), defaultextension=\".bti\", title = \"Save as\",filetypes = ((\"Bit Tune Image File\",\"*.bti\"),(\"All Files\",\"*.*\")))\n save_interface.destroy()\t\n\n with open (save_interface.filename,'w') as f:\n f.write(str(coordinates))",
"def export(**kwargs):\n\n import os\n\n interface = None # Holds the actual FileInterface for the specified data format\n vertex_index_to_file_key_map = None\n element_index_to_file_key_map = None\n\n if 'file_name' in kwargs:\n fname = kwargs['file_name']\n else:\n raise ValueError(\"file_name must be specified.\")\n \n extension = os.path.splitext(fname)[1].lower()\n\n if extension=='.msh':\n from bempp.api.file_interfaces import gmsh\n interface = gmsh.GmshInterface()\n \n if int('grid' in kwargs) + int('grid_function' in kwargs) != 1:\n raise ValueError(\"Exactly one of 'grid' or 'grid_function' must be specified\")\n\n if 'grid' in kwargs:\n grid = kwargs['grid']\n elif 'grid_function' in kwargs:\n grid = kwargs['grid_function'].grid\n\n number_of_vertices = grid.leaf_view.entity_count(2)\n number_of_elements = grid.leaf_view.entity_count(0)\n\n offset = interface.index_offset\n\n if 'vertex_index_to_file_key_map' in kwargs:\n vertex_index_to_file_key_map = kwargs['vertex_index_to_file_key_map']\n else:\n vertex_index_to_file_key_map = range(offset,number_of_vertices+offset)\n if 'element_index_to_file_key_map' in kwargs:\n element_index_to_file_key_map = kwargs['element_index_to_file_key_map']\n else:\n element_index_to_file_key_map = range(offset,number_of_elements+offset)\n\n # Create the vertex and element structure\n\n from collections import OrderedDict\n\n vertex_iterator = grid.leaf_view.entity_iterator(2)\n element_iterator = grid.leaf_view.entity_iterator(0)\n index_set = grid.leaf_view.index_set()\n\n vertices = OrderedDict([(vertex_index_to_file_key_map[index_set.entity_index(vertex)],vertex.geometry.corners[:,0])\n for vertex in vertex_iterator])\n elements = OrderedDict([(element_index_to_file_key_map[index_set.entity_index(element)],\n {'data':[vertex_index_to_file_key_map[index_set.sub_entity_index(element,n,2)] for n in range(3)],\n 'domain_index':element.domain}) for element in element_iterator])\n\n interface.add_grid_data(vertices,elements)\n\n # Evaluate data\n\n if 'grid_function' in kwargs:\n fun = kwargs['grid_function']\n data_type = kwargs.get('data_type',interface.default_data_type)\n\n if 'transformation' in kwargs:\n transformation = kwargs['transformation']\n else:\n transformation = lambda x: x\n\n index_set = grid.leaf_view.index_set()\n\n if data_type == 'element_node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates))\n interface.add_element_node_data(data,kwargs.get('label','element_node_data'))\n elif data_type == 'node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(vertex_index_to_file_key_map)\n for element in grid.leaf_view.entity_iterator(0):\n local_data = transformation(fun.evaluate(element,local_coordinates))\n for i in range(3):\n data[vertex_index_to_file_key_map[index_set.sub_entity_index(element,i,2)]] = local_data[:,i]\n interface.add_node_data(data,kwargs.get('label','node_data'))\n elif data_type == 'element':\n local_coordinates = _np.array([[1./3],[1./3]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates).ravel())\n 
interface.add_element_data(data,kwargs.get('label','element_data'))\n else:\n raise ValueError(\"data_type must be one of 'node', 'element', or 'element_node'\")\n\n interface.write(kwargs['file_name'])",
"def save(self):\n data = \"\"\n for y in xrange(0, BLOCK_NUM_HEIGHT):\n for x in xrange(0, BLOCK_NUM_WIDTH):\n data += self.blocks[y][x]\n data += '\\n'\n print data\n options = {'defaultextension': '.lvl',\n 'filetypes': [('Levels', '.lvl'), ('All files', '*')],\n 'initialdir': 'levels',\n 'initialfile': '',\n 'title': 'Save level'}\n # filename = tkFileDialog.asksaveasfile(**options)\n filename = asksaveasfilename(**options)\n if filename:\n with open(filename, \"w\") as level:\n level.write(data)",
"def save_grid(\n rho, psi, resol,\n save_options,\n npy, npz, hdf5,\n loc, ix, its_per_save,\n ):\n\n save_num = int((ix + 1) / its_per_save)\n\n if (save_options[0]):\n if (npy):\n file_name = \"rho_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n rho\n )\n if (npz):\n file_name = \"rho_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n rho\n )\n if (hdf5):\n file_name = \"rho_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=rho)\n f.close()\n if (save_options[2]):\n plane = rho[:, :, int(resol / 2)]\n if (npy):\n file_name = \"plane_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n plane\n )\n if (npz):\n file_name = \"plane_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n plane\n )\n if (hdf5):\n file_name = \"plane_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=plane)\n f.close()\n if (save_options[1]):\n if (npy):\n file_name = \"psi_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name),\n psi\n )\n if (npz):\n file_name = \"psi_#{0}.npz\".format(save_num)\n np.savez(\n os.path.join(os.path.expanduser(loc), file_name),\n psi\n )\n if (hdf5):\n file_name = \"psi_#{0}.hdf5\".format(save_num)\n file_name = os.path.join(os.path.expanduser(loc), file_name)\n f = h5py.File(file_name, 'w')\n dset = f.create_dataset(\"init\", data=psi)\n f.close()\n if (save_options[4]):\n line = rho[:, int(resol / 2), int(resol / 2)]\n file_name2 = \"line_#{0}.npy\".format(save_num)\n np.save(\n os.path.join(os.path.expanduser(loc), file_name2),\n line\n )",
"def save(self):\n return self.save_as(self.filename)",
"def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")",
"def exportVTK(self, fname):\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n sWrite = vtk.vtkXMLStructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vts\")\r\n sWrite.Write()\r\n elif self.GridType == \"vtkUnstructuredGrid\":\r\n sWrite = vtk.vtkXMLUnstructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vtu\")\r\n sWrite.Write()\r\n else:\r\n print(\"Grid type is not recognized\")"
]
| [
"0.7662099",
"0.7128202",
"0.6636067",
"0.65955096",
"0.6486249",
"0.6424892",
"0.6414643",
"0.635306",
"0.6349147",
"0.6248159",
"0.6176786",
"0.6169242",
"0.61596185",
"0.6142453",
"0.6125207",
"0.6119916",
"0.6115344",
"0.611224",
"0.6074905",
"0.6066688",
"0.60082483",
"0.5975167",
"0.59736925",
"0.59488094",
"0.5946152",
"0.59063935",
"0.59023464",
"0.58985007",
"0.5876776",
"0.58715314"
]
| 0.8100519 | 0 |
Writes an EclKW instance as an ECLIPSE grdecl formatted file. The ecl_kw input argument must be an EclKW instance of size nactive or nx*ny*nz. If the size is nactive the inactive cells will be filled with default_value; hence the function will always write nx*ny*nz elements. The data in the ecl_kw argument can be of type integer, float, double or bool. In the case of bool the default value must be specified as 1 (True) or 0 (False). The pyfile input argument should be a valid python filehandle opened for writing; i.e. pyfile = open("PORO.GRDECL" , "w") grid.write_grdecl( poro_kw , pyfile , default_value = 0.0) grid.write_grdecl( permx_kw , pyfile , default_value = 0.0) pyfile.close() | def write_grdecl( self , ecl_kw , pyfile , special_header = None , default_value = 0):
if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
cfile = CFILE( pyfile )
self._fwrite_grdecl( ecl_kw , special_header , cfile , default_value )
else:
raise ValueError("Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d" % (ecl_kw.getName() , len(ecl_kw) , self.getNumActive() , self.getGlobalSize())) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_grdecl(self , pyfile):\n cfile = CFILE( pyfile )\n self._fprintf_grdecl( cfile )",
"def exportECL(self, fname):\r\n\r\n # TODO add consistency of dimensions across the inputs\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) - 1 # ECLIPSE\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid geometry and properties) (ASCII)\\n')\r\n # f.write('-- Exported by : Petrel 2013.7 (64-bit) Schlumberger\\n'\r\n f.write('-- Exported by : ReGrid v.' + version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write('SPECGRID -- Generated : ReGrid\\n')\r\n f.write(' %i %i %i 1 F /\\n\\n' % (self.ne, self.nn, self.nz))\r\n f.write('COORDSYS -- Generated : ReGrid\\n')\r\n f.write(' 1 4 /\\n\\n') # what is this line?\r\n\r\n f.write('COORD -- Generated : ReGrid\\n')\r\n nz = self.nz\r\n fstr = str(\" \")\r\n\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(0)\r\n fstr = self.printCOORDS(f, p0, fstr)\r\n p1 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(4)\r\n fstr = self.printCOORDS(f, p1, fstr)\r\n # outside edge on far x\r\n p2 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, p2, fstr)\r\n p3 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, p3, fstr)\r\n # outside edge on far y\r\n for ix in range(self.ne):\r\n p8 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(3)\r\n fstr = self.printCOORDS(f, p8, fstr)\r\n p9 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(7)\r\n fstr = self.printCOORDS(f, p9, fstr)\r\n # outside edge on far northeast\r\n p14 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, p14, fstr)\r\n p15 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, p15, fstr)\r\n f.write(fstr)\r\n fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n\r\n f.write('ZCORN -- Generated : ReGrid\\n')\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(0)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(3)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # bottom layer\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(4)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(7)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n f.write(fstr)\r\n 
fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write('ACTNUM -- Generated : ReGrid\\n')\r\n\r\n c = -999\r\n N = 0\r\n for iac in self.ActiveCells.flatten(order='F'):\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999:\r\n fstr = self.printAC(f, c, N, fstr)\r\n c = iac\r\n N = 1\r\n fstr = self.printAC(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n else:\r\n print(\"Only structured grids can be converted to ECLIPSE files\")",
"def save_grdfile(grddata,depdata,outname,is31=True):\n \n if outname==None:\n print('save_grdfile requires a filename to save.')\n return\n try:\n fp=open(outname,'w')\n except IOError:\n print('save_grdfile: invalid filename.')\n return data\n if is31:\n fp.write('Node Number = %d\\n' % len(depdata['node_num']) )\n fp.write('Cell Number = %d\\n' % len(grddata['nv']) )\n for i in range(0,len(grddata['nv'])):\n fp.write('%d %d %d %d %d\\n'% (grddata['ele_num'][i],grddata['nv'][i,0],grddata['nv'][i,1],grddata['nv'][i,2],0))\n\n for i in range(0,len(depdata['node_num'])):\n fp.write('%d %f %f %f\\n'% (depdata['node_num'][i],depdata['x'][i],depdata['y'][i],depdata['h'][i]))\n fp.close()\n \n return",
"def write_gds(outfile,\n cells=None,\n name='library',\n unit=1.0e-6,\n precision=1.0e-9):\n current_library.name = name\n current_library.unit = unit\n current_library.precision = precision\n current_library.write_gds(outfile, cells)",
"def write_field(self, filename, field,griddescfile=None,fieldname=None):\n\n with scipyio.FortranFile(filename,mode='w') as f: #@UndefinedVariable\n print(\"Writing output to {0}\".format(filename))\n f.write_record(field.get_data())",
"def write_shapefile(self, filename='grid.shp', epsg=None, prj=None):\n from ..export.shapefile_utils import write_grid_shapefile2\n if epsg is None and prj is None:\n epsg = self.epsg\n write_grid_shapefile2(filename, self, array_dict={}, nan_val=-1.0e9,\n epsg=epsg, prj=prj)",
"def write_field(self,filename,field,griddescfile=None,fieldname=None):\n\n print(\"Writing output to {0}\".format(filename))\n np.savetxt(filename, field.get_data())",
"def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)",
"def write_ptm_gridfile(self,fn):\n vertex_hdr = \" Vertex Data: vertex_number, x, y\"\n poly_hdr = \" Polygon Data: polygon_number, number_of_sides,center_x, center_y, center_depth, side_indices(number_of_sides), marker(0=internal,1=open boundary)\"\n side_hdr = \" Side Data: side_number, side_depth, node_indices(2), cell_indices(2), marker(0=internal,1=external,2=flow boundary,3=open boundary)\"\n\n with open(fn,'wt') as fp:\n # write header counts\n fp.write(\" Number of Vertices\\n\")\n fp.write(\" %20d\\n\"%self.Nnodes())\n fp.write(\" Number of Polygons\\n\")\n fp.write(\" %20d\\n\"%self.Ncells())\n fp.write(\" Number of Sides\\n\")\n fp.write(\" %20d\\n\"%self.Nedges())\n fp.write(\" NODATA (land) value\\n\")\n fp.write(\" -9999.000000000\\n\")\n\n # write vertex info\n fp.write(vertex_hdr+\"\\n\")\n for v in range(self.Nnodes()):\n fp.write(\" %10d %16.7f %16.7f\\n\"%(v+1,\n self.nodes['x'][v,0],\n self.nodes['x'][v,1]))\n\n # write polygon info\n fp.write(poly_hdr+\"\\n\")\n cell_write_str1 = \" %10d %10d %16.7f %16.7f %16.7f \"\n cell_depths = self.cell_depths()\n for e in range(self.Ncells()):\n edges = self.cells['edges'][e,:]\n edges[edges<0] = -1\n edge_str = \" \".join( [\"%10d\"%(s+1) for s in edges] )\n edge_str = edge_str+\" %10d\\n\"%(self.cells['mark'][e])\n nsides = sum(edges>=0)\n fp.write(cell_write_str1%(e+1,\n nsides,\n self.cells['_center'][e,0],\n self.cells['_center'][e,1],\n cell_depths[e]))\n fp.write(edge_str)\n \n # write side info\n fp.write(side_hdr+\"\\n\")\n edge_depths = self.edge_depths()\n edge_write_str = \" %10d %16.7f %10d %10d %10d %10d %10d\\n\"\n for s in range(self.Nedges()):\n edges = self.edges['cells'][s,:]\n edges[edges<0] = -1 \n nodes = self.edges['nodes'][s,:]\n nodes[nodes<0] = -1\n fp.write(edge_write_str%(s+1,\n edge_depths[s],\n nodes[0]+1,\n nodes[1]+1,\n edges[0]+1,\n edges[1]+1,\n self.edges['mark'][s]))",
"def save_depfile(depdata,outname,is31=True): \n\n if outname==None:\n print('save_depfile requires a filename to save.')\n return\n try:\n fp=open(outname,'w')\n except IOError:\n print('save_depfile: invalid filename.')\n return data\n if is31:\n fp.write('Node Number = %d\\n' % len(depdata['node_num']) )\n for i in range(0,len(depdata['node_num'])):\n fp.write('%f %f %f\\n'% (depdata['x'][i],depdata['y'][i],depdata['h'][i]))\n fp.close()\n \n return",
"def _write_dx(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n if FN.endswith('.dx'):\n F = open(FN, 'w')\n else:\n import gzip\n F = gzip.open(FN, 'w')\n\n F.write(\"\"\"object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}\norigin {1[0]} {1[1]} {1[2]}\ndelta {2[0]} 0.0 0.0\ndelta 0.0 {2[1]} 0.0\ndelta 0.0 0.0 {2[2]}\nobject 2 class gridconnections counts {0[0]} {0[1]} {0[2]}\nobject 3 class array type double rank 0 items {3} data follows\n\"\"\".format(data['counts'], data['origin'], data['spacing'], n_points))\n\n for start_n in range(0, len(data['vals']), 3):\n F.write(' '.join(['%6e' % c\n for c in data['vals'][start_n:start_n + 3]]) + '\\n')\n\n F.write('object 4 class field\\n')\n F.write('component \"positions\" value 1\\n')\n F.write('component \"connections\" value 2\\n')\n F.write('component \"data\" value 3\\n')\n F.close()",
"def write_ugrid(self,\n fn,\n mesh_name='mesh',\n fields='auto',\n overwrite=False):\n if os.path.exists(fn):\n if overwrite:\n os.unlink(fn)\n else:\n raise GridException(\"File %s exists\"%(fn))\n\n if 1: # xarray-based code\n ds=xr.Dataset()\n ds[mesh_name]=1\n\n mesh_var=ds[mesh_name]\n mesh_var.attrs['cf_role']='mesh_topology'\n mesh_var.attrs['node_coordinates']='node_x node_y'\n mesh_var.attrs['face_node_connectivity']='face_node'\n mesh_var.attrs['edge_node_connectivity']='edge_node'\n mesh_var.attrs['node_dimension']='node'\n mesh_var.attrs['edge_dimension']='edge'\n mesh_var.attrs['face_dimension']='face'\n \n ds['node_x'] = ('node',),self.nodes['x'][:,0]\n ds['node_y'] = ('node',),self.nodes['x'][:,1]\n\n ds['face_node'] = ('face','maxnode_per_face'),self.cells['nodes']\n\n ds['edge_node']=('edge','node_per_edge'),self.edges['nodes']\n\n if fields=='auto':\n for src_data,dim_name in [ (self.cells,'face'),\n (self.edges,'edge'),\n (self.nodes,'node') ]:\n for field in src_data.dtype.names:\n if field.startswith('_'):\n continue\n if field in ['cells','nodes','edges','deleted']:\n continue # already included\n if src_data[field].ndim != 1:\n continue # not smart enough for that yet\n if field in ds:\n out_field = dim_name + \"_\" + field\n else:\n out_field=field\n \n ds[out_field] = (dim_name,),src_data[field]\n ds.to_netcdf(fn)\n \n if 0: # old qnc-based code\n nc=qnc.empty(fn)\n\n nc[mesh_name]=1\n mesh_var=nc.variables[mesh_name]\n mesh_var.cf_role='mesh_topology'\n\n mesh_var.node_coordinates='node_x node_y'\n nc['node_x']['node']=self.nodes['x'][:,0]\n nc['node_y']['node']=self.nodes['x'][:,1]\n\n mesh_var.face_node_connectivity='face_node'\n nc['face_node']['face','maxnode_per_face']=self.cells['nodes']\n\n mesh_var.edge_node_connectivity='edge_node'\n nc['edge_node']['edge','node_per_edge']=self.edges['nodes']\n\n nc.close()",
"def write_ROMS_grid(grd, visc_factor, diff_factor, filename='roms_grd.nc'):\n\n Mm, Lm = grd.hgrid.x_rho.shape\n\n \n # Write ROMS grid to file\n nc = netCDF.Dataset(filename, 'w', format='NETCDF4')\n nc.Description = 'ROMS grid'\n nc.Author = 'Trond Kristiansen'\n nc.Created = datetime.now().isoformat()\n nc.type = 'ROMS grid file'\n\n nc.createDimension('xi_rho', Lm)\n nc.createDimension('xi_u', Lm-1)\n nc.createDimension('xi_v', Lm)\n nc.createDimension('xi_psi', Lm-1)\n \n nc.createDimension('eta_rho', Mm)\n nc.createDimension('eta_u', Mm)\n nc.createDimension('eta_v', Mm-1)\n nc.createDimension('eta_psi', Mm-1)\n\n nc.createDimension('xi_vert', Lm+1)\n nc.createDimension('eta_vert', Mm+1)\n\n nc.createDimension('bath', None)\n\n if hasattr(grd.vgrid, 's_rho') is True and grd.vgrid.s_rho is not None:\n N, = grd.vgrid.s_rho.shape\n nc.createDimension('s_rho', N)\n nc.createDimension('s_w', N+1)\n\n def write_nc_var(var, name, dimensions, long_name=None, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if long_name is not None:\n nc.variables[name].long_name = long_name\n if units is not None:\n nc.variables[name].units = units\n nc.variables[name][:] = var\n print ' ... wrote ', name\n\n if hasattr(grd.vgrid, 's_rho') is True and grd.vgrid.s_rho is not None:\n write_nc_var(grd.vgrid.theta_s, 'theta_s', (), 'S-coordinate surface control parameter')\n write_nc_var(grd.vgrid.theta_b, 'theta_b', (), 'S-coordinate bottom control parameter')\n write_nc_var(grd.vgrid.Tcline, 'Tcline', (), 'S-coordinate surface/bottom layer width', 'meter')\n write_nc_var(grd.vgrid.hc, 'hc', (), 'S-coordinate parameter, critical depth', 'meter')\n write_nc_var(grd.vgrid.s_rho, 's_rho', ('s_rho'), 'S-coordinate at RHO-points')\n write_nc_var(grd.vgrid.s_w, 's_w', ('s_w'), 'S-coordinate at W-points')\n write_nc_var(grd.vgrid.Cs_r, 'Cs_r', ('s_rho'), 'S-coordinate stretching curves at RHO-points')\n write_nc_var(grd.vgrid.Cs_w, 'Cs_w', ('s_w'), 'S-coordinate stretching curves at W-points')\n\n write_nc_var(grd.vgrid.h, 'h', ('eta_rho', 'xi_rho'), 'bathymetry at RHO-points', 'meter')\n #ensure that we have a bath dependancy for hraw\n if len(grd.vgrid.hraw.shape) == 2:\n hraw = np.zeros((1, grd.vgrid.hraw.shape[0], grd.vgrid.hraw.shape[1]))\n hraw[0,:] = grd.vgrid.hraw\n else:\n hraw = grd.vgrid.hraw\n write_nc_var(hraw, 'hraw', ('bath', 'eta_rho', 'xi_rho'), 'raw bathymetry at RHO-points', 'meter')\n write_nc_var(grd.hgrid.f, 'f', ('eta_rho', 'xi_rho'), 'Coriolis parameter at RHO-points', 'second-1')\n write_nc_var(1./grd.hgrid.dx, 'pm', ('eta_rho', 'xi_rho'), 'curvilinear coordinate metric in XI', 'meter-1')\n write_nc_var(1./grd.hgrid.dy, 'pn', ('eta_rho', 'xi_rho'), 'curvilinear coordinate metric in ETA', 'meter-1')\n write_nc_var(grd.hgrid.dmde, 'dmde', ('eta_rho', 'xi_rho'), 'XI derivative of inverse metric factor pn', 'meter')\n write_nc_var(grd.hgrid.dndx, 'dndx', ('eta_rho', 'xi_rho'), 'ETA derivative of inverse metric factor pm', 'meter')\n write_nc_var(grd.hgrid.xl, 'xl', (), 'domain length in the XI-direction', 'meter')\n write_nc_var(grd.hgrid.el, 'el', (), 'domain length in the ETA-direction', 'meter')\n\n write_nc_var(grd.hgrid.x_rho, 'x_rho', ('eta_rho', 'xi_rho'), 'x location of RHO-points', 'meter')\n write_nc_var(grd.hgrid.y_rho, 'y_rho', ('eta_rho', 'xi_rho'), 'y location of RHO-points', 'meter')\n write_nc_var(grd.hgrid.x_u, 'x_u', ('eta_u', 'xi_u'), 'x location of U-points', 'meter')\n write_nc_var(grd.hgrid.y_u, 'y_u', ('eta_u', 'xi_u'), 'y location of U-points', 'meter')\n 
write_nc_var(grd.hgrid.x_v, 'x_v', ('eta_v', 'xi_v'), 'x location of V-points', 'meter')\n write_nc_var(grd.hgrid.y_v, 'y_v', ('eta_v', 'xi_v'), 'y location of V-points', 'meter')\n write_nc_var(grd.hgrid.x_psi, 'x_psi', ('eta_psi', 'xi_psi'), 'x location of PSI-points', 'meter')\n write_nc_var(grd.hgrid.y_psi, 'y_psi', ('eta_psi', 'xi_psi'), 'y location of PSI-points', 'meter')\n write_nc_var(grd.hgrid.x_vert, 'x_vert', ('eta_vert', 'xi_vert'), 'x location of cell verticies', 'meter')\n write_nc_var(grd.hgrid.y_vert, 'y_vert', ('eta_vert', 'xi_vert'), 'y location of cell verticies', 'meter')\n\n if hasattr(grd.hgrid, 'lon_rho'):\n write_nc_var(grd.hgrid.lon_rho, 'lon_rho', ('eta_rho', 'xi_rho'), 'longitude of RHO-points', 'degree_east')\n write_nc_var(grd.hgrid.lat_rho, 'lat_rho', ('eta_rho', 'xi_rho'), 'latitude of RHO-points', 'degree_north')\n write_nc_var(grd.hgrid.lon_u, 'lon_u', ('eta_u', 'xi_u'), 'longitude of U-points', 'degree_east')\n write_nc_var(grd.hgrid.lat_u, 'lat_u', ('eta_u', 'xi_u'), 'latitude of U-points', 'degree_north')\n write_nc_var(grd.hgrid.lon_v, 'lon_v', ('eta_v', 'xi_v'), 'longitude of V-points', 'degree_east')\n write_nc_var(grd.hgrid.lat_v, 'lat_v', ('eta_v', 'xi_v'), 'latitude of V-points', 'degree_north')\n write_nc_var(grd.hgrid.lon_psi, 'lon_psi', ('eta_psi', 'xi_psi'), 'longitude of PSI-points', 'degree_east')\n write_nc_var(grd.hgrid.lat_psi, 'lat_psi', ('eta_psi', 'xi_psi'), 'latitude of PSI-points', 'degree_north')\n write_nc_var(grd.hgrid.lon_vert, 'lon_vert', ('eta_vert', 'xi_vert'), 'longitude of cell verticies', 'degree_east')\n write_nc_var(grd.hgrid.lat_vert, 'lat_vert', ('eta_vert', 'xi_vert'), 'latitude of cell verticies', 'degree_north')\n\n nc.createVariable('spherical', 'c')\n nc.variables['spherical'].long_name = 'Grid type logical switch'\n nc.variables['spherical'][:] = grd.hgrid.spherical\n print ' ... wrote ', 'spherical'\n\n write_nc_var(grd.hgrid.angle_rho, 'angle', ('eta_rho', 'xi_rho'), 'angle between XI-axis and EAST', 'radians')\n\n write_nc_var(grd.hgrid.mask_rho, 'mask_rho', ('eta_rho', 'xi_rho'), 'mask on RHO-points')\n write_nc_var(grd.hgrid.mask_u, 'mask_u', ('eta_u', 'xi_u'), 'mask on U-points')\n write_nc_var(grd.hgrid.mask_v, 'mask_v', ('eta_v', 'xi_v'), 'mask on V-points')\n write_nc_var(grd.hgrid.mask_psi, 'mask_psi', ('eta_psi', 'xi_psi'), 'mask on psi-points')\n\n if visc_factor != None:\n write_nc_var(visc_factor, 'visc_factor', ('eta_rho', 'xi_rho'), 'horizontal viscosity sponge factor')\n if diff_factor != None:\n write_nc_var(diff_factor, 'diff_factor', ('eta_rho', 'xi_rho'), 'horizontal diffusivity sponge factor')\n \n nc.close()",
"def exportECLPropertyFiles(self, fname):\r\n\r\n # Convert point data to cell data for output\r\n # verifying if this is necessary or if ECLIPSE can use point attributes\r\n pointConvert = True\r\n if pointConvert:\r\n p2c = vtk.vtkPointDataToCellData()\r\n p2c.SetInputDataObject(self.Grid)\r\n p2c.PassPointDataOn()\r\n p2c.Update()\r\n self.Grid = p2c.GetOutput()\r\n\r\n filename, ext = os.path.splitext(fname)\r\n for ia in range(self.Grid.GetCellData().GetNumberOfArrays()):\r\n prop = self.Grid.GetCellData().GetArray(ia).GetName()\r\n print(\"exporting prop\", prop)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \"prop-\" + prop.lower() + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid properties) (ASCII)\\n')\r\n f.write('-- Exported by : ReGrid v.' + version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Unit system : ' + \"ECLIPSE-Field\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write(prop.upper() + ' -- Generated : ReGrid\\n')\r\n f.write('-- Property name in Petrel : ' + prop + '\\n')\r\n\r\n c = -999.9999\r\n N = 0\r\n ii = 0\r\n fstr = \" \"\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # iac = round(self.Grid.GetCellData().GetArray(ia).GetTuple1(ii), 4)\r\n iac = '{:0.4e}'.format(self.Grid.GetCellData().GetArray(ia).GetTuple1(ii))\r\n print(iac)\r\n ii += 1\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999.9999:\r\n fstr = self.printPROP(f, c, N, fstr)\r\n c = eval(iac)\r\n N = 1\r\n fstr = self.printPROP(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")",
"def write_field(self,filename,field,griddescfile=None,fieldname=None):\n\n pass",
"def draw_gds(self):\n\n import gdstk\n\n lib = gdstk.Library()\n\n cell = lib.new_cell(\"FIRST\")\n\n for x, feature in self.features.items():\n curr_layer = list(self.layers.keys()).index(self.features[x].layer)\n for y in feature.coord:\n \n y = np.round(100*y)/100\n if self.layers[feature.layer]['inversion']==1:\n y = -y\n \n if feature.text_width is not None:\n fp1 = gdstk.FlexPath(y, width=feature.text_width, simple_path=True, layer=curr_layer)\n cell.add(fp1)\n else:\n cell.add(gdstk.Polygon(y, curr_layer))\n \n if self.file is None:\n raise Exception(\"No file name given. Use design.file to set name.\")\n filename = Path(self.file).with_suffix('.gds')\n lib.write_gds(filename)",
"def write_field(self, filename, field,griddescfile=None,fieldname=None):\n\n nlat,nlong = field.get_grid().get_grid_dimensions()\n if fieldname is None:\n fieldname = 'field_value'\n print(\"Writing output to {0}\".format(filename))\n if griddescfile is not None:\n output_filename=filename\n filename=path.splitext(filename)[0] + '_temp' + path.splitext(filename)[1]\n data_was_bool = False\n with netCDF4.Dataset(filename,mode='w',format='NETCDF4') as dataset:\n dataset.createDimension(\"latitude\",nlat)\n dataset.createDimension(\"longitude\",nlong)\n if field.get_data().dtype == np.bool_:\n field.set_data(field.get_data().astype(np.int32))\n data_was_bool=True\n field_values = dataset.createVariable(fieldname,field.get_data().dtype,\n ('latitude','longitude'))\n field_values[:,:] = field.get_data()\n if data_was_bool:\n field.set_data(field.get_data().astype(np.bool_))\n if griddescfile is not None:\n cdo_instance = cdo.Cdo()\n cdo_instance.setgrid(griddescfile,input=filename,output=output_filename)\n os.remove(filename)",
"def export_gds(lum_app, filename, top_cell_name, layer_def,\n n_circle = 64, n_ring = 64, n_custom = 64, n_wg = 64,\n round_to_nm = 1, grid = 1e-9, max_objects = 10000):\n \n print(\"Exporting from Lumerical application to GDS II...\")\n layer_def_str = \"layer_def = [\"\n for i in range(0,len(layer_def)):\n if i == (len(layer_def) - 1):\n # Insert end bracket and semi-colon at end of array\n layer_def_str = layer_def_str + str(layer_def[i]) + \"];\"\n elif (i + 1) % 4 == 0:\n # Insert semi-colon after every 4 params\n layer_def_str = layer_def_str + str(layer_def[i]) + \";\"\n else:\n layer_def_str = layer_def_str + str(layer_def[i]) + \",\"\n \n lsf_script = str(\"gds_filename = '{}.gds';\".format(filename) +\n \"top_cell = '{}';\".format(top_cell_name) +\n layer_def_str.format(-220.0e-9/2, 220.0e-9/2) +\n \"n_circle = {};\".format(n_circle) +\n \"n_ring = {};\".format(n_ring) +\n \"n_custom = {};\".format(n_custom) +\n \"n_wg = {};\".format(n_wg) +\n \"round_to_nm = {};\".format(round_to_nm) +\n \"grid = {};\".format(grid) +\n \"max_objects = {};\".format(max_objects) +\n \"Lumerical_GDS_auto_export;\")\n #return lsf_script\n # Run lsf script to export gds\n lum_app.cd(os.getcwd())\n lum_app.eval(lsf_script)\n return os.path.join(os.getcwd(), filename+\".gds\")",
"def write_field(self, filename, field,griddescfile=None,fieldname=None):\n\n print(\"Writing output to {0}\".format(filename))\n mgnr = f2py_mg.f2py_manager(path.join(fortran_source_path,\n \"mod_topo_io.f90\"), func_name=\"write_topo\")\n #reverse the manipulation in the load_field method\n data = np.rot90(np.fliplr(field.get_data()))\n mgnr.run_current_function_or_subroutine(filename,data)",
"def write_file(self, f=None):\n # get model information\n nlay = self.parent.nlay\n dis = self.parent.get_package(\"DIS\")\n if dis is None:\n dis = self.parent.get_package(\"DISU\")\n\n # Open file for writing\n if f is None:\n f_obj = open(self.fn_path, \"w\")\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET, IKVFLAG, IKCFLAG\n f_obj.write(\n f\" {self.ipakcb:9d} {self.hdry:9.3G} {self.iwdflg:9d}\"\n f\" {self.wetfct:9.3G} {self.iwetit:9d} {self.ihdwet:9d}\"\n f\" {self.ikvflag:9d} {self.ikcflag:9d}\\n\"\n )\n\n # LAYCON array\n for layer in range(nlay):\n if self.intercellt[layer] > 0:\n f_obj.write(\n f\"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} \"\n )\n else:\n f_obj.write(f\"0{self.laycon[layer]:1d} \")\n f_obj.write(\"\\n\")\n\n # TRPY, <ANGLEX>\n f_obj.write(self.trpy.get_file_entry())\n transient = not dis.steady.all()\n structured = self.parent.structured\n anis = any(t != 1 for t in self.trpy)\n if (not structured) and anis:\n f_obj.write(self.anglex.get_file_entry())\n\n # <SF1>, <TRAN>, <HY>, <VCONT>, <KV>, <SF2>, <WETDRY>\n for layer in range(nlay):\n if transient:\n f_obj.write(self.sf1[layer].get_file_entry())\n\n if self.ikcflag == 0:\n self._write_hy_tran_vcont_kv(f_obj, layer)\n\n if transient and (self.laycon[layer] in [2, 3, 4]):\n f_obj.write(self.sf2[layer].get_file_entry())\n\n if (self.iwdflg != 0) and (self.laycon[layer] in [1, 3]):\n f_obj.write(self.wetdry[layer].get_file_entry())\n\n # <KSAT> (if ikcflag==1)\n if abs(self.ikcflag == 1):\n f_obj.write(self.ksat.get_file_entry())\n\n f_obj.close()",
"def write_gro(top, filename, precision=3):\n pos_array = np.ndarray.copy(top.positions)\n pos_array = _validate_positions(pos_array)\n\n with open(filename, \"w\") as out_file:\n out_file.write(\n \"{} written by GMSO {} at {}\\n\".format(\n top.name if top.name is not None else \"\",\n gmso.__version__,\n str(datetime.datetime.now()),\n )\n )\n out_file.write(\"{:d}\\n\".format(top.n_sites))\n out_file.write(_prepare_atoms(top, pos_array, precision))\n out_file.write(_prepare_box(top))",
"def write_config(file_prefix=None, dir=None, date=None):\n\n # Print out.\n print(\"\\nCreating the OpenDX .cfg program configuration file.\")\n\n # Open the file.\n file = open_write_file(file_name=file_prefix+\".cfg\", dir=dir, force=True)\n\n # Generate the text.\n file.write(\"//\\n\")\n file.write(\"//\\n\")\n file.write(\"// time: %s\\n\" % date)\n file.write(\"//\\n\")\n file.write(\"// version: 3.2.0 (format), 4.3.2 (DX)\\n\")\n file.write(\"//\\n\")\n file.write(\"//\\n\")\n file.write(\"// panel[0]: position = (0.0164,0.0000), size = 0.2521x0.1933, startup = 1, devstyle = 1\\n\")\n file.write(\"// title: value = Control Panel\\n\")\n file.write(\"//\\n\")\n file.write(\"// workspace: width = 251, height = 142\\n\")\n file.write(\"// layout: snap = 0, width = 50, height = 50, align = NN\\n\")\n file.write(\"//\\n\")\n file.write(\"// interactor Selector[1]: num_components = 1, value = 1 \\n\")\n file.write(\"// selections: maximum = 2, current = 0 \\n\")\n file.write(\"// option[0]: name = \\\"Colour\\\", value = 1\\n\")\n file.write(\"// option[1]: name = \\\"Grey\\\", value = 2\\n\")\n file.write(\"// instance: panel = 0, x = 81, y = 6, style = Scrolled List, vertical = 1, size = 170x136\\n\")\n file.write(\"// label: value = Colour Selector\\n\")\n file.write(\"//\\n\")\n file.write(\"// node Image[3]:\\n\")\n file.write(\"// title: value = Surface\\n\")\n file.write(\"// depth: value = 24\\n\")\n file.write(\"// window: position = (0.0000,0.0400), size = 0.9929x0.9276\\n\")\n\n # Close the file.\n file.close()",
"def write_netcdf(self):\n\n if self.eigr2d_fnames:\n dim_fname = self.eigr2d_fnames[0]\n elif self.gkk_fnames:\n dim_fname = self.gkk_fnames[0]\n elif self.fan_fnames:\n dim_fname = self.fan_fnames[0]\n else:\n raise Exception('Need at least one file to read the dimensions: ' +\n 'EIGR2D, GKK, or FAN. ' +\n 'How did you even get there?')\n\n create_directory(self.nc_output)\n\n # Write on a NC files with etsf-io name convention\n with nc.Dataset(self.nc_output, 'w') as ds:\n\n # FIXME Reading from EIGR2D file is too restrictive\n # Should handle GKK.nc only\n # Read dim from first EIGR2D file\n dim = nc.Dataset(dim_fname, 'r')\n\n # Determine nsppol from reading occ\n nsppol = len(dim.variables['occupations'][:,0,0])\n if nsppol > 1:\n warnings.warn(\"nsppol > 1 has not been tested.\")\n mband = len(dim.dimensions['product_mband_nsppol']) / nsppol\n\n # Create dimension\n ds.createDimension('number_of_atoms',\n len(dim.dimensions['number_of_atoms']))\n ds.createDimension('number_of_kpoints',\n len(dim.dimensions['number_of_kpoints']))\n ds.createDimension('product_mband_nsppol',\n len(dim.dimensions['product_mband_nsppol']))\n\n ds.createDimension('cartesian', 3)\n ds.createDimension('cplex', 2)\n ds.createDimension('number_of_qpoints', self.nqpt)\n ds.createDimension('number_of_spins',\n len(dim.dimensions['number_of_spins']))\n ds.createDimension('max_number_of_states',self.nband_se)\n ds.createDimension('number_of_modes',\n 3*len(dim.dimensions['number_of_atoms']))\n\n ds.createDimension('number_of_temperature', len(self.temperatures))\n ds.createDimension('number_of_frequencies', len(self.omegase))\n\n # Write data on the eigenvalues\n data = ds.createVariable('reduced_coordinates_of_kpoints', 'd',\n ('number_of_kpoints','cartesian'))\n data[:,:] = dim.variables['reduced_coordinates_of_kpoints'][:,:]\n\n data = ds.createVariable(\n 'eigenvalues','d',\n ('number_of_spins','number_of_kpoints','max_number_of_states'))\n data[:,:,:] = dim.variables['eigenvalues'][:,:,:]\n\n data = ds.createVariable(\n 'occupations','i',\n ('number_of_spins','number_of_kpoints','max_number_of_states'))\n data[:,:,:] = dim.variables['occupations'][:,:,:]\n\n data = ds.createVariable(\n 'primitive_vectors', 'd',\n ('cartesian','cartesian'))\n\n data[:,:] = dim.variables['primitive_vectors'][:,:]\n\n dim.close()\n\n # Write epc data\n data = ds.createVariable('renormalization_is_dynamical', 'i1')\n data[:] = self.renormalization_is_dynamical\n\n data = ds.createVariable('broadening_is_dynamical', 'i1')\n data[:] = self.broadening_is_dynamical\n\n data = ds.createVariable('temperatures','d',\n ('number_of_temperature'))\n data[:] = self.temperatures[:]\n\n data = ds.createVariable('smearing', 'd')\n data[:] = self.smearing\n\n data = ds.createVariable('omegase', 'd',\n ('number_of_frequencies'))\n data[:] = self.omegase[:]\n\n # qpt\n data = ds.createVariable(\n 'reduced_coordinates_of_qpoints','d',\n ('number_of_qpoints', 'cartesian'))\n if self.qred is not None:\n data[...] = self.qred[...]\n\n # omega\n data = ds.createVariable(\n 'phonon_mode_frequencies','d',\n ('number_of_qpoints', 'number_of_modes'))\n if self.omega is not None:\n data[...] 
= self.omega[...]\n\n\n # ZPR\n zpr = ds.createVariable(\n 'zero_point_renormalization','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states'))\n\n #fan = ds.createVariable(\n # 'fan_zero_point_renormalization','d',\n # ('number_of_spins', 'number_of_kpoints',\n # 'max_number_of_states'))\n\n #ddw = ds.createVariable(\n # 'ddw_zero_point_renormalization','d',\n # ('number_of_spins', 'number_of_kpoints',\n # 'max_number_of_states'))\n\n if self.zero_point_renormalization is not None:\n # FIXME number of spin\n zpr[0,:,:] = self.zero_point_renormalization[:,:].real\n #fan[0,:,:] = self.fan_zero_point_renormalization[:,:].real\n #ddw[0,:,:] = self.ddw_zero_point_renormalization[:,:].real\n\n # TDR\n data = ds.createVariable(\n 'temperature_dependent_renormalization','d',\n ('number_of_spins','number_of_kpoints',\n 'max_number_of_states','number_of_temperature'))\n\n if self.temperature_dependent_renormalization is not None:\n # FIXME number of spin\n data[0,:,:,:] = (\n self.temperature_dependent_renormalization[:,:,:].real)\n\n # ZPR\n data = ds.createVariable(\n 'zero_point_broadening','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states'))\n\n if self.zero_point_broadening is not None:\n # FIXME number of spin\n data[0,:,:] = self.zero_point_broadening[:,:].real\n\n zpr_modes = ds.createVariable(\n 'zero_point_renormalization_by_modes','d',\n ('number_of_modes', 'number_of_spins', 'number_of_kpoints',\n 'max_number_of_states'))\n\n if self.zero_point_renormalization_modes is not None:\n zpr_modes[:,0,:,:] = (\n self.zero_point_renormalization_modes[:,:,:])\n\n # TDB\n data = ds.createVariable(\n 'temperature_dependent_broadening','d',\n ('number_of_spins','number_of_kpoints',\n 'max_number_of_states','number_of_temperature'))\n\n if self.temperature_dependent_broadening is not None:\n # FIXME number of spin\n data[0,:,:,:] = (\n self.temperature_dependent_broadening[:,:,:].real)\n\n # ZSE\n self_energy = ds.createVariable('self_energy','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies', 'cplex'))\n\n if self.self_energy is not None:\n\n # FIXME number of spin\n self_energy[0,:,:,:,0] = self.self_energy[:,:,:].real\n self_energy[0,:,:,:,1] = self.self_energy[:,:,:].imag\n\n # ZSE fan active\n self_energy_fan_active = ds.createVariable('self_energy_fan_active','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies', 'cplex'))\n\n if self.self_energy_fan_active is not None:\n\n # FIXME number of spin\n self_energy_fan_active[0,:,:,:,0] = self.self_energy_fan_active[:,:,:].real\n self_energy_fan_active[0,:,:,:,1] = self.self_energy_fan_active[:,:,:].imag\n\n # ZSE static\n data = ds.createVariable(\n 'self_energy_static','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states'))\n\n if self.self_energy_static is not None:\n # FIXME number of spin\n data[0,:,:] = self.self_energy_static[:,:].real\n\n # TSE\n self_energy_T = ds.createVariable(\n 'self_energy_temperature_dependent','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies',\n 'number_of_temperature', 'cplex'))\n\n if self.self_energy_T is not None:\n # FIXME number of spin\n self_energy_T[0,:,:,:,:,0] = self.self_energy_T[:,:,:,:].real\n self_energy_T[0,:,:,:,:,1] = self.self_energy_T[:,:,:,:].imag\n\n # TSE static\n data = ds.createVariable(\n 'self_energy_static_T','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 
'number_of_temperature'))\n\n if self.self_energy_static_T is not None:\n # FIXME number of spin\n data[0,:,:,:] = self.self_energy_static_T[:,:,:].real\n\n # ZSF\n spectral_function = ds.createVariable(\n 'spectral_function','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies'))\n\n if self.spectral_function is not None:\n # FIXME number of spin\n spectral_function[0,:,:,:] = self.spectral_function[:,:,:]\n\n spectral_function_T = ds.createVariable(\n 'spectral_function_temperature_dependent','d',\n ('number_of_spins', 'number_of_kpoints',\n 'max_number_of_states', 'number_of_frequencies',\n 'number_of_temperature'))\n\n # TSF\n if self.spectral_function_T is not None:\n # FIXME number of spin\n spectral_function_T[0,:,:,:,:] = (\n self.spectral_function_T[:,:,:,:])\n return",
"def write_crystfel_geom(self, filename, *,\n data_path='/entry_1/instrument_1/detector_1/data',\n mask_path=None, dims=('frame', 'modno', 'ss', 'fs'),\n adu_per_ev=None, clen=None, photon_energy=None):\n write_crystfel_geom(\n self, filename, data_path=data_path, mask_path=mask_path, dims=dims,\n adu_per_ev=adu_per_ev, clen=clen, photon_energy=photon_energy,\n )\n\n if self.filename == 'No file':\n self.filename = filename",
"def plot_save_normal_modes_Nashgyro(datadir, R, NL, KL, OmK, Omg, PVxydict=None, params={}, dispersion=[],\n sim_type='gyro', save_pkl=True, rm_images=True, save_ims=True, gapims_only=True,\n eigval=None, eigvect=None, matrix=None, lattice_color='k'):\n NP = len(R)\n if eigval is None and eigvect is None:\n matrix = dynamical_matrix_gyros(R, NL, KL, OmK, Omg, params=params, PVxydict=PVxydict, dispersion=[],\n sublattice_labels=[])\n # plot_complex_matrix(matrix, show=True)\n print 'Finding eigenvals/vects of dynamical matrix...'\n eigval, eigvect = eig_vals_vects(matrix)\n\n # Plot histogram of eigenvalues if saving at all\n if datadir is not None:\n fig, DOS_ax = leplt.initialize_DOS_plot(eigval, 'gyro', pin=-5000)\n plt.savefig(datadir + 'eigval_hist.png')\n\n # prepare DOS output dir\n if save_ims:\n dio.ensure_dir(datadir + 'DOS/')\n\n #####################################\n # SAVE eigenvals/vects as txt file\n #####################################\n if save_pkl:\n print 'Saving eigvals/vects as txt files...'\n output = open(datadir + 'eigval.pkl', 'wb')\n pickle.dump(eigval, output)\n output.close()\n\n output = open(datadir + 'eigvect.pkl', 'wb')\n pickle.dump(eigvect, output)\n output.close()\n # np.savetxt()\n\n if save_ims:\n print 'Saving DOS images...'\n #####################################\n # Prepare for plotting\n #####################################\n # Determine b,c\n omk = OmK[np.where(abs(OmK) > 0.)[0], np.where(abs(OmK) > 0)[1]]\n omg = Omg.ravel()[np.where(abs(Omg.ravel()) > 0.)[0]]\n if (omg == 0).all():\n '''Omg is purely zeros'''\n omg = Omg.ravel()[0:NP]\n omg_purely_zero = True\n else:\n omg_purely_zero = False\n\n # omk > 0, omg > 0: b=0,c=1\n # omk > 0, omg < 0: b=1,c=0\n # omk < 0, omg > 0: b=1,c=1\n # omk < 0, omg < 0: b=0,c=0\n # Check if uniform/homogenous omk and omg\n # and make string for labelling spin direction and whether\n # pendulum is standing or hanging.\n if (omk == omk[0]).all() and not omg_purely_zero:\n if (omg == omg[0]).all():\n do_bc = True\n # Find values for b and c\n if omk[0] > 0:\n if omg[0] > 0:\n bstr = 'b=0'\n cstr = 'c=1'\n btmp = 0\n ctmp = 1\n elif omg[0] < 0:\n bstr = 'b=1'\n cstr = 'c=0'\n btmp = 1\n ctmp = 0\n elif omk[0] < 1:\n if omg[0] > 0:\n bstr = 'b=1'\n cstr = 'c=1'\n btmp = 1\n ctmp = 1\n elif omg[0] < 0:\n bstr = 'b=0'\n cstr = 'c=0'\n btmp = 0\n ctmp = 0\n else:\n do_bc = False\n bstr = 'b and/or c are mixed sign'\n cstr = ''\n else:\n do_bc = False\n bstr = 'b and/or c are mixed sign or Omg=0.'\n cstr = ''\n\n bcstr = bstr + ', ' + cstr\n\n if do_bc:\n # temporarily store the homogeneous pin frequency as pin\n pin = omg[0]\n else:\n pin = -5000\n\n print 'plotting...'\n fig, DOS_ax, eig_ax = leplt.initialize_eigvect_DOS_header_plot(eigval, R, sim_type=sim_type)\n leplt.lattice_plot(R, NL, KL, eig_ax, linecolor=lattice_color)\n\n # Make strings for spring, pin, k, and g values\n if do_bc:\n springstr_Hz = '{0:.03f}'.format(omk[0] / (2. * np.pi))\n pinstr_Hz = '{0:.03f}'.format(omg[0] / (2. 
* np.pi))\n else:\n springstr_Hz = ''\n pinstr_Hz = ''\n\n # If small system, find analytic eigval solns\n if len(R) < 5:\n try:\n exact_eigvals = check_explicit_eigvals(len(R), b[0], c[0], params={})\n exactstr = '\\n' + str(exact_eigvals)\n except:\n print 'Function check_explicit_eigvals is not written yet...'\n exactstr = ''\n else:\n exactstr = ''\n\n text2show = 'spring = ' + springstr_Hz + ' Hz, pin = ' + pinstr_Hz + ' Hz\\n' + \\\n exactstr + '\\n' + bcstr\n fig.text(0.4, 0.1, text2show, horizontalalignment='center', verticalalignment='center')\n\n # Add schematic of hanging/standing top spinning with dir\n if do_bc:\n schem_ax = plt.axes([0.85, 0.0, .025 * 5, .025 * 7], axisbg='w')\n # drawing\n schem_ax.plot([0., 0.2], [1 - btmp, btmp], 'k-')\n schem_ax.scatter([0.2], [btmp], s=150, c='k')\n schem_ax.arrow(0.2, btmp, -(-1) ** ctmp * 0.06, 0.3 * (-1) ** (btmp + ctmp), \\\n head_width=0.3, head_length=0.1, fc='b', ec='b')\n wave_x = np.arange(-0.07 * 5, 0.0, 0.001)\n wave_y = 0.1 * np.sin(wave_x * 100) + 1. - btmp\n schem_ax.plot(wave_x, wave_y, 'k-')\n schem_ax.set_xlim(-0.1 * 5, .21 * 5)\n schem_ax.set_ylim(-0.1 * 7, .21 * 7)\n # schem_ax.axis('equal')\n schem_ax.axis('off')\n\n #####################################\n # SAVE eigenvals/vects as images\n #####################################\n\n done_pngs = len(glob.glob(datadir + 'DOS/DOS_*.png'))\n # check if normal modes have already been done\n if not done_pngs:\n totN = len(eigval)\n if done_pngs < totN:\n # decide on which eigs to plot\n if gapims_only:\n middle = int(round(totN * 0.25))\n ngap = int(round(np.sqrt(totN)))\n todo = range(middle - ngap, middle + ngap)\n else:\n todo = range(int(round(len(eigval) * 0.5)))\n\n dmyi = 0\n for ii in todo:\n if np.mod(ii, 50) == 0:\n print 'plotting eigvect ', ii, ' of ', len(eigval)\n fig, [scat_fg, scat_fg2, p, f_mark, lines_12_st], cw_ccw = \\\n leplt.construct_eigvect_DOS_plot(R, fig, DOS_ax, eig_ax, eigval, eigvect, ii, sim_type, NL, KL,\n marker_num=0, color_scheme='default', sub_lattice=-1, )\n plt.savefig(datadir + 'DOS/DOS_' + '{0:05}'.format(dmyi) + '.png')\n scat_fg.remove()\n scat_fg2.remove()\n p.remove()\n f_mark.remove()\n lines_12_st.remove()\n dmyi += 1\n\n fig.clf()\n plt.close('all')\n\n ######################\n # Save DOS as movie\n ######################\n imgname = datadir + 'DOS/DOS_'\n names = datadir.split('/')[0:-1]\n # Construct movie name from datadir path string\n movname = ''\n for ii in range(len(names)):\n if ii < len(names) - 1:\n movname += names[ii] + '/'\n else:\n movname += names[ii]\n\n movname += '_DOS'\n\n subprocess.call(\n ['./ffmpeg', '-i', imgname + '%05d.png', movname + '.mov', '-vcodec', 'libx264', '-profile:v', 'main',\n '-crf', '12', '-threads', '0', '-r', '100', '-pix_fmt', 'yuv420p'])\n\n if rm_images:\n # Delete the original images\n print 'Deleting folder ' + datadir + 'DOS/'\n subprocess.call(['rm', '-r', datadir + 'DOS/'])\n\n return eigvect, eigval, matrix",
"def write_gro(gro_file):\n \n f = open(gro_file,'a')\n st = \"gro write by hoomd-blue step=%i\\n\"%get_step()\n f.write(st)\n f.write(' '+str(len(system.particles))+'\\n')\n for i, p in enumerate(system.particles):\n #st = \"%5d%-5s%5s%5d%8.3f%8.3f%8.3f%8.4f%8.4f%8.4f\\n\"%(i,'NON',p.type,p.postion,p.velocity)\n st = \"%5d%-5s%5s%5d%8.3f%8.3f%8.3f%8.4f%8.4f%8.4f\\n\"%(i+1,'SOL',p.type,i+1,\n p.position[0],p.position[1],p.position[2],p.velocity[0],p.velocity[1],p.velocity[2])\n f.write(st)\n st = \"%10.5f%10.5f%10.5f\\n\"%(system.box.Lx,system.box.Ly,system.box.Lz)\n f.write(st)\n f.close()",
"def save_to_pln(self, name=None, dir=None, pref=None, disp=True):\n\n # If name is not provided, use the NAME field value + .pln.\n if not name:\n name = \".\".join([self.name.value, \"pln\"])\n\n # If the gui flag is set, prepend a 'gen_' on the filename to specify\n # this was generated by the GUI.\n if pref:\n name = \"_\".join([pref, name])\n\n # If dir is provided add this directory path to the filename.\n if dir:\n name = os.path.join(dir, name)\n home = os.getcwd()\n name = os.path.join(home, name)\n\n self.pln_filename = name\n\n if os.path.isfile(self.pln_filename):\n os.remove(self.pln_filename)\n\n with open(self.pln_filename, 'w') as new_pln:\n if disp:\n print(\"writing to {0}\".format(self.pln_filename))\n\n # Use the pln_template dictionary to create the file sections and\n # look up the names that may be in the parameters dictionary.\n for section, fields in self.pln_template.items():\n\n # Recreate the section header for each new section.\n comment = \"#* \"\n separator = \"=\" * 48\n new_pln.write(\"\".join([comment, separator, \"\\n\"]))\n new_pln.write(\"\".join([comment, section, \"\\n\"]))\n new_pln.write(\"\".join([comment, separator, \"\\n\"]))\n\n # Look for attributes that match the field names defined in\n # the .pln template.\n for f in fields:\n try:\n exo_param = getattr(self, f.lower())\n except AttributeError:\n continue\n\n # exo_param is now an ExoParameter object. Add the\n # keyword & value pair to the .pln file.\n if is_empty(exo_param.value):\n exo_param.value = exo_param.default\n self._write_pln_line(new_pln, f, exo_param.value)\n\n # Add additional keywords for uncertainties and references\n # if they are present in the current ExoParameter.\n if exo_param.uncertainty:\n uf = \"\".join([\"U\", f])\n self._write_pln_line(new_pln,\n uf,\n exo_param.uncertainty\n )\n if exo_param.uncertainty_upper:\n ufd = \"\".join([\"U\", f, \"D\"])\n self._write_pln_line(new_pln,\n ufd,\n exo_param.uncertainty_upper\n )\n if exo_param.reference:\n fref = \"\".join([f, \"REF\"])\n self._write_pln_line(new_pln,\n fref,\n exo_param.reference\n )\n if exo_param.url:\n furl = \"\".join([f, \"URL\"])\n self._write_pln_line(new_pln,\n furl,\n exo_param.url\n )",
"def write(self,grfile):\n grfile = open(grfile + \".gr\",\"w\")\n \n for i in range(1,self.n_max):\n grfile.write(str(self.r[i]) + \"\\t\" + str(self.gr[i]) + \"\\n\")\n\n grfile.close()",
"def output(self):\n to_write = 'C '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x'])+' '\n to_write += str(self.offset[1] + self.def_field['y'])+' '\n to_write += str(self.def_field['radius'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write",
"def write_cells_shp(self,shpname,extra_fields=[],overwrite=True):\n # assemble a numpy struct array with all of the info \n # seems that having an object references in there is unstable,\n # so pass geometries in a list separately.\n base_dtype =[('poly_id1',np.int32),\n ('area',np.float64),\n ('volume',np.float64),\n ('depth_mean',np.float64)]\n\n try:\n cell_depths_max = self.cell_depths_max()\n extra_fields.append( ('depth_max',np.float64, lambda i: cell_depths_max[i]) )\n except:\n pass\n\n for efi in range(len(extra_fields)):\n fname,fdata=extra_fields[efi]\n base_dtype.append( (fname,fdata.dtype) )\n\n cell_data = np.zeros(self.Ncells(), dtype=base_dtype)\n\n for efi in range(len(extra_fields)):\n fname,fdata=extra_fields[efi]\n cell_data[fname]=fdata\n\n self.update_cell_edges()\n\n cell_geoms = [None]*self.Ncells()\n \n cell_data['depth_mean'] = self.cell_depths()\n cell_data['area']=self.cells_area()\n cell_data['volume']=cell_data['depth_mean']*cell_data['area']\n cell_data['poly_id1'] = 1+np.arange(self.Ncells())\n\n for poly_id in range(self.Ncells()):\n if poly_id % 500 == 0:\n print( \"%0.2g%%\"%(100.*poly_id/self.Ncells()) )\n\n # older code put this together manually.\n cell_geoms[poly_id]=self.cell_polygon(poly_id)\n\n print( cell_data.dtype )\n wkb2shp.wkb2shp(shpname,input_wkbs=cell_geoms,fields=cell_data,\n overwrite=overwrite)"
]
| [
"0.6520951",
"0.6063423",
"0.57705635",
"0.53065544",
"0.5199533",
"0.51919234",
"0.51606506",
"0.5131439",
"0.5114998",
"0.5106586",
"0.5006485",
"0.49765608",
"0.49460092",
"0.4878289",
"0.48778144",
"0.48677453",
"0.48314908",
"0.48107162",
"0.4794123",
"0.4792132",
"0.47909006",
"0.47810093",
"0.4751283",
"0.4747598",
"0.4745936",
"0.4721115",
"0.47185758",
"0.47140402",
"0.47072896",
"0.47055006"
]
| 0.82842046 | 0 |
Invalid module names raise Exceptions. | def test_invalid_module_names(self):
self.assertRaises(ValueError, Module, '')
self.assertRaises(ValueError, Module, 'names-with-dashes')
self.assertRaises(ValueError, Module, 'names with spaces')
self.assertRaises(ValueError, Module, 'names.with,punctuations!')
self.assertRaises(ValueError, Module, '4names_starting_with_numbers')
self.assertRaises(ValueError, Module, 'names.with.reserved.keywords') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_normal_module_name(assert_errors, filename, default_options):\n visitor = WrongModuleNameVisitor(default_options, filename=filename)\n visitor.run()\n\n assert_errors(visitor, [])",
"def test_broken_error_module(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule",
"def test_broken_module(self):\r\n module = self.descriptor._xmodule\r\n self.assertIsInstance(module, ErrorModule)",
"def test_import_string_missing_module(self):\n invalid_module = 'ttgn.nonexistent_module.foobar'\n with pytest.raises(ImportError):\n utils.import_string(invalid_module)",
"def test_call_external_invalid_module(self):\n string = 'GET /v1/CALL_EXTERNAL|asdfasdfasdf:asdfasdfasdf:[]|'\n self.assertRaises(ImportError, parser.call_external_functions, string)",
"def test_module_exceptions(self):\n\n # these modules have normal types...\n normal_types = ['sys', 'clr', 'builtins', 'winreg', 'mmap', 'nt', 'posix', '_thread']\n builtins = [x for x in sys.builtin_module_names if x not in normal_types]\n for module in builtins:\n mod = __import__(module)\n\n for attrName in dir(mod):\n val = getattr(mod, attrName)\n if isinstance(val, type) and issubclass(val, Exception):\n self.assertTrue(repr(val).startswith(\"<class \"))\n if val in (BlockingIOError, OSError): continue\n val.x = 2\n self.assertEqual(val.x, 2)",
"def test_module_names(filename, error, default_options):\n Checker.parse_options(default_options)\n checker = Checker(tree=ast.parse(''), file_tokens=[], filename=filename)\n _line, _col, error_text, _type = next(checker.run())\n\n assert int(error_text[3:6]) == error.code",
"def test_import_string_missing_class_or_attribute(self):\n valid_module = 'ttgn.pokedex'\n invalid_class = 'NonexistentClass'\n with pytest.raises(ImportError) as error:\n utils.import_string('{}.{}'.format(valid_module, invalid_class))\n assert 'Module {} has no class or attribute {}'.format(\n valid_module, invalid_class) == str(error.value)",
"def _check_tokens_are_valid(format_string, message):\n named_tokens = re.findall(r\"{(\\w*)}\", format_string)\n invalid_tokens = [x for x in named_tokens if x.lower() not in _valid_tokens]\n if invalid_tokens:\n msg = message\n msg += \" [{0}]. \".format(\", \".join(invalid_tokens))\n msg += 'Did you check your \"modules.yaml\" configuration?'\n raise RuntimeError(msg)",
"def test_import_error_message_maintained(self):\n settings = GrpcSettings({\n 'INTERCEPTORS': [\n ('tests.invalid_module.InvalidClassName', {})\n ]\n })\n with self.assertRaises(ImportError):\n _ = settings.INTERCEPTORS",
"def test_bad_names(self):\n self.do_test_bad_name('', 'tmp/frog')\n self.do_test_bad_name('.b', 'tmp/frog')\n self.do_test_bad_name('a b', 'tmp/frog') # FAILS\n self.do_test_bad_name('a-b', 'tmp/frog') # FAILS",
"def test_create_module_invalid(self):\n payload = {'name': ''}\n res = self.client.post(MODULES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_check_non_existing_module(self) -> None:\n with self.assertRaises(ClientErrorException):\n check_module(\"non_existing_module\")",
"def test_broken_error_descriptor(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule",
"def testFour(self):\n with self.assertRaises(ValueError):\n resolveNames(\"qconfname_CH_1.QCONF\", (\"_CH_2\", \"_CH_3\", \"_CH_1_snakemask\"))",
"def raiseNameError(text):\n pattern = re.compile(\"[a-zA-Z]\")\n if not pattern.match(text):\n raise Exception(\"Invalid Name Entered\")",
"def test_valid_python_raise_exception(self):\n \n data_file = testutils.DataFile(\"integration_module_valid_raise\")\n\n rtn = self.run_cmd(\"pm install --force --single module --install_name test_raise --name %s --auto\" % str(data_file))\n assert(rtn.return_code == 0)\n\n rtn = self.run_cmd(\"test_raise test\")\n\n assert(rtn.return_code == 246)",
"def testNoSuchModule(self):\n self.assertRaises(messages.DefinitionNotFoundError,\n messages.find_definition,\n 'does.not.exist',\n importer=self.Importer)",
"def is_valid_package_module_name(name):\n if \".\" in name:\n for part in name.split(\".\"):\n if not is_valid_package_module_name(part):\n return False\n elif len(name):\n if name[0] not in _first_letter_for_valid_name:\n return False\n\n if len(set(name).difference(_char_set_for_valid_name)):\n return False\n else:\n return False\n return True",
"def test_import_string_invalid_path(self):\n invalid_path = 'some invalid module path'\n with pytest.raises(ImportError) as error:\n utils.import_string(invalid_path)\n assert '{} doesn\\'t look like a module path'.format(\n invalid_path) == str(error.value)",
"def validateName(name):\n if not name:\n # This happens when the name is an existing directory\n raise BadCommand('Please give the name of a layer.')\n # 'setup' is a valid controller name, but when paster controller is ran\n # from the root directory of a project, importing setup will import the\n # project's setup.py causing a sys.exit(). Blame relative imports\n if name != 'setup' and can_import(name):\n raise BadCommand(\n \"\\n\\nA module named '%s' is already present in your \"\n \"PYTHON_PATH.\\nChoosing a conflicting name will likely cause \"\n \"import problems in\\nyour controller at some point. It's \"\n \"suggested that you choose an\\nalternate name, and if you'd \"\n \"like that name to be accessible as\\n'%s', add a route \"\n \"to your projects config/routing.py file similar\\nto:\\n\"\n \" map.connect('%s', controller='my_%s')\" \\\n % (name, name, name, name))\n return True",
"def test_import_fails_with_no_modules(self):\n with self.assertRaises(ValueError):\n LazyImportTester([])",
"def test_deprecated_modules(self):\n\n deprecated_modules_present = False\n\n deprecated_modules = [\n \"game_assets\",\n \"models\",\n \"world\",\n \"modular_assets\",\n ]\n\n for path in self.application_files:\n for module in deprecated_modules:\n module_text = open(path).read()\n found_reference = False\n if \"import %s\" % module in module_text:\n found_reference = True\n if \"from %s\" % module in module_text:\n found_reference = True\n\n if found_reference:\n print(\"Found '%s' reference in %s\" % (module, path))\n deprecated_modules_present = True\n\n self.assertFalse(deprecated_modules_present)",
"def _check_name(self):\n\t\tpass",
"def test_handling_importing_a_bugged_module(enaml_module):\n name, _, path = enaml_module\n with open(path, 'a') as f:\n f.write('\\nraise RuntimeError()')\n\n assert name not in sys.modules\n with imports():\n with pytest.raises(RuntimeError):\n importlib.import_module(name)\n\n assert name not in sys.modules",
"def test_check_soft_dependencies_raises_error():\n with pytest.raises(ModuleNotFoundError, match=r\".* soft dependency .*\"):\n _check_soft_dependencies(\"unavailable_module\")\n\n with pytest.raises(ModuleNotFoundError, match=r\".* soft dependency .*\"):\n _check_soft_dependencies(\"unavailable_module_1\", \"unavailable_module_2\")",
"def testModuleName(self):\n prop = recipe_api.Property(param_name='foo')\n prop.bind('$fake_repo/fake_module', MODULE_PROPERTY,\n 'fake_repo::fake_module')\n\n with self.assertRaises(ValueError):\n prop.bind('$fake_repo/wrong_module', MODULE_PROPERTY,\n 'fake_repo::fake_module')\n\n with self.assertRaises(ValueError):\n prop.bind('$fake_repo/fake_module', RECIPE_PROPERTY,\n 'fake_repo::fake_module:example')",
"def get_exceptions_module():\n return exceptions",
"def sanitize_module_name(module_name):\n module_name = module_name.replace('-', '_').replace('.', '_')\n if module_name[0] not in string.ascii_letters:\n module_name = \"a\" + module_name\n return module_name",
"def check_imported_name(self, name, field, sourcepath):\n if len(name) > 80:\n raise NameFormatError(\"ERROR: %s: %s name too long: %s\"\n % (sourcepath, field, name))"
]
| [
"0.70322967",
"0.68136823",
"0.657802",
"0.6511772",
"0.64713454",
"0.64292055",
"0.6387931",
"0.6375257",
"0.6323564",
"0.6284643",
"0.62667674",
"0.6255107",
"0.61453974",
"0.6095324",
"0.6056169",
"0.6034116",
"0.60115004",
"0.60109293",
"0.598281",
"0.59621185",
"0.5941867",
"0.59282213",
"0.58934975",
"0.5869986",
"0.58474535",
"0.58409166",
"0.5788381",
"0.57744056",
"0.57478714",
"0.57346356"
]
| 0.8091457 | 0 |
Modules can be initialized with strings. | def test_module_initialization(self):
m = Module('foo')
assert str(m) == 'foo'
m = Module('foo.bar')
assert str(m) == 'foo.bar'
m = Module('foo.bar.qux')
assert str(m) == 'foo.bar.qux' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def modules():",
"def __init__(self):\n self.modules = {}",
"def __init__(self, name):\r\n super(Module, self).__init__()\r\n self.name = name",
"def __init__(self, name):\n super(Module, self).__init__()\n self.name = name",
"def setModule(name, module):",
"def __init_module(self, module_type: str) -> Module:\n\n module = {\n \"button\": Button,\n \"dial\": Dial,\n \"display\": Display,\n \"env\": Env,\n \"gyro\": Gyro,\n \"ir\": Ir,\n \"led\": Led,\n \"mic\": Mic,\n \"motor\": Motor,\n \"speaker\": Speaker,\n \"ultrasonic\": Ultrasonic,\n }.get(module_type)\n return module",
"def MODULES(self):\n pass",
"def _try_set_module(self, module: str):\n loaded_module = next((x for x in self.modules if x.__name__ == module), None)\n if loaded_module is not None:\n self.current_module = module\n self._real_module = loaded_module\n print(\n asciistuff.Cowsay(\n \"Master of puppets is pulling the strings: using \"\n + self.current_module\n )\n )\n else:\n print(\"No module named {} available\".format(module))",
"def __init__(self, name, dynloadmodule = None):\n\n\t\tif dynloadmodule is not None:\n\t\t\tobject.__setattr__(self, 'module', dynloadmodule)\n\t\t\tobject.__setattr__(self, 'name', name)\n\t\telse:\n\t\t\tindex = name.rfind('.')\n\t\t\tif index == -1:\n\t\t\t\traise ValueError('Need a module attribut as NAME.')\n\n\t\t\tmodulename = name[:index]\n\t\t\tattributename = name[index+1:]\n\n\t\t\tdynloadmodule = Dynload(modulename)\n\t\t\tobject.__setattr__(self, 'module', dynloadmodule)\n\t\t\tobject.__setattr__(self, 'name', attributename)",
"def __init__(self, groups: Iterable[List[str]]) -> None:\n self.module_map = {} # type: Dict[str, str]\n for names in groups:\n self.module_map.update(make_module_translation_map(names))\n self.translations = {} # type: Dict[Tuple[str, str], str]\n self.used_names = set() # type: Set[str]",
"def _import_string(module_name, content):\n\n # assign module a name that's not likely to conflict\n safe_name = 'confab.data.' + module_name\n\n # check if module is already loaded\n existing = sys.modules.get(safe_name)\n if existing:\n return existing\n\n # try to load module\n module = imp.new_module(safe_name)\n exec content in module.__dict__\n return module",
"def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)",
"def __init__(self, module):\n om.ExplicitComponent.__init__(self)\n self.module_name = module.name",
"def __init__(self, module):\n om.ExplicitComponent.__init__(self)\n self.module_name = module.name",
"def initialize(self, module_name):\n # Load.\n self._initialize(module_name)",
"def FromModules (cls, modules = None):\n\n tomb = cls ()\n if modules:\n for module in modules:\n tomb.Add (module)\n else:\n tomb.Add (__package__ or __name__.partition ('.') [0])\n return tomb",
"def __init__(self, name=None, module_name=None, **kwargs):\n self.name = name\n self.module_name = module_name\n self.kwargs = kwargs",
"def __init__(self, name, loader):\n _, packagename = dotpath_split(name)\n super(ModuleSpec, self).__init__(name, loader,\n origin=packagename)",
"def test_module(self):\n pass",
"def on_modules_command(sender, command, label, args):\n plugin_header(sender, \"Modules\")\n msg(sender, \", \".join([((\"&a\" if mod in shared[\"modules\"] else \"&c\") + mod) for mod in shared[\"load_modules\"]]))",
"def __init__(self, moduleName, moduleType, userCreatedName):\n\n # set class variables\n self.modName = moduleName\n self.moduleType = moduleType\n self.rootMod = None\n self.name = userCreatedName\n self.originalName = userCreatedName\n self.outlinerControls = []\n self.controlTypes = []\n\n # get the directory path of the tools\n settings = QtCore.QSettings(\"Epic Games\", \"ARTv2\")\n self.toolsPath = settings.value(\"toolsPath\")\n self.iconsPath = settings.value(\"iconPath\")",
"def __init__(self, paths=sys.path):\n self.paths = paths\n self.module = PythonModuleList()\n for path in paths:\n self.module.addModule( PythonModuleOnDisk(path) )\n\n self.moduleResolver = SimpleModuleResolver(self.module)\n\n self.needed_modules = {}",
"def __init__(self,MODULE_NAME='auto_amqp',config=None):\n newConfig = { MODULE_NAME : DEFAULT_CONFIG}\n Configurable.__init__(self,newConfig)\n self.load_conf(config)\n self.MODULE_NAME = MODULE_NAME",
"def set_module(obj, mod):\n if not isinstance(mod, str):\n raise TypeError(\"The mod argument should be a string\")\n obj.__module__ = mod",
"def test_invalid_module_names(self):\n self.assertRaises(ValueError, Module, '')\n self.assertRaises(ValueError, Module, 'names-with-dashes')\n self.assertRaises(ValueError, Module, 'names with spaces')\n self.assertRaises(ValueError, Module, 'names.with,punctuations!')\n self.assertRaises(ValueError, Module, '4names_starting_with_numbers')\n self.assertRaises(ValueError, Module, 'names.with.reserved.keywords')",
"def _rai_module(self) -> str:\n module = [\"--loadmodule\", CONFIG.redisai]\n if self.queue_threads:\n module.append(f\"THREADS_PER_QUEUE {self.queue_threads}\")\n if self.inter_threads:\n module.append(f\"INTER_OP_PARALLELISM {self.inter_threads}\")\n if self.intra_threads:\n module.append(f\"INTRA_OP_PARALLELISM {self.intra_threads}\")\n return \" \".join(module)",
"def __init__(self, name):\n\n\t\tobject.__setattr__(self, 'name', name)\n\t\tobject.__setattr__(self, 'module', None)\n\n\t\tobject.__setattr__(self, 'ready', True) # value doesn't matter",
"def __init__(self, verbose):\n self.modules = maus_cpp.globals.get_monte_carlo_mice_modules()\n self.verbose = verbose",
"def _SetRequiredCoreModules(self, textEdit, frame, tab, controls):\n self.text_editor = self.system_modules[textEdit]\n self.mf = self.system_modules[frame]\n self.mt = self.system_modules[tab]\n self.mc = self.system_modules[controls]",
"def set_modules(self, modules=[]):\n m = ldb.Message()\n m.dn = ldb.Dn(self.ldb, \"@MODULES\")\n m[\"@LIST\"] = \",\".join(modules)\n self.ldb.add(m)\n self.ldb = samba.Ldb(self.filename)"
]
| [
"0.67689395",
"0.6621376",
"0.656986",
"0.6532882",
"0.6476829",
"0.6445392",
"0.6406558",
"0.636682",
"0.6138554",
"0.61111265",
"0.61097723",
"0.6005294",
"0.5972358",
"0.5972358",
"0.5923163",
"0.58846354",
"0.58816534",
"0.5881171",
"0.58698267",
"0.5860582",
"0.58604735",
"0.5845561",
"0.58359414",
"0.5782058",
"0.57749665",
"0.57709545",
"0.5762971",
"0.5724561",
"0.57131106",
"0.5709577"
]
| 0.73997223 | 0 |
Deploy the contents of `BUILD_DIR` to Netlify, using `NETLIFY_SITE_ID` and `NETLIFY_API_TOKEN` if available. | def deploy(self):
netlify_cli = getattr(settings, "NETLIFY_PATH", None)
if not netlify_cli:
raise CommandError("NETLIFY_PATH is not defined in settings")
deployment = Deployment()
deployment.save()
command = [netlify_cli, "deploy"]
command.append("--dir={}".format(settings.BUILD_DIR))
command.append("--prod")
command.append('--message="Wagtail Deployment #{}"'.format(deployment.pk))
site_id = getattr(settings, "NETLIFY_SITE_ID", None)
if site_id:
command.append("--site={}".format(site_id))
auth_token = getattr(settings, "NETLIFY_API_TOKEN", None)
if auth_token:
command.append("--auth={}".format(auth_token))
subprocess.call(command) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deploy():\n build()\n collect()\n commit()\n push()",
"def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')",
"def deploy():\n build()\n rsync_project(\n local_dir=os.path.abspath(env.config['destination']) + \"/\",\n remote_dir=env.remote_dir,\n delete=True,\n extra_opts='--exclude=\".DS_Store\"',\n )",
"def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()",
"def deploy():\n build()\n copy()\n install()",
"def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()",
"def build():\n local('python manage.py build \\\n --skip-static --settings={{ project_name }}.settings.production')\n\n # hack to move whole directory over to build\n local('cd {} && mv static/* build/'.format(settings.BASE_DIR))",
"def deploy():\n with cd(env.REMOTE_CODEBASE_PATH):\n run(\"git pull\")\n run(\"go build -o app\")\n sudo(\"supervisorctl reload\")",
"def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())",
"def deploy():\n upload_static()\n compile_code()\n upload_code()\n upload_supervisor()\n start_server()",
"def deploy():\n git_pull()\n# build_virtualenv()\n# collectstatic()\n migrate()\n# reload_gunicorn()\n# restart_celery()\n puts(green(\"Deployment done!\"))",
"def deploy():\n remote_dir = os.path.abspath(os.path.join(REMOTE_BASE_DIR, REPO_NAME))\n \n with settings(warn_only=True):\n if run(\"test -d %s\" % (remote_dir)).failed:\n puts(red(\"[Repo %s does not exist on remote at: %s]\" % (REPO_NAME, remote_dir)))\n with cd(REMOTE_BASE_DIR):\n run(\"git clone %s %s\" % (REPO_URL, REPO_NAME))\n\n puts(yellow(\"[Write logs]\"))\n run(\"echo '-----------------------------' > %s\" % REMOTE_ERR_FILE)\n run(\"echo `date` >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' > %s\" % REMOTE_LOG_FILE)\n run(\"echo `date` >> %s\" % REMOTE_LOG_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_LOG_FILE)\n\n puts(yellow(\"[Update repo: %s]\" % REPO_NAME))\n with cd(remote_dir):\n run(\"git pull origin master >> %s 2>> %s\" %\n (REMOTE_LOG_FILE, REMOTE_ERR_FILE))\n\n # reminder new static files\n puts(yellow('Do not forget to run collect staticfiles on DJANGO server.'))",
"def build(ctx):\n if 'cicd' in run('hostname').stdout.strip():\n # Check if we are executing the task from an aws instance\n if requests.get('http://169.254.169.254/latest/meta-data/').status_code == 200:\n git_ref_source = os.environ.get('GIT_SOURCE_BRANCH')\n git_ref_target = os.environ.get('GIT_TARGET_BRANCH')\n run('git fetch --all')\n run('git checkout {}'.format(git_ref_target))\n\n \n tar_name = \"Frontend\"\n #'wordpress-{}-en_CA.tar.gz'.format(WORDPRESS_VERSION)\n #tar_file = open(tar_name, 'wb')\n #tar_file.write(wp_tar.content)\n #tar_file.close()\n\n #run('tar -xzf {}'.format(tar_name))\n \n # Download the postmedia source-code and patches/config\n #clone(git_ref_target, git_ref_source)\n\n # merge (if applicable) and create the release\n if git_ref_source:\n git_pr_id = os.getenv('GIT_PR_ID')\n github_util.put('repos/{}/{}/pulls/{}/merge'.format(GIT_ORG, GIT_REPO, git_pr_id), params={'merge_method': 'squash'})\n version = github_util.get_next_rc()\n github_util.set_release(target_commitish='master', tag=version, prerelease=True)\n build_type = 'release candidate'\n else:\n version = github_util.get_next_hf()\n github_util.set_release(git_ref_target, version)\n build_type = 'hotfix'\n\n # package and upload to S3\n author = os.environ.get('GIT_AUTHOR')\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n tarball = package(notes, version)\n print(\"No upload to S3\")\n #upload(tarball, S3_BUCKET_STAGE)\n else:\n author = input('please enter your name for the release notes: ')\n\n valid_snapshot_name = False\n while not valid_snapshot_name:\n snapshot_name = input('please enter a name for your snapshot: ')\n snapshot_name = snapshot_name.lower()\n snapshot_name = re.sub('-', '_', snapshot_name)\n\n # domain sections cannot be longer than 63 characters, so snapshot\n # name cannot be longer than 26 (63 minus snapshot-20190128-1713-homesanddesign - 37)\n if (len(snapshot_name) <= 26):\n valid_snapshot_name = True\n else:\n print(\"{} is too long. Please enter a new snapshot name of 28 characters or less.\".format(snapshot_name))\n\n build_type = 'snapshot'\n \n version = '{}_{}_{}'.format(build_type, snapshot_name,\n datetime.datetime.now().strftime(\"%Y%m%d_%H%M\"))\n print(\"Building snapshot {}\".format(version))\n git_ref_target = 'master'\n git_ref_source = 'HEAD'\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n os.chdir('/opt/')\n if os.path.exists(WORK_DIR):\n os.system('rm -rf {}'.format(WORK_DIR))\n os.mkdir(WORK_DIR)\n tarball = package(notes, version)\n print (\"No upload to S3\")\n #upload(tarball, S3_BUCKET_DEV)",
"def deploy(env_type):\n render(env_type)\n\n bucket = _config['deploy'][env_type]['bucket']\n notice('deploying to %s' % bucket)\n\n # Sync to S3\n deploy_path = join(_config['project_path'], 'build', 'website')\n _s3cmd_sync(deploy_path, bucket)",
"def deploy():\n _git_pull()\n _migrate()\n _collect_static_files()\n _restart_webserver()",
"def deploy():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')",
"def deploy():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n with settings(warn_only=True):\n maintenance_up()\n \n checkout_latest()\n gzip_assets()\n deploy_to_s3()\n maintenance_down()",
"def deploy_django_project(self):\n\n if self.no_files:\n return\n\n local_dir = \"{0}\".format(self.app_dir)\n app_dir = \"{0}\".format(self.app_remote_dir)\n\n if not exists(app_dir):\n mkdir(app_dir)\n\n zip_name = make_zip(local_dir, self.app_name)\n put(zip_name, self.app_remote_dir)\n\n with cd(self.app_remote_dir):\n run(\"unzip -o {0}\".format(zip_name))\n\n os.remove(zip_name)",
"def deploy_installer(l_dir=env.local_directory):\n env.local_directory = l_dir\n deploy_app(host_=env.myhost)",
"def deploy():\n setup()\n builddir = get_build_dir()\n if sys.platform == 'win32':\n # Support cygwin rsync on windows:\n build_path = cygpath(slashed(builddir))\n else:\n build_path = slashed(builddir)\n rsync_project(env.admin_webroot, build_path, exclude=\".*\", delete=True)\n sudo(\"chmod -R 755 %(admin_webroot)s\" % env)",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())",
"def deploy():\n return do_deploy(do_pack())",
"def deploy(n = 10):\n upload_current_release()\n install_requisites()\n create_redirects()\n make_symlinks()\n symlink_current_release()\n sudo('service nginx reload')\n gc_deploys(n)",
"def build(target_dir):\n prepare_demo_site(target_dir)\n\n patch_config(\n target_dir, (\"# CREATE_FULL_ARCHIVES = False\", \"CREATE_FULL_ARCHIVES = True\")\n )\n\n with cd(target_dir):\n __main__.main([\"build\"])",
"def deploy():",
"def deploy():\n filepath = do_pack()\n if (filepath is None):\n return False\n return do_deploy(filepath)",
"def task_deploy():\n client = boto3.client(\"lambda\")\n\n def upload_build():\n if function_exists(client):\n update_lambda_function(client)\n else:\n create_lambda_function(client)\n\n return {\"actions\": [upload_build], \"file_dep\": [f\"{DIST_DIR}/build.zip\"]}",
"def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)",
"def upload():\n\n # Our credentials are only available from within the main repository and not forks.\n # We need to prevent uploads from all BUT the branches in the main repository.\n # Pull requests and master-branches of forks are not allowed to upload.\n is_pull_request = (\n (\"TRAVIS_PULL_REQUEST\" in os.environ and os.environ[\"TRAVIS_PULL_REQUEST\"] != \"false\") or\n \"APPVEYOR_PULL_REQUEST_NUMBER\" in os.environ\n )\n if is_pull_request:\n click.echo(\"Refusing to upload artifacts from a pull request!\")\n return\n\n if \"AWS_ACCESS_KEY_ID\" in os.environ:\n subprocess.check_call([\n \"aws\", \"s3\", \"cp\",\n \"--acl\", \"public-read\",\n DIST_DIR + \"/\",\n \"s3://snapshots.mitmproxy.org/{}/\".format(UPLOAD_DIR),\n \"--recursive\",\n ])\n\n upload_pypi = (\n TAG and\n \"WHEEL\" in os.environ and\n \"TWINE_USERNAME\" in os.environ and\n \"TWINE_PASSWORD\" in os.environ\n )\n if upload_pypi:\n whl = glob.glob(join(DIST_DIR, 'mitmproxy-*-py3-none-any.whl'))[0]\n click.echo(\"Uploading {} to PyPi...\".format(whl))\n subprocess.check_call([\n \"twine\",\n \"upload\",\n whl\n ])\n\n upload_docker = (\n (TAG or BRANCH == \"master\") and\n \"DOCKER\" in os.environ and\n \"DOCKER_USERNAME\" in os.environ and\n \"DOCKER_PASSWORD\" in os.environ\n )\n if upload_docker:\n docker_tag = \"dev\" if BRANCH == \"master\" else VERSION\n\n click.echo(\"Uploading Docker image to tag={}...\".format(docker_tag))\n subprocess.check_call([\n \"docker\",\n \"login\",\n \"-u\", os.environ[\"DOCKER_USERNAME\"],\n \"-p\", os.environ[\"DOCKER_PASSWORD\"],\n ])\n subprocess.check_call([\n \"docker\",\n \"push\",\n \"mitmproxy/mitmproxy:{}\".format(docker_tag),\n ])"
]
| [
"0.6667937",
"0.6642163",
"0.66397226",
"0.63277745",
"0.6307375",
"0.6095078",
"0.6087253",
"0.6028257",
"0.6025697",
"0.6015445",
"0.59656906",
"0.5894895",
"0.587419",
"0.5867786",
"0.57129294",
"0.5681408",
"0.56705046",
"0.56375337",
"0.56278193",
"0.5605738",
"0.56032896",
"0.56032896",
"0.56032896",
"0.55815077",
"0.55787843",
"0.55665845",
"0.5536336",
"0.5527947",
"0.55085397",
"0.5508104"
]
| 0.7264105 | 0 |
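The deploy record above builds a Netlify CLI invocation as a list of arguments and hands it to subprocess.call without inspecting the result. The sketch below is illustrative only: the function name, its parameters, and the switch to subprocess.run with check=True are assumptions added here, not part of the original snippet.

# Minimal sketch of the same command-building pattern; names are placeholders.
import subprocess

def run_netlify_deploy(netlify_cli, build_dir, site_id=None, auth_token=None, message=""):
    command = [netlify_cli, "deploy", "--dir={}".format(build_dir), "--prod"]
    if message:
        command.append('--message="{}"'.format(message))
    if site_id:
        command.append("--site={}".format(site_id))
    if auth_token:
        command.append("--auth={}".format(auth_token))
    # run(..., check=True) raises CalledProcessError on a non-zero exit code,
    # so a failed deploy is not silently ignored.
    subprocess.run(command, check=True)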
Add Jacchia 1977 empirical corrections to [O] and [O2]. | def _O_and_O2_correction(self, alt):
_O_and_O2_correction_fast(alt, self.Texo) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def water_correction_energies(fname, se_h2o_hof, se_h_hof, ref_h2o_ener,\n se_au=False, ref_au=True):\n check_for_keys(fname, REFEK, NATMK, SEEK)\n with h5.File(fname, 'r') as ifi:\n # This calculates the reference heat of formation\n # Note the reference is assumed to be in eH\n correction = ifi[REFEK][:] - ((ifi[NATMK][:]//3) * ref_h2o_ener)\n if ref_au:\n correction *= 627.509\n if se_au:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof) * 627.509\n else:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof)\n return correction",
"def add_folia_correction(self, doc, sentence):\n words = []\n\n # Tokenize both the original and the edited form.\n original_tokens = tokenize(self.original)\n edited_tokens = tokenize(self.edited)\n\n # If we're dealing with single words (e.g. spelling errors), create the correction directly on the word.\n if len(original_tokens) == 1 and len(edited_tokens) == 1:\n w = sentence.add(folia.Word)\n words.append(w)\n n = folia.New(doc, self.edited)\n o = folia.Original(doc, self.original)\n for i, a in enumerate(self.annotations):\n if i == 0:\n correction = w.add(folia.Correction, n, o, cls=a['unit'], generate_id_in=sentence)\n else:\n n_new = folia.New(doc, self.edited)\n o_new = folia.Original(doc, self.original)\n correction = o.add(folia.Correction, n_new, o_new, cls=a['unit'], generate_id_in=sentence)\n self.add_features(correction, a)\n # We are dealing with more than one word, or an insertion/deletion. Create word elements for each token.\n else:\n n = folia.New(doc)\n o = folia.Original(doc)\n for w in edited_tokens:\n word = n.add(folia.Word, w, generate_id_in=sentence)\n words.append(word)\n for w in original_tokens:\n o.add(folia.Word, w, generate_id_in=sentence)\n for i, a in enumerate(self.annotations):\n if i == 0:\n correction = sentence.add(folia.Correction, n, o, cls=a['unit'], generate_id_in=sentence)\n else:\n n_new = folia.New(doc)\n o_new = folia.Original(doc)\n for w in edited_tokens:\n n_new.add(folia.Word, w, generate_id_in=sentence)\n for w in original_tokens:\n o_new.add(folia.Word, w, generate_id_in=sentence)\n correction = o.add(folia.Correction, n_new, o_new, cls=a['unit'], generate_id_in=sentence)\n self.add_features(correction, a)\n\n return words",
"def exo2():",
"def apply_corrections(self, liwc_data, liwc_scores):\n print('Applying corrections..')\n if self.author == 'yarkoni' and self.version == 2001:\n liwc_data_copy = liwc_data.copy()\n liwc_data_copy['Pronoun'] = np.log(liwc_data_copy['Pronoun'] + 1)\n liwc_data_copy['Social'] = liwc_data_copy['Social'] ** (1 / 2)\n liwc_scores['Agreeableness'] = liwc_data_copy.swifter.apply(\n self.get_agreeableness, axis=1, result_type='expand'\n )\n liwc_data_copy = liwc_data.copy()\n liwc_data_copy['Social'] = liwc_data_copy['Social'] ** (1 / 2)\n liwc_scores['Extraversion'] = liwc_data_copy.swifter.apply(\n self.get_extraversion, axis=1, result_type='expand'\n )\n elif self.author == 'golbeck' and self.version == 2007:\n liwc_data_copy = liwc_data.copy()\n liwc_data_copy['Exclam'] = np.log(liwc_data_copy['Exclam'] + 1)\n liwc_data_copy['hear'] = np.log(liwc_data_copy['hear'] + 1)\n liwc_scores['Neuroticism'] = liwc_data_copy.swifter.apply(\n self.get_neuroticism, axis=1, result_type='expand'\n )\n liwc_data_copy = liwc_data.copy()\n liwc_data_copy['you'] = liwc_data_copy['you'] ** (1 / 3)\n liwc_scores['Agreeableness'] = liwc_data_copy.swifter.apply(\n self.get_agreeableness, axis=1, result_type='expand'\n )\n return liwc_scores",
"def autocorrect(self, prefix, N):\r\n def valid_edits():\r\n letters, length = \"abcdefghijklmnopqrstuvwxyz\".upper(), len(prefix)\r\n yield from (\r\n #add letter\r\n prefix[:i] + l + prefix[i:] for i in range(length+1)\r\n for l in letters\r\n )\r\n yield from (\r\n #delete letter\r\n prefix[:i] + prefix[i+1:] for i in range(length)\r\n )\r\n yield from (\r\n #replace letter\r\n prefix[:i] + l + prefix[i+1:] for i in range(length)\r\n for l in letters\r\n )\r\n yield from (\r\n #swap letters\r\n prefix[:i]+prefix[j]+prefix[i+1:j]+prefix[i]+prefix[j+1:]\r\n for i in range(length) for j in range(i+1, length)\r\n )\r\n\r\n edits = []\r\n for e in valid_edits():\r\n f = self.find(e)\r\n if f and f.frequency: edits.append((e, f.frequency))\r\n edits.sort(key=lambda x: x[1])\r\n\r\n #Ensure no duplicates by using set.\r\n l = set(self.autocomplete(prefix, N))\r\n while len(l) < N and edits: l.add(edits.pop()[0])\r\n return list(l)",
"def test_jcamp2():\n dic = ng.bruker.read_jcamp(os.path.join(DATA_DIR, \"bruker_2d\", \"acqu2s\"))\n tf = tempfile.mktemp(dir='.')\n ng.bruker.write_jcamp(dic, tf)\n ndic = ng.bruker.read_jcamp(tf)\n assert dic_similar(dic, ndic)\n os.remove(tf)",
"def SpecialCodes(self):\n if sre.search(r\"[^aeiouy]e\\b\", self.wd): # nonsyllabic final e after C\n if ((not self.isPlural or self.wd[-2] not in SIBILANTS) and\n (not self.isPast or self.wd[-2] not in 'dt')):\n self.wd = self.wd[:-1] + encode(self.wd[-1])\n if not sre.search(r\"[aeiouy]\", self.wd): # any vowel left??\n self.wd = self.wd[:-1] + 'e' # undo the encoding\n self.wd = self.CiVcomb.sub(handleCiV, self.wd)\n self.wd = self.CCpair.sub(handleCC, self.wd)\n self.wd = self.VyVcomb.sub(handleVyV, self.wd)",
"def determine_aa_change( self ):\n for k,v in self.obj_mi.hash_isoforms.iteritems(): #k = string that is isoform_id, v = Isoform instance\n obj_tt = self.create_transcript_instances( k )\n\n #METHOD 1: get the original codon & mutated codon\n # orig_codon = obj_tt.retrieve_containing_codon( self.snv_start, self.snv_strand )\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # mut_codon = obj_tt.retrieve_containing_codon( self.snv_start, self.snv_strand )\n\n\n #METHOD 2: get the mutated codon\n full_pos = self.snv_chrom + ':' + str( self.snv_start ) + '-' + str( self.snv_end )\n hash_codon_info = obj_tt.get_mutated_codon( self.base_orig, self.base_alt, full_pos, self.snv_strand, True ) #output is hash -> {'codon_orig': codon_orig, 'codon_mut': codon_mut, 'aa_orig': aa_orig, 'aa_mut': aa_mut}\n\n\n\n ##TEST:: show the AA change based on mutation\n # print \"hash_codon_info: \"\n # print hash_codon_info\n\n # print \"gene strand & snv strand: \", obj_tt.iso_sj.strand, \" & \", self.snv_strand\n # print \"original base > mutated base: \", self.base_orig, \" > \", self.base_alt\n # print \"original codon > mutated codon: \", hash_codon_info['codon_orig'], \" > \", hash_codon_info['codon_mut']\n # print \"original AA > mutated AA: \", hash_codon_info['aa_orig'], \" > \", hash_codon_info['aa_mut']\n\n\n ##TEST:: determine consequence\n print \"GV_DAAC 1: \"\n obj_tt.alteration_consequence( self.base_alt, self.get_genomic_range(), self.snv_strand, self.alt_type )\n \n\n ##TEST METHOD - SEE WHAT STEPS I NEED TO PERFORM\n #TEST:: retrieve the original base & the mutated base\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # orig_base = obj_tt.arr_nuc_seq[ i_genome_pos ]\n # print \"k = \", k, \" & i_genome_pos = \", i_genome_pos, \" | orig_base = \", orig_base, \" & double_check = \", self.base_orig, \" & iso_sj.strand = \", obj_tt.iso_sj.strand, \" & mut strand = \", self.snv_strand\n # hash_orig_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_orig = \", hash_orig_codon\n # get_orig_codon = obj_tt.arr_nuc_seq[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] + 1 ]\n # str_orig_codon = ''.join( get_orig_codon ) if obj_tt.iso_sj.strand > 0 else ''.join( get_orig_codon[::-1] )\n # print \"seq_orig = \", str_orig_codon, \" & type = \", type( get_orig_codon ), \" & rf = \", obj_tt.arr_rf[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] + 1 ], \" & list_orig_codon = \", get_orig_codon\n\n # ##TEST:: make mutation\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # hash_mut_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_muts = \", hash_mut_codon\n # get_mut_codon = obj_tt.arr_nuc_seq[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] + 1 ]\n # str_mut_codon = ''.join( get_mut_codon ) if obj_tt.iso_sj.strand > 0 else ''.join( get_mut_codon[::-1] )\n # print \"seq_muts = \", str_mut_codon, \" & type = \", type( get_mut_codon ), \" & rf = \", obj_tt.arr_rf[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] + 1 ], \" & list_mut_codon = \", get_mut_codon \n\n # ##TEST:: retrieve \n # print \"AA: from \", Seq( str_orig_codon ).translate( to_stop = False ), \">\", Seq( str_mut_codon ).translate( to_stop = False )\n\n # try:\n # i_genome_pos = obj_tt.arr_genome_pos.index( self.snv_start )\n # orig_base = obj_tt.arr_nuc_seq[ i_genome_pos ]\n # print \"k = \", k, \" & i_genome_pos = \", i_genome_pos, \" | 
orig_base = \", orig_base, \" & double_check = \", self.base_orig, \" & iso_sj.strand = \", obj_tt.iso_sj.strand, \" & mut strand = \", self.snv_strand\n # hash_orig_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_orig = \", hash_orig_codon\n # get_orig_codon = obj_tt.arr_nuc_seq[ hash_orig_codon['i_genome_start']:hash_orig_codon['i_genome_end'] ]\n # print \"seq_orig = \", get_orig_codon\n\n # ##TEST:: make mutation\n # obj_tt.arr_nuc_seq[ i_genome_pos ] = self.base_alt\n # hash_mut_codon = obj_tt.find_containing_codon( self.snv_start )\n # print \"hash_muts = \", hash_mut_codon\n # get_mut_codon = obj_tt.arr_nuc_seq[ hash_mut_codon['i_genome_start']:hash_mut_codon['i_genome_end'] ]\n # print \"seq_muts = \", get_mut_codon \n\n # ##TEST:: retrieve \n # print \"AA: from \", Seq( orig_codon ).translate( to_stop = False ), \">\", Seq( mut_codon ).translate( to_stop = False )\n # except:\n # print \"ERROR:: for \", k, \", position does not exist: \", self.snv_start\n # continue\n\n print \"////////////////////\\n\"",
"def exercise_b2_82():\r\n pass",
"def main():\n\n args = get_args()\n seq = args.seq.upper()\n codon_to_aa = {\n 'AAA': 'K',\n 'AAC': 'N',\n 'AAG': 'K',\n 'AAU': 'N',\n 'ACA': 'T',\n 'ACC': 'T',\n 'ACG': 'T',\n 'ACU': 'T',\n 'AGA': 'R',\n 'AGC': 'S',\n 'AGG': 'R',\n 'AGU': 'S',\n 'AUA': 'I',\n 'AUC': 'I',\n 'AUG': 'M',\n 'AUU': 'I',\n 'CAA': 'Q',\n 'CAC': 'H',\n 'CAG': 'Q',\n 'CAU': 'H',\n 'CCA': 'P',\n 'CCC': 'P',\n 'CCG': 'P',\n 'CCU': 'P',\n 'CGA': 'R',\n 'CGC': 'R',\n 'CGG': 'R',\n 'CGU': 'R',\n 'CUA': 'L',\n 'CUC': 'L',\n 'CUG': 'L',\n 'CUU': 'L',\n 'GAA': 'E',\n 'GAC': 'D',\n 'GAG': 'E',\n 'GAU': 'D',\n 'GCA': 'A',\n 'GCC': 'A',\n 'GCG': 'A',\n 'GCU': 'A',\n 'GGA': 'G',\n 'GGC': 'G',\n 'GGG': 'G',\n 'GGU': 'G',\n 'GUA': 'V',\n 'GUC': 'V',\n 'GUG': 'V',\n 'GUU': 'V',\n 'UAA': 'Stop',\n 'UAC': 'Y',\n 'UAG': 'Stop',\n 'UAU': 'Y',\n 'UCA': 'S',\n 'UCC': 'S',\n 'UCG': 'S',\n 'UCU': 'S',\n 'UGA': 'Stop',\n 'UGC': 'C',\n 'UGG': 'W',\n 'UGU': 'C',\n 'UUA': 'L',\n 'UUC': 'F',\n 'UUG': 'L',\n 'UUU': 'F',\n }\n\n k = 3\n\n # 1: for loop\n # protein = ''\n # for codon in [seq[i:i + k] for i in range(0, len(seq), k)]:\n # aa = codon_to_aa.get(codon, '-')\n # if aa == 'Stop':\n # break\n # protein += aa\n\n # 2: list comprehension, slice to remove Stop\n # codons = [seq[i:i + k] for i in range(0, len(seq), k)]\n # aa = [codon_to_aa.get(codon, '-') for codon in codons]\n # if 'Stop' in aa:\n # aa = aa[:aa.index('Stop')]\n # print(''.join(aa))\n\n # 3: L.C. -> map(), slice -> takewhile\n # codons = map(lambda i: seq[i:i + k], range(0, len(seq), k))\n # aa = map(lambda codon: codon_to_aa.get(codon, '-'), codons)\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 4: combine map()\n # aa = map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k)))\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 5: combine all\n # print(''.join(\n # takewhile(\n # lambda c: c != 'Stop',\n # map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k))))))\n\n # 6: Seq\n print(str(Seq(args.seq).translate()).replace('*', ''))",
"def o2sat(s, pt):\n\n t = sw.T68conv(pt) + Kelvin\n # Eqn (4) of Weiss 1970 (the constants are used for units of ml O2/kg).\n a = (-177.7888, 255.5907, 146.4813, -22.2040)\n b = (-0.037362, 0.016504, -0.0020564)\n lnC = (a[0] + a[1] * (100. / t) + a[2] * np.log(t / 100.) + a[3] *\n (t / 100.) +\n s * (b[0] + b[1] * (t / 100.) + b[2] * (t / 100.) ** 2))\n osat = np.exp(lnC) * 1000. / 22.392 # Convert from ml/kg to um/kg.\n\n \"\"\"The Apparent Oxygen Utilization (AOU) value was obtained by subtracting\n the measured value from the saturation value computed at the potential\n temperature of water and 1 atm total pressure using the following\n expression based on the data of Murray and Riley (1969):\n\n ln(O2 in µmol/kg) = - 173.9894 + 255.5907(100/TK) + 146.4813 ln(TK/100) -\n 22.2040(TK/100) + Sal [-0.037362 + 0.016504(TK/100) - 0.0020564(TK/100)2],\n where TK is temperature in °K and Sal in the Practical Salinity (SP) scale.\n \"\"\"\n return osat",
"def translateORFtoAAs(self,sequence,number):\r\n AAStringfromORF = str()\r\n startingM = int()\r\n for i in range(0,len(sequence)-2,3):\r\n if sequence[i:i+3] != \"AUG\":\r\n pass\r\n else:\r\n startingM = i\r\n for i in range(startingM,len(sequence)-2,3):\r\n x = self.tabletoTranslate(sequence[i:i+3])\r\n AAStringfromORF+=x\r\n if x == \"-\":\r\n self.listofSequences.append(AAStringfromORF.rstrip(\"-\").lstrip().rstrip())\r\n AAStringfromORF = str()\r\n break",
"def _calculate_correction(self, telid):",
"def y_o_question(analysis):\n\n #init\n phrase = []\n\n #Recovering the subject\n subject = element_rebuilding.nom_struc_rebuilding(analysis.sn)\n\n if analysis.sv:\n #Recovering the end of the sentence\n phrase = element_rebuilding.end_question_rebuilding(phrase, analysis.sv, analysis.sn, analysis.aim)\n\n #We need special processing to find the position of the subject\n if analysis.sv[0].state == VerbalGroup.negative:\n phrase = phrase[0:2] + subject + phrase[2:]\n else:\n phrase = [phrase[0]] + subject + phrase[1:]\n\n #Recovering subsentences\n for s in analysis.sv[0].vrb_sub_sentence:\n phrase = phrase + sub_process(s)\n else:\n phrase = subject\n\n #Eliminate redundancies if there are\n phrase = other_functions.eliminate_redundancy(phrase)\n\n #If it is a question about the origin\n if analysis.aim == 'origin':\n return phrase + ['from'] + ['?']\n\n return phrase + ['?']",
"def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: {'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD",
"def codon_merge(codon):\r\n first = [];second = [];third = []\r\n f = 0;s = 0;t = 0;order = 0;\r\n for elem in codon:\r\n if elem[1] == 1:\r\n first.append(elem[0])\r\n elif elem[1] == 2:\r\n second.append(elem[0])\r\n elif elem[1] == 3:\r\n third.append(elem[0])\r\n else:\r\n print \"codon_merge error\"\r\n #f\r\n if first[0] == 'r': f = 1 #ㄱ\r\n elif first[0] == 's': f = 3 #ㄴ\r\n elif first[0] == 'e': f = 4 #ㄷ\r\n elif first[0] == 'f': f = 6 #ㄹ\r\n elif first[0] == 'a': f = 7 #ㅁ\r\n elif first[0] == 'q': f = 8 #ㅂ\r\n elif first[0] == 't': f = 10 #ㅅ\r\n elif first[0] == 'd': f = 12 #ㅇ\r\n elif first[0] == 'w': f = 13 #ㅈ\r\n elif first[0] == 'c': f = 15 #ㅊ\r\n elif first[0] == 'z': f = 16 #ㅋ\r\n elif first[0] == 'x': f = 17 #ㅌ\r\n elif first[0] == 'v': f = 18 #ㅍ\r\n elif first[0] == 'g': f = 19 #ㅎ\r\n elif first[0] == 'R': f = 2 #ㄲ\r\n elif first[0] == 'T': f = 11 #ㅆ\r\n elif first[0] == 'E': f = 5 #ㄸ\r\n elif first[0] == 'Q': f = 9 #ㅃ\r\n elif first[0] == 'W': f = 14 #ㅉ\r\n #s\r\n if len(second) == 1:\r\n if second[0] == 'k': s = 1 #ㅏ\r\n elif second[0] == 'i': s = 3 #ㅑ\r\n elif second[0] == 'j': s = 5 #ㅓ\r\n elif second[0] == 'u': s = 7 #ㅕ\r\n elif second[0] == 'h': s = 9 #ㅗ\r\n elif second[0] == 'y': s = 13 #ㅛ\r\n elif second[0] == 'n': s = 14 #ㅜ\r\n elif second[0] == 'b': s = 18 #ㅠ\r\n elif second[0] == 'm': s = 19 #ㅡ\r\n elif second[0] == 'l': s = 21 #ㅣ\r\n elif second[0] == 'o': s = 2 #ㅐ\r\n elif second[0] == 'O': s = 4 #ㅒ\r\n elif second[0] == 'p': s = 6 #ㅔ\r\n elif second[0] == 'P': s = 8 #ㅖ\r\n elif len(second) == 2:\r\n if second[0] == 'h' and second[1] == 'k': s = 10 #ㅘ\r\n elif second[0] == 'h' and second[1] == 'o': s = 11 #ㅙ\r\n elif second[0] == 'h' and second[1] == 'l': s = 12 #ㅚ\r\n elif second[0] == 'n' and second[1] == 'j': s = 15 #ㅝ\r\n elif second[0] == 'n' and second[1] == 'p': s = 16 #ㅞ\r\n elif second[0] == 'n' and second[1] == 'l': s = 17 #ㅟ\r\n elif second[0] == 'm' and second[1] == 'l': s = 20 #ㅢ\r\n #t\r\n if len(third) == 0:\r\n t = 1\r\n elif len(third) == 1:\r\n if third[0] == 'r': t = 2 #ㄱ\r\n elif third[0] == 's': t = 5 #ㄴ\r\n elif third[0] == 'e': t = 8 #ㄷ\r\n elif third[0] == 'f': t = 9 #ㄹ\r\n elif third[0] == 'a': t = 17 #ㅁ\r\n elif third[0] == 'q': t = 18 #ㅂ\r\n elif third[0] == 't': t = 20 #ㅅ\r\n elif third[0] == 'd': t = 22 #ㅇ\r\n elif third[0] == 'w': t = 23 #ㅈ\r\n elif third[0] == 'c': t = 24 #ㅊ\r\n elif third[0] == 'z': t = 25 #ㅋ\r\n elif third[0] == 'x': t = 26 #ㅌ\r\n elif third[0] == 'v': t = 27 #ㅍ\r\n elif third[0] == 'g': t = 28 #ㅎ\r\n elif len(third) == 2:\r\n if third[0] == 'r' and third[1] == 'r': t = 3 #ㄲ\r\n elif third[0] == 'r' and third[1] == 't': t = 4 #ㄳ\r\n elif third[0] == 's' and third[1] == 'w': t = 6 #ㄵ\r\n elif third[0] == 's' and third[1] == 'g': t = 7 #ㄶ\r\n elif third[0] == 'f' and third[1] == 'r': t = 10 #ㄺ\r\n elif third[0] == 'f' and third[1] == 'a': t = 11 #ㄻ\r\n elif third[0] == 'f' and third[1] == 'q': t = 12 #ㄼ\r\n elif third[0] == 'f' and third[1] == 't': t = 13 #ㄽ\r\n elif third[0] == 'f' and third[1] == 'x': t = 14 #ㄾ\r\n elif third[0] == 'f' and third[1] == 'v': t = 15 #ㄿ\r\n elif third[0] == 'f' and third[1] == 'g': t = 16 #ㅀ\r\n elif third[0] == 'q' and third[1] == 't': t = 19 #ㅄ\r\n elif third[0] == 't' and third[1] == 't': t = 21 #ㅆ\r\n\r\n if f >= 1 and s == 0:\r\n if first[0] == 'r': f = 1 #ㄱ\r\n elif first[0] == 's': f = 4 #ㄴ\r\n elif first[0] == 'e': f = 7 #ㄷ\r\n elif first[0] == 'f': f = 9 #ㄹ\r\n elif first[0] == 'a': f = 17 #ㅁ\r\n elif first[0] == 'q': f = 18 #ㅂ\r\n elif first[0] == 't': f = 21 #ㅅ\r\n elif 
first[0] == 'd': f = 23 #ㅇ\r\n elif first[0] == 'w': f = 24 #ㅈ\r\n elif first[0] == 'c': f = 26 #ㅊ\r\n elif first[0] == 'z': f = 27 #ㅋ\r\n elif first[0] == 'x': f = 28 #ㅌ\r\n elif first[0] == 'v': f = 29 #ㅍ\r\n elif first[0] == 'g': f = 30 #ㅎ\r\n elif first[0] == 'R': f = 2 #ㄲ\r\n elif first[0] == 'T': f = 16 #ㅆ\r\n elif first[0] == 'E': f = 6 #ㄸ\r\n elif first[0] == 'Q': f = 13 #ㅃ\r\n elif first[0] == 'W': f = 19 #ㅉ\r\n order = 12592 + f\r\n #print unichr(order)\r\n else:\r\n order = 44032 + (f-1)*21*28 + (s-1)*28 + (t-1)\r\n return unichr(order)",
"def extract_ao_integrals(mol, prefix):\n\n with open(prefix + \"_nuc.txt\", \"w\") as f:\n f.write(\"%.18e\\n\" % mol.energy_nuc())\n\n np.savetxt(prefix + \"_ovl.txt\", mol.intor(\"int1e_ovlp\"))\n\n np.savetxt(prefix + \"_oei.txt\", mol.intor(\"int1e_kin\") + mol.intor(\"int1e_nuc\"))\n\n tei = mol.intor(\"int2e\")\n tei = np.reshape(tei,(2,2,2,2))\n\n\n with open(prefix + \"_tei.txt\", \"w\") as f:\n for i in range(tei.shape[0]):\n for j in range(tei.shape[1]):\n for k in range(tei.shape[2]):\n for l in range(tei.shape[3]):\n f.write(\"%4i %4i %4i %4i %.18e\\n\" % (i, j, k, l, tei[i,j,k,l]))",
"def test_jcamp1():\n dic = ng.bruker.read_jcamp(os.path.join(DATA_DIR, \"bruker_1d\", \"acqus\"))\n assert dic['LFILTER'] == 200\n assert len(dic['PRECHAN']) == 16\n tf = tempfile.mktemp(dir='.')\n ng.bruker.write_jcamp(dic, tf)\n ndic = ng.bruker.read_jcamp(tf)\n assert dic_similar(dic, ndic)\n os.remove(tf)",
"def exercise_b2_27():\r\n pass",
"def add_possessive(results, form, poss):\n if not poss:\n return results\n\n # Add possessive suffix\n suffixes = nounspecs.possessive_suffixes[poss]\n if isinstance(suffixes, str):\n suffixes = [suffixes]\n results2 = []\n for suffix in suffixes:\n for v in results:\n parts = list(x for x in v)\n if suffix[0] != \"@\":\n for x in suffix:\n if x == \"A\":\n p = \"\".join(parts)\n m = re.search(\"([aouAOU])[^yäöYÄÖ]*$\", p)\n if m:\n parts.append(\"a\")\n else:\n parts.append(\"ä\")\n else:\n parts.append(x)\n v = \"\".join(parts)\n else:\n if form not in (\n \"ine-sg\", \"ine-pl\", \"ela-sg\", \"ela-pl\",\n \"all-sg\", \"all-pl\", \"ade-sg\", \"ade-pl\",\n \"abl-sg\", \"abl-pl\", \"tra-sg\", \"tra-pl\",\n \"ess-sg\", \"ess-pl\", \"abe-sg\", \"abe-pl\",\n \"ptv-sg\", \"ptv-pl\", \"cmt\",\n \"inf1-long\", \"inf2\", \"inf3\", \"inf4\", \"inf5\"):\n continue\n if len(v) < 2 or v[-1] not in \"aeiouyäö\":\n continue\n if v[-2] == v[-1]:\n continue\n v += v[-1]\n v += suffix[1:]\n if v:\n results2.append(v)\n return results2",
"def special_danish_accuracy(self, questions):\n ok_vocab = self.get_vocabulary()\n print(\"ok vocab\")\n #print(ok_vocab)\n new_vocab = [(w, self.model.wv.vocab[w]) for w in ok_vocab]\n print(\"not dict\")\n #new_vocab = [w.upper() for w in ok_vocab]\n #print(new_vocab)\n new_vocab = {w.upper(): v for w, v in new_vocab}\n new_vocab = dict(new_vocab)\n #print(new_vocab)\n\n\n\n\n sections, section = [], None\n wrong_predictions = []\n for line_no, line in enumerate(utils.smart_open(questions)):\n # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed\n line = utils.to_unicode(line)\n if line.startswith(': '):\n # a new section starts => store the old section\n if section:\n sections.append(section)\n self.log_accuracy(section)\n section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}\n else:\n if not section:\n raise ValueError(\"missing section header before line #%i in %s\" % (line_no, questions))\n try:\n a, b, c, d, e, expected = [word.upper() for word in line.split()]\n except ValueError:\n logger.info(\"skipping invalid line #%i in %s\", line_no, questions)\n continue\n if a not in new_vocab or b not in new_vocab or c not in new_vocab or d not in new_vocab or e not in new_vocab or expected not in new_vocab:\n #print('not in vocab')\n logger.debug(\"skipping line #%i with OOV words: %s\", line_no, line.strip())\n continue\n\n original_vocab = self.get_vocabulary()\n self.set_vocabulary(new_vocab)\n ignore = {a, b, c, d, e} # input words to be ignored\n\n # find the most likely prediction, ignoring OOV words and input words\n sims = self.most_similar(positive_words=[c, d, e], negative_words=[a, b])\n #print(\"sims\")\n #print(sims)\n self.set_vocabulary(original_vocab)\n\n predicted = sims[0][0]\n predicted = predicted.upper()\n #print(predicted)\n if predicted == expected:\n section['correct'].append((a, b, c, d, e, expected))\n else:\n wrong_message = a + \" \" + b + \" \" + c + \" \" + d + \" \" + e + \", predicted: \" + predicted + \", should have been: \" + expected\n section['incorrect'].append((a, b, c, d, e, expected))\n wrong_predictions.append(wrong_message)\n if section:\n # store the last section, too\n sections.append(section)\n self.log_accuracy(section)\n\n total = {\n 'section': 'total',\n 'correct': sum((s['correct'] for s in sections), []),\n 'incorrect': sum((s['incorrect'] for s in sections), []),\n }\n self.log_accuracy(total)\n sections.append(total)\n print(wrong_predictions)\n return sections",
"def exercise_b2_70():\r\n pass",
"def add_pref_and_suff(data, w2i):\n t_aff = time()\n\n pref_set, suff_set = set(), set()\n for sentence, _ in data:\n for word in sentence:\n pref_set.add(word[:3])\n suff_set.add(word[-3:])\n # prefix\n w2i[PREF_UNK] = len(w2i)\n for pref in pref_set:\n w2i[PREF_FLAG + pref] = len(w2i)\n # suffix\n w2i[SUFF_UNK] = len(w2i)\n for suff in suff_set:\n w2i[SUFF_FLAG + suff] = len(w2i)\n\n print 'time for affixes:', time() - t_aff",
"def exercise_b2_106():\r\n pass",
"def exercise_b2_95():\r\n pass",
"def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein",
"def exercise_b2_93():\r\n pass",
"def add_prep_to_auxiliar_object(doc, aux_obj_dict):\n\n if doc[aux_obj_dict['indexes'][0]-1].pos_ == 'ADP':\n aux_obj_dict['initial_value'] = doc[aux_obj_dict['indexes'][0]-1].text+' '+aux_obj_dict['initial_value']\n aux_obj_dict['replacement_value'] = doc[aux_obj_dict['indexes'][0]-1].text+' '+aux_obj_dict['replacement_value']\n aux_obj_dict['indexes'] = (aux_obj_dict['indexes'][0] - 1, aux_obj_dict['indexes'][1])\n aux_obj_dict['prep_added'] = True\n else:\n aux_obj_dict['prep_added'] = False\n return aux_obj_dict",
"def ot2bio_ote(ote_tag_sequence):\n new_ote_sequence = []\n n_tag = len(ote_tag_sequence)\n prev_ote_tag = '$$$'\n for i in range(n_tag):\n cur_ote_tag = ote_tag_sequence[i]\n assert cur_ote_tag == 'O' or cur_ote_tag == 'T'\n if cur_ote_tag == 'O':\n new_ote_sequence.append(cur_ote_tag)\n else:\n # cur_ote_tag is T\n if prev_ote_tag == 'T':\n new_ote_sequence.append('I')\n else:\n # cur tag is at the beginning of the opinion target\n new_ote_sequence.append('B')\n prev_ote_tag = cur_ote_tag\n return new_ote_sequence",
"def SSt_theo_old(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = a1b*ba1*ca1*ca1 + ba1*ba1*ca1*ca1 + 3*a1b*ba1*ca1*cb + 2*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 2*ba1*ca1*ca1*cb + 2*a1b*ba1*cb*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*a1b*ca1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t3*ba1*ca1*ca1*ca1 + 2*a1b*ba1*ba1*cb + ba1*ba1*ba1*cb + 2*a1b*ba1*ca1*cb + \\\n\t\t\t3*ba1*ba1*ca1*cb + 4*a1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + \\\n\t\t\t2*a1b*ba1*cb*cb + 2*ba1*ba1*cb*cb + 2*a1b*ca1*cb*cb + 4*ba1*ca1*cb*cb + \\\n\t\t\t2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 4*ba1*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t2*ca1*ca1*ca1*ca1 + ba1*ba1*ba1*cb + 3*a1b*ba1*ca1*cb + 3*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\t(ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 3*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + \\\n\t\t\t2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\tba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\tden = a1b*(ba1*ba1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb + ba1*ba1*cb*cb + \n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + 4*ca1*ca1*ca1*cb + \n\t\t\t2*ba1*ba1*cb*cb + 4*ba1*ca1*cb*cb + 2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\ta1b*(2*ba1*ba1*ca1*ca1 + 4*ca1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + \n\t\t\t4*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\ta1b*ba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\ttau = num/den\n\t##\n\treturn tau*np.log(20)"
]
| [
"0.5400359",
"0.537652",
"0.537189",
"0.5369304",
"0.5282336",
"0.52555615",
"0.5243372",
"0.5148587",
"0.51211125",
"0.5108739",
"0.5108302",
"0.5104019",
"0.5102798",
"0.5083969",
"0.5044422",
"0.50319",
"0.498012",
"0.49792227",
"0.49786368",
"0.49592388",
"0.4957492",
"0.49535972",
"0.4926332",
"0.49122587",
"0.49079344",
"0.48813835",
"0.48798186",
"0.48605412",
"0.4857865",
"0.48491877"
]
| 0.60554034 | 0 |
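The correction record above is a thin method that forwards to a module-level helper, _O_and_O2_correction_fast. The sketch below only illustrates why that split is common (the numerical kernel can be compiled and tested on its own); the njit decoration, the class name, and the placeholder arithmetic are assumptions for illustration and are not the actual Jacchia 1977 formulas.

# Pattern sketch only; the arithmetic is a placeholder, not the real correction.
import numpy as np
from numba import njit

@njit
def _correction_fast(alt, Texo):
    out = np.empty_like(alt)
    for i in range(alt.shape[0]):
        out[i] = np.exp(-alt[i] / 100.0) * (Texo / 1000.0)  # placeholder expression
    return out

class Profile:
    def __init__(self, Texo):
        self.Texo = Texo

    def _O_and_O2_correction(self, alt):
        # thin wrapper: keeps the compiled kernel at module scope
        return _correction_fast(alt, self.Texo)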
Show the power of the battery | def describe_battery(self):
print(f"The power of battery is - {self.battery_size}-kWh") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def describe_battery(self):\r\n\t\tprint(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\r\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"Ten samochod ma akumulator o pojemnosci \"\n + str(self.battery_size) + \" kWh.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \"+str(self.battery_size)+\"-KWh battery.\")",
"def describe_battery(self):\n print(f\"This car has a {self.battery_size}-kWh battery.\")",
"def describe_battery(self):\n print(f\"This car has a {self.battery_size}-kWh battery.\")",
"def describe_battery(self):\n print(f\"This car has a {self.battery_size}-kWh battery.\")",
"def describe_battery(self):\n print(f\"This car has a {self.battery_size}-KWh battery.\")",
"def describe_battery(self):\n print(f'This car has a {self.battery_size}-kilowatt-hour battery.')",
"def describe_battery(self):\n print(f\"This car has a battery size of {self.battery_size}-kWh.\")",
"def describe_battery(self):\n self.battery.describe_battery()",
"def battery_plugged(self):\n self._info(\"battery_plugged\")",
"def requestBattery(self) -> None:\n self._protocol.write_line(CMD_BATTERY)",
"def power():\n request_command(tv_command=TVCommand.power)",
"def notifyMe(bat_status, bat_lvl, tofull):\n Notify.init(\"Acpi Notification\")\n \n Notify.Notification.new(\n \"Battery Low\",\n\t\"{} {}%\".format(bat_status, bat_lvl)\n ).show()",
"async def power_on(self) -> str:\n return f\"d2 lamp is {await self.hw_device.lamp('d2')}; halogen lamp is {await self.hw_device.lamp('hal')}\"",
"def battery_level(self):\n return self.battery",
"def MyPower1(self):\n print ('%s' % money + \" : \" + name + \"'s power sucks it! D:\")",
"def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")",
"def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")",
"def getBattery(self):\n raise NotImplementedError",
"def battery(self):\n return self._battery",
"def get_battery(self) -> int:\r\n return self.state['bat']",
"def state(self):\n return self.roller.battery"
]
| [
"0.764002",
"0.7595747",
"0.7550925",
"0.7534905",
"0.7534905",
"0.7534905",
"0.7534905",
"0.7534905",
"0.7534905",
"0.7509543",
"0.7422574",
"0.7422574",
"0.7422574",
"0.7403888",
"0.7403453",
"0.7190697",
"0.71666986",
"0.7127265",
"0.6908638",
"0.6851342",
"0.6814892",
"0.6748854",
"0.66721267",
"0.66528744",
"0.6610375",
"0.6610375",
"0.66035515",
"0.6537641",
"0.6510848",
"0.64621496"
]
| 0.8015619 | 0 |
Check and set the battery size | def upgrade_battery(self):
if self.battery_size <= 75:
self.battery_size = 100 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upgrade_battery(self):\n if self.battery_size != 100:\n self.battery_size = 100",
"def upgrade_battery(self):\n if self.battery_size < 85:\n print(\"Upgrading battery size in progress.\")\n self.battery_size = 85\n else:\n print(\"Battery size is already at max.\")",
"def __init__(self, battery_size=70):\r\n\t\tself.battery_size = battery_size",
"def __init__(self, battery_size=75):\n self.battery_size = battery_size",
"def __init__(self, battery_size=75):\n self.battery_size = battery_size",
"def __init__(self, battery_size=70):\n self.battery_size = battery_size",
"def __init__(self, battery_size=70):\n self.battery_size = battery_size",
"def __init__(self, battery_size=70):\n self.battery_size = battery_size",
"def __init__(self, battery_size= 75):\n self.battery_size = battery_size",
"def __init__(self, battery_size=40):\n self.battery_size = battery_size",
"def __init__(self,battery_size=85):\n self.battery_size = battery_size",
"def __init__(self, battery_size=70):\n self.battery_size = battery_size\n self._range = 0",
"def chargeBatteries(self):\n self.currentBattery = self.maxBattery",
"def __init__(self, battery_size=75): #Note that battery_size is optional parameter if no value is provided.\n self.battery_size = battery_size",
"def describe_battery(self):\n print(\"This car has a \"+str(self.battery_size)+\"-KWh battery.\")",
"def describe_battery(self):\r\n\t\tprint(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\r\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(\"This car has a \" + str(self.battery_size) + \"-kWh battery.\")",
"def describe_battery(self):\n print(f\"This car has a battery size of {self.battery_size}-kWh.\")",
"def describe_battery(self):\n print(\"Ten samochod ma akumulator o pojemnosci \"\n + str(self.battery_size) + \" kWh.\")",
"def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")",
"def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")",
"def describe_battery(self):\n print(f\"The power of battery is - {self.battery_size}-kWh\")",
"def describe_battery(self):\n print(f\"This car has a {self.battery_size}-KWh battery.\")",
"def describe_battery(self):\n print(f\"This car has a {self.battery_size}-kWh battery.\")"
]
| [
"0.7852364",
"0.7556952",
"0.72916096",
"0.7285516",
"0.7285516",
"0.7273139",
"0.7273139",
"0.7273139",
"0.7238471",
"0.7235647",
"0.7093763",
"0.70713466",
"0.6882216",
"0.67099327",
"0.6617052",
"0.65214",
"0.64824754",
"0.6475698",
"0.6475698",
"0.6475698",
"0.6475698",
"0.6475698",
"0.6475698",
"0.6428919",
"0.6410678",
"0.6361261",
"0.6361261",
"0.6332628",
"0.62502193",
"0.6230055"
]
| 0.79119164 | 0 |
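The two battery records above describe a describe_battery method that reports the pack size and an upgrade_battery method that checks the size and bumps it to 100 kWh. A minimal self-contained sketch combining both follows; the Battery class name and the 75 kWh default are assumptions for illustration.

# Illustrative sketch tying the two battery snippets together.
class Battery:
    def __init__(self, battery_size=75):
        self.battery_size = battery_size

    def describe_battery(self):
        print(f"The power of battery is - {self.battery_size}-kWh")

    def upgrade_battery(self):
        # check and set: only smaller packs get bumped to 100 kWh
        if self.battery_size <= 75:
            self.battery_size = 100

battery = Battery()
battery.describe_battery()   # The power of battery is - 75-kWh
battery.upgrade_battery()
battery.describe_battery()   # The power of battery is - 100-kWh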
This is the class constructor. It requests the username and password. If a connection to the cloud database can be established with the username and password, then it checks whether the user has the privilege to create the database or insert new data. This privilege will open additional options in the main menu. The user will have three opportunities to enter their username and password. If they are unable to log on, the program will exit. | def __init__(self):
# Standard tools that allow a user to enter a password without any text
# displayed on the screen.
import getpass
# The user will have three opportunities to enter their username and password.
# If successful, a boolean variable is set to true and the loop exits.
# After three unsuccessful attempts the program will continue with the boolean
# variable set to False.
for i in range(3):
# Gets user information. For now, host is fixed.
self.Name = input('login as: ')
self.Password = getpass.getpass('Password: ')
self.Host = 'ideasdba.mariadb.database.azure.com'
# Variable to indicate what permissions the user has. If the user
# can insert data or create tables, then it is set to True and the
# user can access tools that create the database and update data.
# If the user has not been granted these permissions, then the user
# is limited to the tools for querying data and creating data models.
self.Update = False
# Variable to indicate whether the user is connected to the database
self.Connected = True
try:
Connection = mariadb.connect(
user = self.Name,
host = self.Host,
password= self.Password,
port=3306)
break
# Exception to catch the error thrown if the user is unable to connect
# to the database.
except mariadb.Error as e:
print('Attempt {}'.format(i))
                print('Unable to connect. Username and password combination not recognized.')
print('Please try again.')
self.Connected = False
# After the user connection is verified, check the privileges the user has
        # for the database. For now, does the user have the privilege to create new
        # databases? If so, the program will provide additional options that will
# allow the user to create the database and insert new data.
# Warning: With this privilege, the user can reconstruct the entire database.
# Use create option with caution.
if self.Connected:
AccessQuery = Connection.cursor()
AccessQuery.execute('USE mysql')
AccessQuery.execute("SELECT Create_priv FROM user WHERE User = '{}'".format(self.Name))
AccessList = [i for sub in AccessQuery for i in sub]
if (AccessList[0] == 'Y'):
self.Update = True
AccessQuery.close()
Connection.close()
# If the user is unable to connect, then the program warns the user that
        # the number of login attempts has been exceeded.
else:
print('Maximum number of login attempts exceeded.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, username, password, db_file=None):\n self.logger = logging.getLogger('pyragarga.Pyragarga')\n self._database = None\n if db_file:\n self.enable_db(db_file)\n self._session = requests.session()\n self._session.post(KG_URL + LOGIN_SCRIPT,\n data={'username':username, 'password':password})\n self.user_id = self._session.cookies['uid']\n self.logger.info('Logged into KG as user %s' % username)",
"def __init__(self, username, password=None):\n # Get password if necessary\n if password is None:\n password = getpass()\n # Get URL for the database\n self.db_url = \"http://galaxy-catalogue.dur.ac.uk:8080/Eagle\"\n # Set up authentication and cookies\n self.password_mgr = HTTPPasswordMgrWithDefaultRealm()\n self.password_mgr.add_password(None, self.db_url, username, password)\n self.opener = OpenerDirector()\n self.auth_handler = HTTPBasicAuthHandler(self.password_mgr)\n self.cookie_handler = HTTPCookieProcessor(cookie_jar)",
"def __init__(self):\n # create a connection through our super role via db.connect\n try:\n self.connection = db.connect(SUPER_ROLE, authcode=SUPER_AUTHCODE, host=HOST)\n except db.OperationalError: # thrown if password or role don't match\n print 'Caught an exception while trying to log in, maybe your account does not exist yet?'\n exit()\n \n # get a DictCursor as our cursor (which returns queries as column-name dicts)\n self.cursor = self.connection.cursor(DictCursor)\n \n self.setup_tables()",
"def __init__(self,username, password):\n self.username = username\n self.password = password",
"def __init__(self, username, password):\n self.username = username\n self.password = password\n self.server = self.login()",
"def __init__(self, username, password):\n\n self.username = username\n self.password = password",
"def __init__(self):\n self.user = \"\"\n self.password = \"\"",
"def __init__(self, username, password):\n\n self._username = username\n self._password = password",
"def init(username, password):\r\n click.echo('Initializing the database...')\r\n db.create_all()\r\n\r\n admin = Admin.query.first()\r\n if admin:\r\n click.echo('The adminstrator already exists, updating...')\r\n admin.username = username\r\n admin.set_password(password)\r\n else:\r\n click.echo('Creating the temporary administrator account..')\r\n admin = Admin(\r\n username=username,\r\n blog_title='Bluelog',\r\n blog_sub_title=\"No, I'm the real thing.\",\r\n name='Admin',\r\n about='Anything about you'\r\n )\r\n admin.set_password(password)\r\n db.session.add(admin)\r\n\r\n category = Category.query.first()\r\n if category is None:\r\n click.echo('Creating the default category...')\r\n category = Category(name='默认')\r\n db.session.add(category)\r\n\r\n db.session.commit()\r\n click.echo('Done.')",
"def __init__(self, db_name):\n\n self.db_ip = \"10.114.70.201\" # IP address of MySQL Database Machine\n self.uid = \"root\"\t\t # USER ID of the SQL ADMIN\n self.pwd = \"root\"\t\t # PASSWORD of the SQL ADMIN\n self.db_name = db_name # DATABASE name to connect to\n self.db = None\n self.cursor = None",
"def __init__(__self__, *,\n password: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if username is not None:\n pulumi.set(__self__, \"username\", username)",
"def __init__(self, user, password, host=None):\n self.user = user\n self.password = password\n if host: self.host = host",
"def main():\n parser = optparse.OptionParser('-u username ' + '-p password')\n parser.add_option('-u', dest='username', type='string', help=\"Game username- enter within '' marks\")\n parser.add_option('-p', dest='password', type='string', help=\"Game password- enter within '' marks\")\n (options, args) = parser.parse_args()\n u_name = options.username\n p_word = options.password\n # if no username or password is given, print out the usage\"\n if u_name is None or p_word is None:\n print(parser.usage)\n exit(0)\n\n login(u_name, p_word)",
"def __init__(__self__, *,\n password: str,\n username: str):\n pulumi.set(__self__, \"password\", password)\n pulumi.set(__self__, \"username\", username)",
"def __init__(self, username, password):\n self.username = username\n self.password = password\n self.privkey = None\n\n # sets self.privkey\n self.__set_or_create_key_if_not_exist()",
"def __init__(self,cicUser, cicPassword,cicUrl):\n\n self.cicUser = cicUser\n self.cicPassword = cicPassword\n self.cicUrl = cicUrl\n\n self.login()",
"def __init__(self):\n # Start Database Manager #\n self.dbManager = DBPM()\n self.user = None\n self.state = SessionStates.LOGGED_OUT\n\n # Make admin user - this is only needed when database is empty,\n # and admin user has never been created yet \n self.p_makeAdminUser();",
"def init(username, password):\r\n from flaskblog.models import User, Tag, Category, Article\r\n click.echo('Initializing the database...')\r\n db.create_all()\r\n\r\n user = User.query.first()\r\n if user is not None:\r\n click.echo('The administrator already exists, updating...')\r\n user.username = username\r\n user.password(password)\r\n else:\r\n click.echo('Creating the temporary administrator account...')\r\n # user = User(username=username,email\r\n #\r\n # )\r\n # admin.set_password(password)\r\n # db.session.add(admin)\r",
"def __init__(self, host='192.168.45.45', username='admin', password='Admin123', autodeploy=True):\n logging.debug(\"In the FMC __init__() class method.\")\n\n self.host = host\n self.username = username\n self.password = password\n self.autodeploy = autodeploy",
"def main(username, pw):\n pass",
"def __init__(\n self,\n username: str,\n password: str\n ) -> None:\n\n super().__init__(username, password)",
"def __init__(self, *, username: str = None, password: str = None) -> None:\n LOG.debug(f\"Authenticating to PostgreSQL database using {pg_environment()}\")\n\n connect_params = {\n \"cursor_factory\": NamedTupleCursor,\n \"fallback_application_name\": fallback_application_name(),\n\n **({\"user\": username} if username is not None else {}),\n **({\"password\": password} if password is not None else {}),\n }\n\n try:\n # connect() requires a DSN as the first arg even if the connection\n # details are fully-specified by the environment, but we don't need to\n # fill it with anything.\n self.connection = psycopg2.connect(\"\", **connect_params)\n except DatabaseError as error:\n LOG.error(f\"Authentication failed: {error}\")\n raise error from None\n\n LOG.info(f\"Connected to {self.session_info()}\")",
"def __init__(self, provider, hostname, **kwargs):\r\n self.provider = provider\r\n self.hostname = hostname\r\n self.username = kwargs.get('username')\r\n self.password = kwargs.get('password')\r\n self.database = kwargs.get('database')\r\n\r\n if self.provider not in SupportedDatabase.__members__:\r\n db_list = ','.join(list(SupportedDatabase.__members__))\r\n raise Exception(\r\n provider + ', is not supported at this time. Following databases are only supported : ' + db_list)\r\n\r\n self.db = Database()\r\n self.db.bind(provider=SupportedDatabase[self.provider].value, user=self.username, password=self.password,\r\n host=self.hostname, database=self.database)",
"def initialize(self):\n self.login()",
"def init_db(self):\n _client = pymongo.MongoClient(username=self.config['database']['admin'],\n password=self.config['database']['admin_pwd'],\n host=self.config['database']['host'],\n port=self.config['database']['port'])\n # _id: db_name.user_name\n user_ids = [_u['_id'] for _u in _client.admin.system.users.find({}, {'_id': 1})]\n\n db_name = self.config['database']['db']\n username = self.config['database']['user']\n\n # print(f'{db_name}.{username}')\n # print(user_ids)\n\n if f'{db_name}.{username}' not in user_ids:\n _client[db_name].command('createUser', self.config['database']['user'],\n pwd=self.config['database']['pwd'], roles=['readWrite'])\n print('Successfully initialized db')",
"def __init__(self, name=\"\", auth_path=None, sql_lite=None, db=None):\n self._name=name\n self._con=None\n self._auth_path= os.path.dirname(os.path.abspath(__file__)) + '/db.csv' if auth_path is None else auth_path\n self._sql_lite=sql_lite\n self._db=db",
"def __init__(self, host='localhost', user='wikiwsd', passwd='wikiwsd', database='wikiwsd3'):\n self._host = host\n self._user = user\n self._passwd = passwd\n self._database = database",
"def __init__(__self__, *,\n db_name: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n roles: pulumi.Input[Sequence[pulumi.Input[str]]],\n server: pulumi.Input[str],\n admin_secret: Optional[pulumi.Input[str]] = None,\n admin_secret_key_vault: Optional[pulumi.Input[str]] = None,\n key_vault_to_store_secrets: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"db_name\", db_name)\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"roles\", roles)\n pulumi.set(__self__, \"server\", server)\n if admin_secret is not None:\n pulumi.set(__self__, \"admin_secret\", admin_secret)\n if admin_secret_key_vault is not None:\n pulumi.set(__self__, \"admin_secret_key_vault\", admin_secret_key_vault)\n if key_vault_to_store_secrets is not None:\n pulumi.set(__self__, \"key_vault_to_store_secrets\", key_vault_to_store_secrets)\n if username is not None:\n pulumi.set(__self__, \"username\", username)",
"def __init__(__self__, *,\n db_name: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n roles: pulumi.Input[Sequence[pulumi.Input[str]]],\n server: pulumi.Input[str],\n admin_secret: Optional[pulumi.Input[str]] = None,\n admin_secret_key_vault: Optional[pulumi.Input[str]] = None,\n key_vault_to_store_secrets: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"db_name\", db_name)\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"roles\", roles)\n pulumi.set(__self__, \"server\", server)\n if admin_secret is not None:\n pulumi.set(__self__, \"admin_secret\", admin_secret)\n if admin_secret_key_vault is not None:\n pulumi.set(__self__, \"admin_secret_key_vault\", admin_secret_key_vault)\n if key_vault_to_store_secrets is not None:\n pulumi.set(__self__, \"key_vault_to_store_secrets\", key_vault_to_store_secrets)\n if username is not None:\n pulumi.set(__self__, \"username\", username)",
"def __init__(self, site_name, user_name, password):\n self.site_name = site_name\n self.user_name = user_name\n self.password = password"
]
| [
"0.6977395",
"0.6778311",
"0.6732263",
"0.672914",
"0.6667451",
"0.66476744",
"0.6609962",
"0.6582779",
"0.6526893",
"0.6486139",
"0.6459199",
"0.6423249",
"0.64199483",
"0.6390175",
"0.63895506",
"0.634812",
"0.63453716",
"0.63385814",
"0.63188916",
"0.63105637",
"0.6264708",
"0.62606865",
"0.62469",
"0.6219842",
"0.6213095",
"0.62001127",
"0.61968505",
"0.6187323",
"0.6187323",
"0.6179006"
]
| 0.6830924 | 1 |
This method tests the connection to the database and is mostly used for debugging; for now, there is an option in the main menu to test the connection. If the method can connect to the database and query the list of tables, it returns the list of tables in the database. If a mariadb exception occurs, it returns an empty list. | def test(self):
        # Establish a connection and run a query that returns the name of
        # each table in the database. Flatten the result into a list of
        # table names, close the cursor and connection, and return the list.
try:
Connection = mariadb.connect(
                user=self.Name,
                host=self.Host,
                password=self.Password,
                port=3306)
TestQuery = Connection.cursor()
TestQuery.execute('USE moleculardata')
TestQuery.execute('SHOW TABLES')
            Tables = [j for sub in TestQuery.fetchall() for j in sub]
            TestQuery.close()
            Connection.close()
            return Tables
        # Catch database errors, such as a failure to connect or an error in
        # the query itself, and return an empty list.
except mariadb.Error as e:
            print('Unable to open connection: {}'.format(e))
            return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testDatabase(self):\n con = self.getMetadataDatabaseConnection()\n if con:\n return True",
"def test_database_connection(self):\n\t\t\n\t\tself.assertTrue(database.connect())",
"def test_004_connect(self):\n HEADING()\n self.db.connect()\n\n result = True\n assert result",
"def test_database_can_connect():\n\n assert \"sqlite\" in settings.database_uri",
"def test_mysql_connect_fail(self):\n if _is_backend_avail('mysql', user=\"openstack_cifail\"):\n self.fail(\"Shouldn't have connected\")",
"def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)",
"def test_mysql_connect_fail(self):\n if test_migrations._is_backend_avail(\n 'mysql', 'kickstand_cifail', self.PASSWD, self.DATABASE):\n self.fail(\"Shouldn't have connected\")",
"def test_db_connection(env_setup, env_table):\n test_string = DbManager(SqLiteHelper, {\"db_path\": env_setup, \"master_table\": env_table})\\\n .test_connection()\n assert test_string is not None",
"def test_connection():\n database = r'.\\data\\SQLite\\chinook.db'\n\n try:\n conn = sqlite3.connect(database)\n table_names = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n for table_name in table_names:\n print(table_name[0])\n\n finally:\n try: conn.close()\n except: pass",
"def test_db_connection(server, db_name, username, passwd):\n print(blue + \"\\n>>> \" + reset + \"Checking i-doit database connection...\")\n cnx = None\n try:\n cnx = mysql.connector.connect(\n user=username, password=passwd, host=server, database=db_name)\n print(green + \"\\n>>> \" + reset +\n \"Successfully connected to the i-doit database.\")\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(red + \"\\n>>> \" + reset +\n \"Something is wrong with your username or password.\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(red + \"\\n>>> \" + reset + \"Database does not exist.\")\n else:\n print(red + \"\\n>>> \" + reset + str(err))\n return cnx",
"def test_database_connection(self):\n\n # test that all models are \"querysable\"\n for model in self.mb_model_list:\n model.objects.all()\n\n # test that main tables are filled\n self.assertTrue(mb_models.Artist.objects.count() > 0)\n self.assertTrue(mb_models.ReleaseGroup.objects.count() > 0)\n self.assertTrue(mb_models.Release.objects.count() > 0)\n self.assertTrue(mb_models.Track.objects.count() > 0)",
"def test_get_database(self):\r\n database = self.profile.get_database('testing.db')\r\n self.assertIsInstance(database, QtDBConnector)",
"def test(self):\r\n self.log.debug(\"connection test using version query with adapter %s...\", self.adapter)\r\n try:\r\n res = self.get('SELECT * FROM Package WHERE PackageID=\\'179\\'') # 'SELECT Family FROM Version'\r\n if not res:\r\n self.log.critical('no results, database problem.')\r\n return False\r\n else:\r\n self.log.info('connection successful.')\r\n return True\r\n except:\r\n self.log.critical('connection not possible, check host/user/pwd configuration')\r\n return False",
"def test_check_connection(self):\n self.assertIsNotNone(app.check_connection())",
"def test_connection_is_established(self):\n for conn in self.connections:\n assert conn.is_connected is True",
"def test_010_info(self):\n HEADING()\n db = self.db\n\n db.connect()\n db.info()\n pass",
"def test_connect_db_to_query(db):\n assert 1",
"def test_db_connection():\n\n from cwf2neo.neo4j import Neo4j\n\n db = Neo4j()\n\n assert db.graph.database.name",
"def test_connection(self):\n r = main.List.connection()\n self.assertTrue(r.ping(), \"Connection failed.\")",
"def test_connection(self):\n print('\\ntest_connection')\n connector = btc_price.db.ConnectPSQL(host=DB_HOST, user=DB_USER, port=DB_PORT, db=DB_NAME)\n print(connector.show_table_name())\n print(connector.show_column_name('ticker'))",
"def check_database_connection():\n try:\n engine = create_engine(DB_LOGIN)\n connection = engine.connect()\n\n # Read some data\n count = connection.execute(select([func.count(REGISTER.c.register_id)]).where(REGISTER.c.create_time == None)).first()[0]\n LOG.info('DB test count: {0}'.format(count))\n\n connection.close()\n\n except Exception:\n # Something when wrong\n LOG.exception('check_database_connection')\n return False\n\n return True",
"def test_create_tables(self):\n conn_object = ParentConnection()\n conn_object.create_tables()\n conn = psycopg2.connect(**{\"host\": \"localhost\",\n \"database\": \"test\",\n \"user\": \"test\",\n \"password\": \"test\"})\n cur = conn.cursor()\n cur.execute(\"SELECT * from information_schema.tables \"\n \"WHERE table_schema = 'public' \"\n \"AND table_type = 'BASE TABLE';\")\n result = cur.fetchall()\n result = [x[2] for x in result]\n self.assertCountEqual(result,\n ['bioms', 'counts', 'networks',\n 'taxonomy', 'edges', 'samples', 'meta']\n )\n cur.close()\n conn.close()\n conn_object.delete_tables()",
"def test_check_database_exists(self):\n query_result = [{'streamalert': True}]\n self.client.athena_client = MockAthenaClient(results=query_result)\n\n assert_true(self.client.check_database_exists())",
"def _test_connection(self, connection_string):\n try:\n engine = create_engine(connection_string)\n connection = engine.connect()\n connection.close()\n return True\n except Exception as e:\n if options.debug:\n logging.exception(\"Database connection failed: %s\" % e)\n return False",
"def test_connect(self):\n db = Database.TestDB(self.mktemp())\n self.assertFalse(db.initialized)\n yield db.open()\n self.assertTrue(db.initialized)\n db.close()",
"def test_get_table_list(self):\n db_introspection = DatabaseIntrospection(self.connection)\n cursor = mock.MagicMock()\n\n def list_tables(*args, **kwargs):\n return [[\"Table_1\", \"t\"], [\"Table_2\", \"t\"]]\n\n cursor.run_sql_in_snapshot = list_tables\n table_list = db_introspection.get_table_list(cursor=cursor)\n self.assertEqual(\n table_list,\n [\n TableInfo(name=\"Table_1\", type=\"t\"),\n TableInfo(name=\"Table_2\", type=\"t\"),\n ],\n )",
"def testDbConnection(host, port, user, passwd):\n try:\n conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd )\n cursor = conn.cursor()\n cursor.execute(\"CREATE DATABASE IF NOT EXISTS nupic_db_test\")\n conn.select_db(\"nupic_db_test\")\n cursor.execute(\"CREATE TABLE db_test \\\n (teststring VARCHAR(255),\\\n someint INT)\")\n cursor.execute(\"INSERT INTO db_test VALUES ('testing123', 123)\")\n cursor.execute(\"DROP TABLE IF EXISTS db_test\")\n cursor.execute(\"DROP DATABASE IF EXISTS nupic_db_test\")\n return True\n\n except pymysql.err.OperationalError , e:\n print str(e)\n return False",
"def connect_to_db(self):\n try:\n self.connection = sqlite3.connect('test_database.db')\n self.cursor = sqlite3.connect('test_database.db').cursor()\n return self.connection, self.cursor\n except EnvironmentError:\n print(EnvironmentError)",
"def test_get_table(self):\n my_conn = MySQL(*self.conn_params)\n inf_schema = my_conn.get_table('inf_schema') # GET TABLE example\n row_count = my_conn.engine.scalar(\n select([func.count('*')]).select_from(inf_schema)\n )\n # The select.columns parameter is not available in the method form of\n # select(), e.g. FromClause.select().\n # See https://docs.sqlalchemy.org/en/latest/core/selectable.html#\n # sqlalchemy.sql.expression.FromClause.select\n my_conn.engine.execute(\n select([inf_schema.c.table_name]).select_from(inf_schema))\n self.assertGreaterEqual(row_count, 100)",
"def _check_connection(self):\n if \"_connection\" not in self.__dict__:\n message = \"use connect method before doing operation on this database\"\n raise Exception(message)"
]
| [
"0.6990052",
"0.6902803",
"0.6520851",
"0.6482434",
"0.6388653",
"0.63614976",
"0.6319599",
"0.62959796",
"0.6262326",
"0.6209179",
"0.61443245",
"0.61433625",
"0.60929066",
"0.60791105",
"0.60245323",
"0.6016008",
"0.59738386",
"0.5869316",
"0.5864464",
"0.5805988",
"0.57900774",
"0.57862103",
"0.57835555",
"0.57457423",
"0.57453007",
"0.57345456",
"0.5716237",
"0.5706869",
"0.5699954",
"0.5694288"
]
| 0.74820626 | 0 |
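The query above mentions a main-menu option that exercises this connection check. A minimal, hypothetical sketch of such a handler, assuming test() lives on a small wrapper class whose attribute names (Name, Host, Password) are taken from the connect() call in the record:

class Database:
    # Hypothetical credential holder; only the attributes test() reads are set here.
    def __init__(self, name, host, password):
        self.Name = name
        self.Host = host
        self.Password = password

    # test(self) from the record above would be defined on this class.

def menu_test_connection(db):
    # Report the outcome of the connection test to the user.
    tables = db.test()
    if tables:
        print('Connection OK. Tables:', ', '.join(tables))
    else:
        print('Could not connect or no tables were returned.')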
Return the wait time multiplied by the quickest bus ID. | def get_quickest_bus(departure_time: int, buses: List[int]) -> int:
quickest_bus = sorted(buses,
key=lambda x: get_wait_time(departure_time, x),
reverse=False)[0]
return get_wait_time(departure_time, quickest_bus) * quickest_bus | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait(bus, timestamp):\n\treturn (bus - (timestamp % bus)) % bus",
"def part1() -> int:\n longest_sleeper = max(sleep_times, key=lambda g: len(sleep_times[g]))\n sleepiest_minute = max(\n sleep_times[longest_sleeper], key=sleep_times[longest_sleeper].count)\n\n return longest_sleeper * sleepiest_minute",
"def _probe_wait_time(self):\n r = self.probe_cycle_time / float(len(self.servers)) #self.probe_cycle_time=5\n r = max(.25, r) # Cap it at four per second\n return r",
"def part2() -> int:\n sleepiest_minute = None\n sleeping_guard = None\n sleep_frequency = 0\n\n for guard, times in sleep_times.items():\n if len(times) == 0:\n continue\n _sleepiest_minute = max(\n sleep_times[guard], key=sleep_times[guard].count)\n _sleep_frequency = sleep_times[guard].count(_sleepiest_minute)\n\n if _sleep_frequency > sleep_frequency:\n sleepiest_minute = _sleepiest_minute\n sleeping_guard = guard\n sleep_frequency = _sleep_frequency\n\n assert sleeping_guard is not None and sleepiest_minute is not None\n return sleeping_guard * sleepiest_minute",
"def get_wait_time(self) -> int:\n next_ts = self.get_next_timestamp()\n if next_ts is None:\n return max(0, self.min_wait)\n return min((next_ts - parser.parse(self.event['timestamp'])).seconds, self.max_wait)",
"def queued_time(self):\r\n return (self.node_monitor_launch_time - self.node_monitor_submit_time)",
"def get_total_wait_time(self):\n s = sum([r['wait'] for r in self.required_signals.itervalues()])\n if self.current_signal:\n s += self.current_signal[1]['wait']\n\n return s",
"def wait_time(self, current_time):\n return current_time - self.timestamp",
"def receive_and_probing_time(self):\r\n latest_completion = 0\r\n for probe in self.__probes.values():\r\n\t\t \t if probe.complete():\r\n\t\t\t \t\t latest_completion = max(latest_completion, probe.completion_time)\r\n return latest_completion - self.__arrival_time",
"def get_timed(self):\n ret = self.send(\"?T\", recv=True)\n ret = int(ret, 10)\n # FIXME: range?\n assert 1 <= ret <= 9999\n return ret",
"def sleep_time(self):\n now = datetime.utcnow()\n return min(service.next_update_in(now) for service in self.services)",
"def measure_time(n: int, max_delay: int) -> float:\n t0 = time.time()\n asyncio.run(wait_n(n, max_delay))\n t1 = time.time()\n total_time = t1 - t0\n return total_time / n",
"def getTime():\n\n return float(time.perf_counter()*1000)",
"def get_waiting_in_line(self):\n return self.time_step_to_dequeue - self.time_step_to_enqueue",
"def measure_time(n: int, max_delay: int) -> float:\n start_time = time.time()\n asyncio.run(wait_n(n, max_delay))\n return (time.time() - start_time) / n",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def remaining_ms():",
"def nextbus(buses, timestamp):\n\twaits = [(bus, wait(bus,timestamp)) for bus in buses]\n\treturn min(waits, key= lambda w:w[1])",
"def probing_time(self):\r\n earliest_launch = (time.time() * 1000)**2\r\n latest_completion = 0\r\n for probe in self.__probes.values():\r\n if probe.complete():\r\n earliest_launch = min(earliest_launch, probe.launch_time)\r\n latest_completion = max(latest_completion, probe.completion_time)\r\n return latest_completion - earliest_launch",
"def _get_time_to_sleep(self, num_bytes, current_time=None):\n if current_time is None:\n current_time = time.time()\n\n fill_amount = (\n current_time - self.__last_bucket_fill_time\n ) * self.__bucket_fill_rate\n\n self.__bucket_contents = min(\n self.__bucket_size, self.__bucket_contents + fill_amount\n )\n self.__last_bucket_fill_time = current_time\n\n if num_bytes <= self.__bucket_contents:\n return 0.0\n elif self.__bucket_fill_rate == 0:\n return float(\"inf\")\n else:\n return (num_bytes - self.__bucket_contents) / self.__bucket_fill_rate",
"def curr_time_millis():\n return 1000 * timeit.default_timer()",
"def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)",
"def get_timeout(self) -> int:",
"def get_timediv(self):\n result = self.bus.ask('HOR:MAI:SCA?')\n return float(result.rstrip())",
"def bake_time_remaining(elapsed_bake_time: int) -> int:\n return EXPECTED_BAKE_TIME - elapsed_bake_time"
]
| [
"0.6823249",
"0.6669374",
"0.660172",
"0.6395295",
"0.6350145",
"0.62678856",
"0.6054506",
"0.6011937",
"0.5985268",
"0.59784275",
"0.59142697",
"0.58659416",
"0.58637136",
"0.58592594",
"0.5832436",
"0.582941",
"0.582941",
"0.582941",
"0.582941",
"0.582941",
"0.582941",
"0.578117",
"0.577532",
"0.5749794",
"0.5734294",
"0.57172924",
"0.5706314",
"0.57034594",
"0.56776655",
"0.5674377"
]
| 0.67769605 | 1 |
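The get_quickest_bus document above relies on a get_wait_time helper that is not part of the record. A minimal sketch of what it presumably does, using the same modular-arithmetic formula as the wait() function shown in the first negative:

def get_wait_time(departure_time: int, bus: int) -> int:
    # Minutes from departure_time until this bus next departs; zero if it
    # departs exactly at departure_time.
    return (bus - departure_time % bus) % bus

# With this helper, get_quickest_bus(939, [7, 13, 59, 31, 19]) returns 5 * 59 = 295.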
Install datasets for retriever. | def _install(args, use_cache, debug):
engine = choose_engine(args)
engine.use_cache = use_cache
if args['dataset'].endswith('.zip') or args.get('hash_value'):
path_to_archive = args['dataset']
if args.get('hash_value'):
path_to_archive = os.path.join(
PROVENANCE_DIR, args['dataset'],
'{}-{}.zip'.format(args['dataset'], args['hash_value']))
if not os.path.exists(path_to_archive):
print('The committed file does not exist.')
engine = install_committed(path_to_archive,
engine,
force=args.get('force', False))
return engine
script_list = SCRIPT_LIST()
if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):
check_for_updates()
script_list = SCRIPT_LIST()
data_sets_scripts = name_matches(script_list, args['dataset'])
if data_sets_scripts:
for data_sets_script in data_sets_scripts:
print("=> Installing", data_sets_script.name)
try:
if engine.name == "HDF5":
sqlite_opts = {
'command': 'install',
'dataset': data_sets_script,
'engine': 'sqlite',
'file': (args["file"].split("."))[0] + ".db",
'table_name': args["table_name"],
'data_dir': args["data_dir"]
}
sqlite_engine = choose_engine(sqlite_opts)
data_sets_script.download(sqlite_engine, debug=debug)
data_sets_script.engine.final_cleanup()
engine.script_table_registry = OrderedDict()
data_sets_script.download(engine, debug=debug)
data_sets_script.engine.final_cleanup()
except Exception as e:
print(e)
if debug:
raise
elif args['dataset'].startswith('socrata') and not data_sets_scripts:
socrata_id = args['dataset'].split('-', 1)[1]
resource = find_socrata_dataset_by_id(socrata_id)
if "error" in resource.keys():
if resource["datatype"][0] == "map":
print("{} because map type datasets are not supported".format(
resource["error"]))
else:
print("{} because it is of type {} and not tabular".format(
resource["error"], resource["datatype"][1]))
elif len(resource.keys()) == 0:
return
else:
print("=> Installing", args['dataset'])
name = f"socrata-{socrata_id}"
create_socrata_dataset(engine, name, resource)
if args['command'] == 'download':
return engine
else:
script_list = SCRIPT_LIST()
script = get_script(args['dataset'])
script.download(engine, debug=debug)
script.engine.final_cleanup()
elif args['dataset'].startswith('rdataset') and not data_sets_scripts:
print("=> Installing", args['dataset'])
rdataset = args['dataset'].split('-')
update_rdataset_catalog()
package, dataset_name = rdataset[1], rdataset[2]
create_rdataset(engine, package, dataset_name)
if args['command'] == 'download':
return engine
else:
script_list = SCRIPT_LIST()
script = get_script(args['dataset'])
script.download(engine, debug=debug)
script.engine.final_cleanup()
else:
message = "Run retriever.datasets() to list the currently available " \
"datasets."
raise ValueError(message)
return engine | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()",
"def task_installTestData(self):\n if env.get('environment') == 'production':\n abort(\"Don't use installTestData in production.\")\n\n if postgres.tableExists('trac', 'system'):\n abort(\"Existing Trac tables found.\")\n\n with settings(user=self.serviceUser):\n # Run trac initenv to create the postgresql database tables, but use\n # a throwaway trac-env directory because that comes from\n # https://github.com/twisted-infra/trac-config/tree/master/trac-env\n try:\n run('~/virtualenv/bin/trac-admin '\n '/tmp/trac-init initenv TempTrac postgres://@/trac git \"\"')\n finally:\n run(\"rm -rf /tmp/trac-init\")\n\n # Run an upgrade to add plugin specific database tables and columns.\n run('~/virtualenv/bin/trac-admin config/trac-env upgrade --no-backup')",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()",
"def prepare_data(self) -> None:\n if (self.root).is_dir():\n logger.info(\"Found the dataset.\")\n else:\n download_and_extract(self.root, DOWNLOAD_INFO)",
"def install_package_data(data_dir: str = None):\n\n zen = InstallPackageData(data_dir=data_dir)\n\n zen.fetch_zenodo()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)",
"def _install(self):\n\n pass",
"def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()",
"def install(self) -> None:\n if self.local_packages:\n self.prepare_install_local()\n self.install_local()\n if self.remote_packages:\n self.install_from_url()\n if self.repository_packages:\n self.install_from_repository()\n if self.debuginfo_packages:\n self.install_debuginfo()",
"def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")",
"def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)",
"def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)",
"def setup(self):\n\n folder_name, file_name, url, md5 = self.resource\n dataset_folder = os.path.join(self.data_root, folder_name)\n if not os.path.exists(dataset_folder):\n sh_utils.download_and_extract_archive(url, dataset_folder, md5, file_name)\n\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = tv_datasets.ImageFolder(\n root=dataset_folder, transform=test_transform\n )\n self.images_only_dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )",
"def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def download_dataset(self):\n raise NotImplementedError",
"def package_datasets(ds_all, dirname=''):\n ds_all = copy.deepcopy(ds_all)\n assert dirname != '', \"dirname required\"\n package_dataset(ds_all['ds_train_um'], dirname=join('.', dirname, 'train'))\n package_dataset(ds_all['ds_valid_um'], dirname=join('.', dirname, 'valid'))\n package_dataset(ds_all['ds_test_um'], dirname=join('.', dirname, 'test'))",
"def datasets(self):\n pass",
"def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... done with preparing the dataset.\")",
"def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)",
"def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])",
"def install_all(self):\n self.install_native_host()\n self.install_extension()",
"def setup(self, ds):\n pass",
"def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)",
"def install(self, clientDataDir, suppressPackageContentFileGeneration=False):\n\t\tself.setClientDataDir(clientDataDir)\n\t\tself.getMetaData()\n\t\tself.runPreinst()\n\t\tself.extractData()\n\t\tif not suppressPackageContentFileGeneration:\n\t\t\tself.createPackageContentFile()\n\t\tself.setAccessRights()\n\t\tself.runPostinst()\n\t\tself.cleanup()",
"def makecldf(args):\n with_dataset(args, Dataset._install)",
"def install(clean=False):\n\n # Drop tables\n if clean:\n app.logger.info(\"Dropping all tables...\")\n from utils import drop_all_tables\n drop_all_tables(app)\n\n # Create tables\n app.logger.info(\"Creating all tables...\")\n import cavavin.models # NOQA: to register all Model\n db.create_all()\n\n # Import fixture data\n app.logger.info(\"Installing default data...\")\n charlatan_manager = charlatan.FixturesManager(db_session=db.session, use_unicode=True)\n charlatan_manager.load('data/countries.yaml', models_package='cavavin.models')\n charlatan_manager.load('data/users.yaml', models_package='cavavin.models')\n charlatan_manager.install_all_fixtures()"
]
| [
"0.64824706",
"0.62990665",
"0.62516385",
"0.62516385",
"0.62441695",
"0.6194383",
"0.6147018",
"0.6111253",
"0.60733217",
"0.6066709",
"0.6036118",
"0.60200125",
"0.5941362",
"0.5941362",
"0.5929711",
"0.5899112",
"0.5832342",
"0.58091074",
"0.58081496",
"0.58072793",
"0.5782787",
"0.5778196",
"0.5776588",
"0.57668024",
"0.5737552",
"0.57296467",
"0.57212615",
"0.5706761",
"0.5705467",
"0.5684086"
]
| 0.6306677 | 1 |
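The sqlite_opts dict assembled inside _install above suggests what a SQLite-specific wrapper would look like. A hypothetical sketch by analogy with the engine wrappers in the records that follow; the default file and table_name values are assumptions, not necessarily the package's actual signature:

def install_sqlite(dataset,
                   file='sqlite.db',
                   table_name='{db}_{table}',
                   data_dir=DATA_DIR,
                   debug=False,
                   use_cache=True,
                   force=False,
                   hash_value=None):
    # Build the same kind of args dict that _install() dispatches on.
    args = {
        'command': 'install',
        'dataset': dataset,
        'engine': 'sqlite',
        'file': file,
        'table_name': table_name,
        'data_dir': data_dir,
        'force': force,
        'hash_value': hash_value
    }
    return _install(args, use_cache, debug)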
Install datasets into mysql. | def install_mysql(dataset,
user='root',
password='',
host='localhost',
port=3306,
database_name='{db}',
table_name='{db}.{table}',
debug=False,
use_cache=True,
force=False,
hash_value=None):
args = {
'command': 'install',
'database_name': database_name,
'engine': 'mysql',
'dataset': dataset,
'host': host,
'port': port,
'password': password,
'table_name': table_name,
'user': user,
'force': force,
'hash_value': hash_value
}
return _install(args, use_cache, debug) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_mysql():\n with lcd(env.projectroot):\n put(\"manage/sysconf/%(target)s/mysql/setup-mysql.sql\" % env, \"/tmp\")\n #sudo(\"mysql -u root -p < /tmp/setup-mysql.sql\")\n sudo(\"mysql -u root < /tmp/setup-mysql.sql\")",
"def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)",
"def task_installTestData(self):\n if env.get('environment') == 'production':\n abort(\"Don't use installTestData in production.\")\n\n if postgres.tableExists('trac', 'system'):\n abort(\"Existing Trac tables found.\")\n\n with settings(user=self.serviceUser):\n # Run trac initenv to create the postgresql database tables, but use\n # a throwaway trac-env directory because that comes from\n # https://github.com/twisted-infra/trac-config/tree/master/trac-env\n try:\n run('~/virtualenv/bin/trac-admin '\n '/tmp/trac-init initenv TempTrac postgres://@/trac git \"\"')\n finally:\n run(\"rm -rf /tmp/trac-init\")\n\n # Run an upgrade to add plugin specific database tables and columns.\n run('~/virtualenv/bin/trac-admin config/trac-env upgrade --no-backup')",
"def _install(args, use_cache, debug):\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n if args['dataset'].endswith('.zip') or args.get('hash_value'):\n path_to_archive = args['dataset']\n if args.get('hash_value'):\n path_to_archive = os.path.join(\n PROVENANCE_DIR, args['dataset'],\n '{}-{}.zip'.format(args['dataset'], args['hash_value']))\n if not os.path.exists(path_to_archive):\n print('The committed file does not exist.')\n engine = install_committed(path_to_archive,\n engine,\n force=args.get('force', False))\n return engine\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n print(\"=> Installing\", data_sets_script.name)\n try:\n if engine.name == \"HDF5\":\n sqlite_opts = {\n 'command': 'install',\n 'dataset': data_sets_script,\n 'engine': 'sqlite',\n 'file': (args[\"file\"].split(\".\"))[0] + \".db\",\n 'table_name': args[\"table_name\"],\n 'data_dir': args[\"data_dir\"]\n }\n sqlite_engine = choose_engine(sqlite_opts)\n data_sets_script.download(sqlite_engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n engine.script_table_registry = OrderedDict()\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n elif args['dataset'].startswith('socrata') and not data_sets_scripts:\n socrata_id = args['dataset'].split('-', 1)[1]\n resource = find_socrata_dataset_by_id(socrata_id)\n\n if \"error\" in resource.keys():\n if resource[\"datatype\"][0] == \"map\":\n print(\"{} because map type datasets are not supported\".format(\n resource[\"error\"]))\n else:\n print(\"{} because it is of type {} and not tabular\".format(\n resource[\"error\"], resource[\"datatype\"][1]))\n elif len(resource.keys()) == 0:\n return\n else:\n print(\"=> Installing\", args['dataset'])\n name = f\"socrata-{socrata_id}\"\n create_socrata_dataset(engine, name, resource)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n elif args['dataset'].startswith('rdataset') and not data_sets_scripts:\n print(\"=> Installing\", args['dataset'])\n rdataset = args['dataset'].split('-')\n update_rdataset_catalog()\n package, dataset_name = rdataset[1], rdataset[2]\n create_rdataset(engine, package, dataset_name)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n else:\n message = \"Run retriever.datasets() to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine",
"def setup_tap_mysql(self):\n db_script = os.path.join(DIR, '..', '..', 'db', 'tap_mysql_db.sh')\n self._run_command(db_script)",
"def insert_db():\n populate_tables()",
"def mysql_import():\n # first make another copy of the db\n run(\"mysqldump -u database_user database_name -p > ~/tmp/exported_db_temp.sql\")\n # then import from the backup\n run(\"mysql -u database_user -p -D database_name < ~/tmp/exported_db.sql\")",
"def installMariaDb(self):\n apt_get_install(self.paquets)",
"def ingest():\n db.delete_dataset_records(DATASET_ID)\n\n db.insert_dataset({\n 'dataset_id': DATASET_ID,\n 'title': 'North American Breeding Bird Survey (BBS)',\n 'version': '2016.0',\n 'url': 'https://www.pwrc.usgs.gov/bbs/'})\n\n to_taxon_id = insert_taxa()\n to_place_id = insert_places()\n to_event_id = insert_events(to_place_id)\n insert_counts(to_event_id, to_taxon_id)",
"def createDatabase(self):\n \n try:\n self.conn = MySQLdb.connect (host = settings.DATABASES['default']['HOST'], user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'])\n except MySQLdb.Error, e:\n raise Exception(\"Cannot connect to the MySQL server.\")\n\n try:\n cursor = self.conn.cursor()\n cursor.execute(\"CREATE DATABASE {};\".format(settings.DBNAME))\n except MySQLdb.Error, e:\n raise Exception(\"Cannot create the database {}. {}\".format(settings.DBNAME, e))\n finally:\n self.conn.close()\n\n try:\n self.conn = MySQLdb.connect (host = settings.DATABASES['default']['HOST'], user = settings.DATABASES['default']['USER'], passwd = settings.DATABASES['default']['PASSWORD'], db = settings.DBNAME)\n except MySQLdb.Error, e:\n raise Exception(\"Cannot connect to the database {}. {}\".format(settings.DBNAME, e))\n\n try:\n cursor = self.conn.cursor()\n # creating the datasets table\n cursor.execute(\"CREATE TABLE datasets (dataset VARCHAR(255) PRIMARY KEY, datasetid INT UNIQUE AUTO_INCREMENT, ximagesz INT, yimagesz INT, zimagesz INT, xoffset INT, yoffset INT, zoffset INT, xvoxelres DOUBLE, yvoxelres DOUBLE, zvoxelres DOUBLE, scalingoption INT, scalinglevels INT, starttime INT, endtime INT);\")\n cursor.execute(\"CREATE TABLE channels (channel_name VARCHAR(255) ,dataset VARCHAR(255) REFERENCES datasets(dataset), PRIMARY KEY (channel_name,dataset), channel_type VARCHAR(255), channel_datatype VARCHAR(255), startwindow INT, endwindow INT);\")\n self.conn.commit()\n except MySQLdb.Error, e:\n raise Exception(\"Cannot create the tables for the database {}, {}\".format(settings.DBNAME, e))\n finally:\n self.conn.close()\n \n def pipInstall(self):\n \"\"\"Installing all the pip packages\"\"\"\n\n print \"Does Nothing\"\n\n def restart(self):\n \"\"\"Restart all the services\"\"\"",
"def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)",
"def setup(self):\n return self.setupDatabases()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')",
"def install(drop_all=False):\n\n print \"Connecting to database...\"\n engine = create_engine(config.DATABASE_URI)\n Base.metadata.bind = engine\n\n if drop_all:\n print \"Dropping old tables...\"\n Base.metadata.drop_all()\n \n print \"Installing new schema...\"\n Base.metadata.create_all(engine)",
"def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()",
"def connect_db_and_load_data(cls):\n db.connect()\n db.create_tables([Product], safe=True)\n load_data(transform_data('./inventory.csv'))",
"def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()",
"def initdb():\n\tc, conn = connect()\n\tsql = []\n\twith open('data\\\\database.sql') as f:\n\t\tfor line in f:\n\t\t\tsql.append(line.strip())\n\t\n\tfor query in sql:\n\t\tc.execute(query)\n\tconn.commit()",
"def fill_sql_to_mysql():\n for server in servers:\n stop_server_cmd = \"sshpass -p %s ssh root@%s 'sh /data0/update_locate_server.sh'\" % (password, server.target_lan_ip)\n os.system(stop_server_cmd)\n find_target_init_sql_cmd = \"sshpass -p %s ssh root@%s 'ls /data0/src/%s_*.sql'\" % (password, server.target_lan_ip, server.server_name_pre)\n sql_file_full_path = os.popen(find_target_init_sql_cmd).readline()\n input_sql_cmd = \"sshpass -p %s ssh root@%s '/usr/local/mysql/bin/mysql -uroot -p%s -h127.0.0.1 -P%s wg_lj < %s' \" \\\n % (password, server.target_lan_ip, mysql_pw, server.target_mysql_port, sql_file_full_path)\n logger.info(\"Begin to dump sql to the mysql! cmd is = \" + input_sql_cmd)\n os.system(input_sql_cmd)",
"def install(clean=False):\n\n # Drop tables\n if clean:\n app.logger.info(\"Dropping all tables...\")\n from utils import drop_all_tables\n drop_all_tables(app)\n\n # Create tables\n app.logger.info(\"Creating all tables...\")\n import cavavin.models # NOQA: to register all Model\n db.create_all()\n\n # Import fixture data\n app.logger.info(\"Installing default data...\")\n charlatan_manager = charlatan.FixturesManager(db_session=db.session, use_unicode=True)\n charlatan_manager.load('data/countries.yaml', models_package='cavavin.models')\n charlatan_manager.load('data/users.yaml', models_package='cavavin.models')\n charlatan_manager.install_all_fixtures()",
"def bootstrap():\n Base.metadata.create_all(engine)",
"def insert_data():\n\tBase.metadata.drop_all(engine)\n\tBase.metadata.create_all(engine)\n\tu1 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tu2 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tc = insert_catalog(u1.id, \"Sichuan Dish\")\n\tinsert_catalog(u1.id, \"Fujian Dish\")\n\tinsert_catalog(u1.id, \"Guangdong Dish\")\n\tinsert_catalog(u2.id, \"Zhejiang Dish\")\n\tinsert_catalog(u2.id, \"Beijing Dish\")\n\tinsert_item(u1.id, \"Iphone 6 plus\", c, 'Is a phone', None)\n\tinsert_item(u1.id, \"Hot pot\", c, \"Hot hot hot\", None)\n\tinsert_item(u2.id, \"Kong Bao Chicken\", c, \"Classic\", None)",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)",
"def init_data_base_installation(path):\n\n conn = sqlite3.connect(path)\n c = conn.cursor()\n\n c.execute(\"DROP TABLE IF EXISTS Installations\")\n c.execute('''CREATE TABLE Installations\n (\n numInstall TEXT, nameTown TEXT,\n INSEE TEXT,zipCode TEXT,placeCalled TEXT,\n numStreet TEXT,nameStreet TEXT,\n latitude REAL,longitude REAL,noAccessArrang TEXT,\n accesReducMobi TEXT,accessSensHand TEXT,\n sizeInM2 TEXT,caretakerAndHousing TEXT,\n multiTown TEXT,numberPlaceParking TEXT,\n numberPlaceParkingdHand TEXT,installParticular TEXT,\n servSubway TEXT,servBus TEXT,servTram TEXT,\n servTrain TEXT,servBoat TEXT,servOther TEXT,\n numberEquip TEXT,installUpd TEXT\n )''')\n\n conn.commit()\n conn.close()",
"def setup(self):\n #print \"Creating test database...\"\n files = glob.glob(os.path.join(self.home_dir, 'sqlFiles', '*.sql'))\n for fls in files:\n loc = fls.rfind('/')\n #print(\" \" + fls.replace('.sql', '')[loc + 1:])\n flh = open(fls, 'r')\n curs = self.cursor()\n curs.executescript(flh.read())\n self.commit()\n curs.close()\n flh.close()\n for fls in ['INSERTS', 'TRIGGERS']:\n #print(fls)\n flh = open(os.path.join(self.home_dir, 'sqlFiles', fls), 'r')\n curs = self.cursor()\n curs.executescript(flh.read())\n self.commit()\n curs.close()\n flh.close()",
"def mysql_system_database(run_services, mysql_data_dir, mysql_defaults_file, memory_temp_dir, lock_dir, services_log):\n if run_services:\n mysql_install_db = find_executable('mysql_install_db')\n assert mysql_install_db, 'You have to install mysql_install_db script.'\n\n my_print_defaults = find_executable('my_print_defaults')\n assert my_print_defaults, 'You have to install my_print_defaults script.'\n\n mysql_basedir = os.path.dirname(os.path.dirname(os.path.realpath(my_print_defaults)))\n\n try:\n services_log.debug('Starting mysql_install_db.')\n check_output([\n mysql_install_db,\n '--defaults-file={0}'.format(mysql_defaults_file),\n '--datadir={0}'.format(mysql_data_dir),\n '--basedir={0}'.format(mysql_basedir),\n '--user={0}'.format(os.environ['USER'])\n ])\n except CalledProcessWithOutputError as e:\n services_log.error(\n '{e.cmd} failed with output:\\n{e.output}\\nand erorr:\\n{e.err}. '\n 'Please ensure you disabled apparmor for /run/shm/** or for whole mysql'.format(e=e))\n raise\n finally:\n services_log.debug('mysql_install_db was executed.')",
"def initialise_database():\n with cd(code_dir):\n run(python_add_str + \"python manage.py syncdb --all\")\n run(python_add_str + \"python manage.py migrate --fake\")",
"def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')",
"def initdb_command():\n db.drop_all()\n db.create_all()\n if LOAD_DUMMY_DATA:\n setup_dummy_data()\n\n print('Initialized the database.')"
]
| [
"0.643989",
"0.62512773",
"0.6188407",
"0.5927646",
"0.5920215",
"0.5827072",
"0.57705396",
"0.5708299",
"0.56744295",
"0.56740564",
"0.5666191",
"0.56248426",
"0.56166804",
"0.56147426",
"0.5608201",
"0.5588403",
"0.55595106",
"0.55533975",
"0.5551925",
"0.55481184",
"0.5547758",
"0.55459434",
"0.55213255",
"0.55185723",
"0.550667",
"0.54977936",
"0.5494855",
"0.54851186",
"0.5482077",
"0.5475886"
]
| 0.6979793 | 0 |
Install datasets into postgres. | def install_postgres(dataset,
user='postgres',
password='',
host='localhost',
port=5432,
database='postgres',
database_name='{db}',
table_name='{db}.{table}',
bbox=[],
debug=False,
use_cache=True,
force=False,
hash_value=None):
args = {
'command': 'install',
'database': database,
'database_name': database_name,
'engine': 'postgres',
'dataset': dataset,
'host': host,
'port': port,
'password': password,
'table_name': table_name,
'user': user,
'bbox': bbox,
'force': force,
'hash_value': hash_value
}
return _install(args, use_cache, debug) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def task_installTestData(self):\n if env.get('environment') == 'production':\n abort(\"Don't use installTestData in production.\")\n\n if postgres.tableExists('trac', 'system'):\n abort(\"Existing Trac tables found.\")\n\n with settings(user=self.serviceUser):\n # Run trac initenv to create the postgresql database tables, but use\n # a throwaway trac-env directory because that comes from\n # https://github.com/twisted-infra/trac-config/tree/master/trac-env\n try:\n run('~/virtualenv/bin/trac-admin '\n '/tmp/trac-init initenv TempTrac postgres://@/trac git \"\"')\n finally:\n run(\"rm -rf /tmp/trac-init\")\n\n # Run an upgrade to add plugin specific database tables and columns.\n run('~/virtualenv/bin/trac-admin config/trac-env upgrade --no-backup')",
"def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)",
"def install():\n # update apt index\n deb.update_index(quiet=False)\n\n print(green('Installing PostgreSQL and its development packages.'))\n utils.deb.install('postgresql')\n utils.deb.install('postgresql-contrib')\n utils.deb.install('libpq-dev')",
"def setup_tap_postgres(self):\n db_script = os.path.join(DIR, '..', '..', 'db', 'tap_postgres_db.sh')\n self._run_command(db_script)",
"def install(drop_all=False):\n\n print \"Connecting to database...\"\n engine = create_engine(config.DATABASE_URI)\n Base.metadata.bind = engine\n\n if drop_all:\n print \"Dropping old tables...\"\n Base.metadata.drop_all()\n \n print \"Installing new schema...\"\n Base.metadata.create_all(engine)",
"def init_database(db: sa.engine.Connectable):\n\n # setup the Postgres extensions and schema\n db.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\" WITH SCHEMA public;\n \"\"\")\n db.execute(\n ';\\n'.join(\n 'CREATE SCHEMA IF NOT EXISTS {}'.format(s) for s in SCHEMAS.values()\n )\n )\n\n # create the schema from the models\n METADATA.create_all(bind=db)",
"def setup_target_postgres(self):\n self.run_query_target_postgres('CREATE EXTENSION IF NOT EXISTS pgcrypto')\n self.run_query_target_postgres(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres CASCADE'\n )\n self.run_query_target_postgres(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_public2 CASCADE'\n )\n self.run_query_target_postgres(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_logical1 CASCADE'\n )\n self.run_query_target_postgres(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_logical2 CASCADE'\n )\n self.run_query_target_postgres(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_mysql CASCADE'\n )\n self.run_query_target_postgres(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_mysql_2 CASCADE'\n )\n self.run_query_target_postgres(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_s3_csv CASCADE'\n )\n self.run_query_target_postgres(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_mongodb CASCADE'\n )\n\n # Clean config directory\n shutil.rmtree(os.path.join(CONFIG_DIR, 'postgres_dwh'), ignore_errors=True)",
"def bootstrap():\n Base.metadata.create_all(engine)",
"def insert_db():\n populate_tables()",
"def _install(args, use_cache, debug):\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n if args['dataset'].endswith('.zip') or args.get('hash_value'):\n path_to_archive = args['dataset']\n if args.get('hash_value'):\n path_to_archive = os.path.join(\n PROVENANCE_DIR, args['dataset'],\n '{}-{}.zip'.format(args['dataset'], args['hash_value']))\n if not os.path.exists(path_to_archive):\n print('The committed file does not exist.')\n engine = install_committed(path_to_archive,\n engine,\n force=args.get('force', False))\n return engine\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n print(\"=> Installing\", data_sets_script.name)\n try:\n if engine.name == \"HDF5\":\n sqlite_opts = {\n 'command': 'install',\n 'dataset': data_sets_script,\n 'engine': 'sqlite',\n 'file': (args[\"file\"].split(\".\"))[0] + \".db\",\n 'table_name': args[\"table_name\"],\n 'data_dir': args[\"data_dir\"]\n }\n sqlite_engine = choose_engine(sqlite_opts)\n data_sets_script.download(sqlite_engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n engine.script_table_registry = OrderedDict()\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n elif args['dataset'].startswith('socrata') and not data_sets_scripts:\n socrata_id = args['dataset'].split('-', 1)[1]\n resource = find_socrata_dataset_by_id(socrata_id)\n\n if \"error\" in resource.keys():\n if resource[\"datatype\"][0] == \"map\":\n print(\"{} because map type datasets are not supported\".format(\n resource[\"error\"]))\n else:\n print(\"{} because it is of type {} and not tabular\".format(\n resource[\"error\"], resource[\"datatype\"][1]))\n elif len(resource.keys()) == 0:\n return\n else:\n print(\"=> Installing\", args['dataset'])\n name = f\"socrata-{socrata_id}\"\n create_socrata_dataset(engine, name, resource)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n elif args['dataset'].startswith('rdataset') and not data_sets_scripts:\n print(\"=> Installing\", args['dataset'])\n rdataset = args['dataset'].split('-')\n update_rdataset_catalog()\n package, dataset_name = rdataset[1], rdataset[2]\n create_rdataset(engine, package, dataset_name)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n else:\n message = \"Run retriever.datasets() to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine",
"def post_install_postgresql():\n from django.conf import settings as s\n with settings(warn_only=True):\n sudo('/etc/init.d/postgresql-8.4 restart')\n sudo(\"psql template1 -c \\\"ALTER USER postgres \" \\\n \"with encrypted password '%s';\\\" \" % env.password,\n user='postgres')\n sudo(\"psql -f /usr/share/postgresql/8.4/contrib/adminpack.sql\",\n user='postgres')\n if (s.DATABASES['default']['ENGINE'] ==\n 'django.db.backends.postgresql_psycopg2'):\n sudo(\"psql template1 -c \\\"CREATE ROLE %s LOGIN with \" \\\n \"encrypted password '%s';\\\" \" % (\n s.DATABASES['default']['USER'],\n s.DATABASES['default']['PASSWORD'],\n ),\n user='postgres')\n sudo('createdb -T template0 -O %s %s' % (\n s.DATABASES['default']['USER'],\n s.DATABASES['default']['NAME'],\n ),\n user='postgres')\n\n print \"* setup postgres user password with your \" \\\n \"'%s' password\" % env.user\n print \"* imported the adminpack\"\n print \"Post install setup of Postgresql complete!\"",
"def setup_db(self):\n\n with cd(\"/var/lib/postgresql\"):\n with settings(warn_only=True):\n sudo(\"psql -c \\\"CREATE USER {0} WITH PASSWORD '{1}';\\\"\".format(config.get(\"postgres_user\", self.app_name), config.get(\"postgres_password\", self.app_name)), user=\"postgres\")\n sudo(\"createdb {0}\".format(self.db_name), user=\"postgres\")\n sudo(\"psql -c \\\"GRANT ALL PRIVILEGES ON DATABASE {0} TO {1};\\\"\".format(self.db_name, config.get(\"postgres_user\", self.app_name)), user=\"postgres\")",
"def insert_data():\n\tBase.metadata.drop_all(engine)\n\tBase.metadata.create_all(engine)\n\tu1 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tu2 = insert_user(\"[email protected]\", \"/static/image/avatar.JPG\")\n\tc = insert_catalog(u1.id, \"Sichuan Dish\")\n\tinsert_catalog(u1.id, \"Fujian Dish\")\n\tinsert_catalog(u1.id, \"Guangdong Dish\")\n\tinsert_catalog(u2.id, \"Zhejiang Dish\")\n\tinsert_catalog(u2.id, \"Beijing Dish\")\n\tinsert_item(u1.id, \"Iphone 6 plus\", c, 'Is a phone', None)\n\tinsert_item(u1.id, \"Hot pot\", c, \"Hot hot hot\", None)\n\tinsert_item(u2.id, \"Kong Bao Chicken\", c, \"Classic\", None)",
"def install(engine):\n sql = _postgres_install(engine) if \"postgres\" in engine.name else _sqlite_install()\n execute_statements(engine, sql)",
"def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()",
"def install(clean=False):\n\n # Drop tables\n if clean:\n app.logger.info(\"Dropping all tables...\")\n from utils import drop_all_tables\n drop_all_tables(app)\n\n # Create tables\n app.logger.info(\"Creating all tables...\")\n import cavavin.models # NOQA: to register all Model\n db.create_all()\n\n # Import fixture data\n app.logger.info(\"Installing default data...\")\n charlatan_manager = charlatan.FixturesManager(db_session=db.session, use_unicode=True)\n charlatan_manager.load('data/countries.yaml', models_package='cavavin.models')\n charlatan_manager.load('data/users.yaml', models_package='cavavin.models')\n charlatan_manager.install_all_fixtures()",
"def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()",
"def _init_db():\n c = ppc.app().config['PUBLICPRIZE']['DATABASE']\n e = os.environ.copy()\n e['PGPASSWORD'] = c['postgres_pass']\n subprocess.call(\n ['createuser', '--host=' + c['host'], '--user=postgres',\n '--no-superuser', '--no-createdb', '--no-createrole', c['user']],\n env=e)\n p = subprocess.Popen(\n ['psql', '--host=' + c['host'], '--user=postgres', 'template1'],\n env=e,\n stdin=subprocess.PIPE)\n s = u\"ALTER USER {user} WITH PASSWORD '{password}'\".format(**c)\n enc = locale.getlocale()[1]\n loc = locale.setlocale(locale.LC_ALL)\n p.communicate(input=bytes(s, enc))\n subprocess.check_call(\n ['createdb', '--host=' + c['host'], '--encoding=' + enc,\n '--locale=' + loc, '--user=postgres',\n '--template=template0',\n '--owner=' + c['user'], c['name']],\n env=e)",
"def install(self):\n conn = sqlite3.connect(self.__DB)\n cursor = conn.cursor()\n\n # creating tables...\n\n cursor.execute('''\n CREATE TABLE users (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n agency TEXT NOT NULL,\n account TEXT NOT NULL,\n password TEXT NOT NULL,\n balance REAL NOT NULL\n );\n ''')\n\n cursor.execute('''\n CREATE TABLE history (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n register TEXT NOT NULL,\n owner INTEGER NOT NULL\n );\n ''')\n\n # inserting a few users by default (there isn't 'sign up' requirement for this app)...\n\n hasher = User('', '', '')\n users_data = [\n ('A1', '00000-0', hasher.str_to_hash('pass0'), 1500),\n ('A1', '11111-1', hasher.str_to_hash('pass1'), 400),\n ('A2', '22222-2', hasher.str_to_hash('pass2'), 260),\n ('A3', '33333-3', hasher.str_to_hash('pass3'), 380),\n ('A2', '44444-4', hasher.str_to_hash('pass4'), 240),\n ]\n\n cursor.executemany('''\n INSERT INTO users (agency, account, password, balance)\n VALUES (?, ?, ?, ?);\n ''', users_data)\n\n conn.commit()\n conn.close()\n\n self.load_users()",
"def init_new_db(args):\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n session = Session()\n session.add(Environment(name='normal', slickurl='http://slicker.homestead-corp.com/slickij', buildurl='?', filename='hs-tcrunij.tar.gz', tcrunijsubdir='hs-tcrunij/tcrunij'))\n session.add(Environment(name='dev', slickurl='http://octomom.homestead-corp.com/slickij', buildurl='?', filename='tcrunij.tar.gz', tcrunijsubdir='tcrunij/tcrunij'))\n session.commit()",
"def init_data_base_installation(path):\n\n conn = sqlite3.connect(path)\n c = conn.cursor()\n\n c.execute(\"DROP TABLE IF EXISTS Installations\")\n c.execute('''CREATE TABLE Installations\n (\n numInstall TEXT, nameTown TEXT,\n INSEE TEXT,zipCode TEXT,placeCalled TEXT,\n numStreet TEXT,nameStreet TEXT,\n latitude REAL,longitude REAL,noAccessArrang TEXT,\n accesReducMobi TEXT,accessSensHand TEXT,\n sizeInM2 TEXT,caretakerAndHousing TEXT,\n multiTown TEXT,numberPlaceParking TEXT,\n numberPlaceParkingdHand TEXT,installParticular TEXT,\n servSubway TEXT,servBus TEXT,servTram TEXT,\n servTrain TEXT,servBoat TEXT,servOther TEXT,\n numberEquip TEXT,installUpd TEXT\n )''')\n\n conn.commit()\n conn.close()",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['STAGE'].values()))\n cur = conn.cursor()\n \n #remove the existing tables\n drop_tables(cur, conn)\n \n #replace the tables with new ones\n create_tables(cur, conn)\n \n #add missing postcode value into table\n default_missing_values(cur, conn)\n \n conn.close()",
"def create_db():\n init_postgres(current_app.config['SQLALCHEMY_DATABASE_URI'])",
"def setup_database(self):\n self.db.setup_database()",
"def setup_app(command, conf, vars):\n load_environment(conf.global_conf, conf.local_conf)\n\n # Create the tables if they don't already exist\n # meta.metadata.drop_all(bind=meta.engine, checkfirst=True)\n meta.metadata.create_all(bind=meta.engine)",
"def init_db(self):\n if self.is_client() or not self.is_responsible_validator():\n return\n\n ip, _ = self.experiment.get_peer_ip_port_by_id(self.my_id)\n\n self.db_path = os.path.join(\"/tmp\", \"postgres-data\", ip)\n shutil.rmtree(self.db_path, ignore_errors=True)\n os.makedirs(self.db_path, exist_ok=True)\n\n os.system(\"/usr/lib/postgresql/11/bin/initdb %s > postgres.out\" % self.db_path)",
"def setup_db():\n\n engine = config['tg.app_globals'].sa_engine\n # model.init_model(engine)\n # model.metadata.create_all(engine)",
"def initialise_database():\n with cd(code_dir):\n run(python_add_str + \"python manage.py syncdb --all\")\n run(python_add_str + \"python manage.py migrate --fake\")",
"def setup(self):\n self.load_connection_info(self.ini_filename)\n if self.conn_info:\n self.logger.info('Load connection info of Postgres')\n\n psql_connection_info = f\"dbname={self.conn_info['dbname']} \" \\\n f\"user={self.conn_info['user']} \" \\\n f\"password={self.conn_info['password']} \" \\\n f\"port={self.conn_info['port']}\" \n \n check_db = self.create_db(psql_connection_info)\n\n connection = psycopg2.connect((\n f\"dbname=password_manager \" \\\n f\"user={self.conn_info['user']} \" \\\n f\"password={self.conn_info['password']} \" \\\n f\"port={self.conn_info['port']}\")) \n cursor = connection.cursor()\n\n if check_db:\n self.logger.info('Database has been created')\n\n check_tables = self.create_tables(connection, \n cursor, \n self.sql_query_table_person, \n self.sql_query_table_login_data)\n \n if check_tables:\n self.logger.info('Tables have been created')\n else:\n self.logger.info('Tables do not exist')\n else:\n self.logger.info('Database does not exist')\n \n connection.close()\n cursor.close()\n else:\n self.logger.info('Connection to Postgres could not esablished')",
"def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()"
]
| [
"0.67101014",
"0.65121603",
"0.6294656",
"0.62906325",
"0.6240764",
"0.6184617",
"0.6175147",
"0.60847354",
"0.6074436",
"0.60625094",
"0.6056598",
"0.6044377",
"0.6042259",
"0.6034427",
"0.6003979",
"0.59687144",
"0.59366",
"0.5833422",
"0.5800394",
"0.5787577",
"0.575593",
"0.57428503",
"0.5742408",
"0.57389003",
"0.5727066",
"0.57270205",
"0.5707131",
"0.5686782",
"0.5681013",
"0.5665501"
]
| 0.7324537 | 0 |
Install datasets into msaccess. | def install_msaccess(dataset,
file='access.mdb',
table_name='[{db} {table}]',
data_dir=DATA_DIR,
debug=False,
use_cache=True,
force=False,
hash_value=None):
args = {
'command': 'install',
'dataset': dataset,
'engine': 'msaccess',
'file': file,
'table_name': table_name,
'data_dir': data_dir,
'force': force,
'hash_value': hash_value
}
return _install(args, use_cache, debug) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _install(args, use_cache, debug):\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n if args['dataset'].endswith('.zip') or args.get('hash_value'):\n path_to_archive = args['dataset']\n if args.get('hash_value'):\n path_to_archive = os.path.join(\n PROVENANCE_DIR, args['dataset'],\n '{}-{}.zip'.format(args['dataset'], args['hash_value']))\n if not os.path.exists(path_to_archive):\n print('The committed file does not exist.')\n engine = install_committed(path_to_archive,\n engine,\n force=args.get('force', False))\n return engine\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n print(\"=> Installing\", data_sets_script.name)\n try:\n if engine.name == \"HDF5\":\n sqlite_opts = {\n 'command': 'install',\n 'dataset': data_sets_script,\n 'engine': 'sqlite',\n 'file': (args[\"file\"].split(\".\"))[0] + \".db\",\n 'table_name': args[\"table_name\"],\n 'data_dir': args[\"data_dir\"]\n }\n sqlite_engine = choose_engine(sqlite_opts)\n data_sets_script.download(sqlite_engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n engine.script_table_registry = OrderedDict()\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n elif args['dataset'].startswith('socrata') and not data_sets_scripts:\n socrata_id = args['dataset'].split('-', 1)[1]\n resource = find_socrata_dataset_by_id(socrata_id)\n\n if \"error\" in resource.keys():\n if resource[\"datatype\"][0] == \"map\":\n print(\"{} because map type datasets are not supported\".format(\n resource[\"error\"]))\n else:\n print(\"{} because it is of type {} and not tabular\".format(\n resource[\"error\"], resource[\"datatype\"][1]))\n elif len(resource.keys()) == 0:\n return\n else:\n print(\"=> Installing\", args['dataset'])\n name = f\"socrata-{socrata_id}\"\n create_socrata_dataset(engine, name, resource)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n elif args['dataset'].startswith('rdataset') and not data_sets_scripts:\n print(\"=> Installing\", args['dataset'])\n rdataset = args['dataset'].split('-')\n update_rdataset_catalog()\n package, dataset_name = rdataset[1], rdataset[2]\n create_rdataset(engine, package, dataset_name)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n else:\n message = \"Run retriever.datasets() to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine",
"def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')",
"def setup(self, ds):\n pass",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)",
"def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()",
"def set_data(db_dir, command, args = None):\n #print command\n with lite.connect((db_dir)) as conn:\n #try:\n cursor = conn.cursor()\n if args:\n cursor.execute(command,args)\n else:\n cursor.execute(command)\n conn.commit()\n #print '[sql management] set successfully the data according to:\\n--- %s ---'%(command )\n return True\n #except:\n return False\n return False",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')",
"def task_installTestData(self):\n if env.get('environment') == 'production':\n abort(\"Don't use installTestData in production.\")\n\n if postgres.tableExists('trac', 'system'):\n abort(\"Existing Trac tables found.\")\n\n with settings(user=self.serviceUser):\n # Run trac initenv to create the postgresql database tables, but use\n # a throwaway trac-env directory because that comes from\n # https://github.com/twisted-infra/trac-config/tree/master/trac-env\n try:\n run('~/virtualenv/bin/trac-admin '\n '/tmp/trac-init initenv TempTrac postgres://@/trac git \"\"')\n finally:\n run(\"rm -rf /tmp/trac-init\")\n\n # Run an upgrade to add plugin specific database tables and columns.\n run('~/virtualenv/bin/trac-admin config/trac-env upgrade --no-backup')",
"def setup(self):\n return self.setupDatabases()",
"def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)",
"def _install(self):\n\n pass",
"def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()",
"def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)",
"def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)",
"def setup():\n\tglobal conn_old\n\tconn_old = pdatab.connect_permissions_db(ip=\"localhost\", port=3306, db=\"pcomp_srv_sb\")\n\tglobal conn_new\n\tconn_new = pdatab.connect_permissions_db(ip=\"localhost\", port=3306, db=\"pcomp_srv\")",
"def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)",
"def install(clean=False):\n\n # Drop tables\n if clean:\n app.logger.info(\"Dropping all tables...\")\n from utils import drop_all_tables\n drop_all_tables(app)\n\n # Create tables\n app.logger.info(\"Creating all tables...\")\n import cavavin.models # NOQA: to register all Model\n db.create_all()\n\n # Import fixture data\n app.logger.info(\"Installing default data...\")\n charlatan_manager = charlatan.FixturesManager(db_session=db.session, use_unicode=True)\n charlatan_manager.load('data/countries.yaml', models_package='cavavin.models')\n charlatan_manager.load('data/users.yaml', models_package='cavavin.models')\n charlatan_manager.install_all_fixtures()",
"def init_data_base_installation(path):\n\n conn = sqlite3.connect(path)\n c = conn.cursor()\n\n c.execute(\"DROP TABLE IF EXISTS Installations\")\n c.execute('''CREATE TABLE Installations\n (\n numInstall TEXT, nameTown TEXT,\n INSEE TEXT,zipCode TEXT,placeCalled TEXT,\n numStreet TEXT,nameStreet TEXT,\n latitude REAL,longitude REAL,noAccessArrang TEXT,\n accesReducMobi TEXT,accessSensHand TEXT,\n sizeInM2 TEXT,caretakerAndHousing TEXT,\n multiTown TEXT,numberPlaceParking TEXT,\n numberPlaceParkingdHand TEXT,installParticular TEXT,\n servSubway TEXT,servBus TEXT,servTram TEXT,\n servTrain TEXT,servBoat TEXT,servOther TEXT,\n numberEquip TEXT,installUpd TEXT\n )''')\n\n conn.commit()\n conn.close()",
"def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)",
"def load_sba_datasets(dbm, direc):\n foia_504_1991_present = pd.read_excel(direc + 'FOIA - 504 (FY1991-Present).xlsx')\n foia_7a_1991_1999 = pd.read_excel(direc + 'FOIA - 7(a) (FY1991-FY1999).xlsx', skiprows=1)\n foia_7a_2000_2009 = pd.read_excel(direc + 'FOIA - 7(a)(FY2000-FY2009).xlsx', skiprows=1)\n foia_7a_2010_present = pd.read_excel(direc + 'FOIA - 7(a) (FY2010-Present).xlsx')\n\n dbm.write_df_table(\n foia_504_1991_present, table_name='sba__foia_504_1991_present', schema='data_ingest')\n dbm.write_df_table(\n foia_7a_1991_1999, table_name='sba__foia_7a_1991_1999', schema='data_ingest')\n dbm.write_df_table(\n foia_7a_2000_2009, table_name='sba__foia_7a_2000_2009', schema='data_ingest')\n dbm.write_df_table(\n foia_7a_2010_present, table_name='sba__foia_7a_2010_present', schema='data_ingest')",
"def cmd_install(self, wwwdata):\n server_dir = os.path.join(data_dir, 'server')\n shutil.copytree(server_dir, os.path.join(wwwdata, 'templeton'))\n return 0",
"def install():\n execute(generate)\n execute(upload)",
"def InstallPandasTools():\n global _originalSettings\n if len(_originalSettings) == 0:\n _originalSettings['Chem.Mol.__ge__'] = Chem.Mol.__ge__\n _originalSettings['Chem.Mol.__str__'] = Chem.Mol.__str__\n rdchem.Mol.__ge__ = _molge\n rdchem.Mol.__str__ = PrintAsBase64PNGString",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()",
"def makecldf(args):\n with_dataset(args, Dataset._install)",
"def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()",
"def install(self, clientDataDir, suppressPackageContentFileGeneration=False):\n\t\tself.setClientDataDir(clientDataDir)\n\t\tself.getMetaData()\n\t\tself.runPreinst()\n\t\tself.extractData()\n\t\tif not suppressPackageContentFileGeneration:\n\t\t\tself.createPackageContentFile()\n\t\tself.setAccessRights()\n\t\tself.runPostinst()\n\t\tself.cleanup()",
"def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)"
]
| [
"0.5937385",
"0.58411247",
"0.5744515",
"0.5672274",
"0.56464726",
"0.56296736",
"0.5511215",
"0.54886913",
"0.54392475",
"0.54213166",
"0.5409287",
"0.5405523",
"0.53193307",
"0.5306803",
"0.5306803",
"0.5288545",
"0.5281527",
"0.5243119",
"0.52429676",
"0.5222793",
"0.5219687",
"0.5195806",
"0.5174431",
"0.5170653",
"0.5139506",
"0.5139506",
"0.5128672",
"0.5116169",
"0.5081987",
"0.5076374"
]
| 0.6459073 | 0 |
Install datasets into json. | def install_json(dataset,
table_name='{db}_{table}.json',
data_dir=DATA_DIR,
debug=False,
use_cache=True,
pretty=False,
force=False,
hash_value=None):
args = {
'command': 'install',
'dataset': dataset,
'engine': 'json',
'table_name': table_name,
'data_dir': data_dir,
'pretty': pretty,
'force': force,
'hash_value': hash_value
}
return _install(args, use_cache, debug) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')",
"def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')",
"def main(json_data_filepath='dataset.json',\n dataset_path='.',\n filename_base='dataset',\n drop_irrelevant_tweets=True,\n add_company_datasets=False,\n logging_level=log.INFO,\n ):\n log.basicConfig(level=logging_level, format='%(message)s')\n log.info(f'building the dataset')\n\n if not os.path.isfile(json_data_filepath):\n log.fatal(f'\\tfilepath doesn\\'t exist: {json_data_filepath}')\n exit(-1)\n\n full_dataset_filepath = Path(dataset_path) / f'{filename_base}.csv'\n remove_filepath_if_exists(full_dataset_filepath)\n\n create_dataset(Path(json_data_filepath), full_dataset_filepath, drop_irrelevant_tweets)\n\n if add_company_datasets:\n create_separate_company_datasets(full_dataset_filepath,\n Path(dataset_path),\n filename_base)",
"def create_dataset():\n with open(\"/root/config.json\", \"r\") as f:\n config = json.load(f)\n\n # create environmental variables\n for (key, value) in config.items():\n os.environ[key] = str(value)\n\n # run blender\n command = '/usr/lib/blender/blender {} --python {} --background'.\\\n format(\"/root/models/default.blend\", \"/root/rendering.py\")\n os.system(command)\n\n # post processing\n post_processing()",
"def setup():\n for dir_path in [train_dir, output_dir]:\n Path(dir_path).mkdir(exist_ok=True)\n\n # create the training and test data files that we will use\n create_jsonlines_feature_files(train_dir)",
"def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)",
"def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")",
"def create_data_base():\n\n\tscript_files = []\n\tjson_files = []\n\t\n\t# get script files list\n\tfor file in os.listdir(\"learned_objects_scripts/\"):\n\t\tif file.endswith(\".script\"):\n\t\t\tscript_files.append(file)\n\n\t# get json files list\n\tfor file in os.listdir(\"object_models/\"):\n\t\tif file.endswith(\".json\"):\n\t\t\tjson_files.append(file)\n\t\n\t# create json file for new objects\n\tmodel_created = False\n\tfor file in script_files:\n\t\tif \"{}.json\".format(file[:-7]) not in json_files:\n\t\t\twith open(\"object_models/{}.json\".format(file[:-7]), 'w') as outfile:\n\t\t\t\tobj_model = object_script_to_model(\"learned_objects_scripts/\" + file)\n\t\t\t\tjson.dump(obj_model, outfile)\n\t\t\t\tmodel_created = True\n\t\t\t\tprint(\"model created for\", file)\n\tif not model_created:\n\t\tprint(\"data base is already up to date\")",
"def datasets(self):\n pass",
"def handle(self, args):\n\n # Ensure there is an uploads directory\n if not os.path.exists(args.uploads) or not os.path.isdir(args.uploads):\n if args.force:\n os.makedirs(args.uploads)\n else:\n raise ConsoleError(\n \"no uploads directory at '{}' use -f to create\".format(args.uploads)\n )\n\n for dataset in args.datasets:\n # Remove the trailing slash\n dataset = dataset.rstrip(os.path.sep)\n\n # Check if the dataset is valid\n if not args.force and not is_valid(dataset, False):\n print(color.format(\"cannot package invalid dataset at {}\", color.LIGHT_RED, dataset))\n continue\n\n name = os.path.basename(dataset)\n out = os.path.join(args.uploads, name)\n\n if not args.force and os.path.exists(out+\".zip\"):\n print(color.format(\"dataset exists at {} use -f to overwrite\", color.LIGHT_YELLOW, out))\n continue\n\n try:\n out = shutil.make_archive(out, \"zip\", root_dir=os.path.dirname(dataset), base_dir=name)\n self.update_manifest(out)\n except Exception as e:\n print(color.format(\"could not package dataset {} at {}: {}\", color.LIGHT_RED, dataset, out.rstrip(\".zip\")+\".zip\", e))\n continue\n\n print(color.format(\"packaged dataset at {}\", color.LIGHT_GREEN, out))",
"def add_packages_to_json(filepath):\n with open(filepath, \"r\") as f:\n data = json.load(f)\n data[\"package_versions\"] = {}\n data[\"package_versions\"][\"scipy\"] = pkg_version(\"scipy\")\n data[\"package_versions\"][\"numpy\"] = pkg_version(\"numpy\")\n data[\"package_versions\"][\"cython\"] = pkg_version(\"cython\")\n data[\"package_versions\"][\"qutip\"] = pkg_version(\"qutip\")\n data[\"package_versions\"][\"pytest\"] = pkg_version(\"pytest\")\n with open(filepath, \"w\") as f:\n json.dump(data, f, indent=4, separators=(\",\", \": \"))\n return data",
"def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()",
"def install_for_spec(self):\n self.create_package_json()\n os.system('npm install json-refs')\n os.system('npm install json2yaml')\n os.system('npm install yamljs')\n os.system('npm install swagger-split') # package only required while splitting hence being installed here\n self.delete_package_json()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()",
"def load_data(self, dataset, dataset_name):\n with open(dataset, \"r\", encoding=\"utf-8\") as f:\n self.data = json.load(f)\n self.dataset_name = dataset_name",
"def parse_json_data(settings, dataset):\n for directory in dataset: # for directory in list of directories\n directory[\"data\"] = []\n for record in directory[\"rawdata\"]: # each record is the raw JSON data of a file in a directory\n jsonrootpath = get_json_root_path(record)\n globaloptions = get_json_global_options(record)\n #for item in record[\"client_stats\"]:\n # if \"job options\" in item.keys():\n # print(item[\"job options\"][\"iodepth\"])\n process_json_record(settings, directory, record, jsonrootpath, globaloptions)\n #print(\"================================\")\n #print(directory[\"data\"])\n #for directory in dataset:\n # for item in directory[\"data\"]:\n # print(item[\"iodepth\"])\n directory[\"data\"] = sort_list_of_dictionaries(directory[\"data\"])\n return dataset",
"def import_data(self, keyname, data):\n return self.database.jsonset(keyname, Path.rootPath(), data)",
"def dataset_initialize(self, folder):\r\n if not os.path.isdir(folder):\r\n raise ValueError('Invalid folder: ' + folder)\r\n\r\n ref = self.config_values[self.CONFIG_NAME_USER] + '/INSERT_SLUG_HERE'\r\n licenses = []\r\n default_license = {'name': 'CC0-1.0'}\r\n licenses.append(default_license)\r\n\r\n meta_data = {\r\n 'title': 'INSERT_TITLE_HERE',\r\n 'id': ref,\r\n 'licenses': licenses\r\n }\r\n meta_file = os.path.join(folder, self.DATASET_METADATA_FILE)\r\n with open(meta_file, 'w') as f:\r\n json.dump(meta_data, f, indent=2)\r\n\r\n print('Data package template written to: ' + meta_file)\r\n return meta_file",
"def create(self):\n\n if len(self.filenames) != len(self.download_links):\n print(\"Must have the same amount off file names than download links\", file=sys.stderr)\n return None\n\n resources = []\n\n #Creating the resource dict\n for i in range(len(self.filenames)):\n resources.append(\n {\n \"id\": self.ids[i],\n \"description\":\"\",\n \"filename\":self.filenames[i],\n \"download_link\":self.download_links[i]\n }\n )\n\n\n #The JSON\n data = {\n \"dataset\":{\n \"project\":self.project,\n \"version\":self.version,\n \"description\":self.description,\n \"project_link\":self.project_link,\n \"data_path\": self.data_path,\n \"metadata\": self.metadata,\n \"files_type\":self.file_type,\n \"protocole\":self.protocole,\n \"resources\":resources,\n \"data_representation\":self.data_representation\n }\n }\n with open(self.dataset_path, \"w\") as json_file:\n json_file.write(json.dumps(data))",
"def new_dataset(args):\n if not args.args:\n raise ParserError('you must specify an existing directory')\n outdir = Path(args.args.pop(0))\n if not outdir.exists():\n raise ParserError('you must specify an existing directory')\n\n id_pattern = re.compile('[a-z_0-9]+$')\n md = {}\n if args.args:\n md['id'] = args.args.pop(0)\n else:\n md['id'] = input('Dataset ID: ')\n\n while not id_pattern.match(md['id']):\n print('dataset id must only consist of lowercase ascii letters, digits and _ (underscore)!')\n md['id'] = input('Dataset ID: ')\n\n outdir = outdir / md['id']\n if not outdir.exists():\n outdir.mkdir()\n\n for key in ['title', 'url', 'license', 'conceptlist', 'citation']:\n md[key] = input('Dataset {0}: '.format(key))\n\n # check license!\n # check conceptlist!\n\n for path in Path(pylexibank.__file__).parent.joinpath('dataset_template').iterdir():\n if path.is_file():\n if path.suffix in ['.pyc']:\n continue # pragma: no cover\n target = path.name\n content = read_text(path)\n if '+' in path.name:\n target = re.sub(\n '\\+([a-z]+)\\+',\n lambda m: '{' + m.groups()[0] + '}',\n path.name\n ).format(**md)\n if target.endswith('_tmpl'):\n target = target[:-5]\n content = content.format(**md)\n write_text(outdir / target, content)\n else:\n target = outdir / path.name\n if target.exists():\n shutil.rmtree(str(target))\n shutil.copytree(str(path), str(target))\n del md['id']\n jsonlib.dump(md, outdir / 'metadata.json', indent=4)",
"def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')",
"def set_data(self):\n data = [\n {\"ASN1P-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"ASN1P\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }},\n {\"asn1rt-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"asn1rt\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }},\n {\"cadessignature-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"cadessignature\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }},\n { \"JCP-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"JCP\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }},\n { \"JCPRequest-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"JCPRequest\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }}\n ]\n return data",
"def package_datasets(ds_all, dirname=''):\n ds_all = copy.deepcopy(ds_all)\n assert dirname != '', \"dirname required\"\n package_dataset(ds_all['ds_train_um'], dirname=join('.', dirname, 'train'))\n package_dataset(ds_all['ds_valid_um'], dirname=join('.', dirname, 'valid'))\n package_dataset(ds_all['ds_test_um'], dirname=join('.', dirname, 'test'))",
"def _install(args, use_cache, debug):\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n if args['dataset'].endswith('.zip') or args.get('hash_value'):\n path_to_archive = args['dataset']\n if args.get('hash_value'):\n path_to_archive = os.path.join(\n PROVENANCE_DIR, args['dataset'],\n '{}-{}.zip'.format(args['dataset'], args['hash_value']))\n if not os.path.exists(path_to_archive):\n print('The committed file does not exist.')\n engine = install_committed(path_to_archive,\n engine,\n force=args.get('force', False))\n return engine\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n print(\"=> Installing\", data_sets_script.name)\n try:\n if engine.name == \"HDF5\":\n sqlite_opts = {\n 'command': 'install',\n 'dataset': data_sets_script,\n 'engine': 'sqlite',\n 'file': (args[\"file\"].split(\".\"))[0] + \".db\",\n 'table_name': args[\"table_name\"],\n 'data_dir': args[\"data_dir\"]\n }\n sqlite_engine = choose_engine(sqlite_opts)\n data_sets_script.download(sqlite_engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n engine.script_table_registry = OrderedDict()\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n elif args['dataset'].startswith('socrata') and not data_sets_scripts:\n socrata_id = args['dataset'].split('-', 1)[1]\n resource = find_socrata_dataset_by_id(socrata_id)\n\n if \"error\" in resource.keys():\n if resource[\"datatype\"][0] == \"map\":\n print(\"{} because map type datasets are not supported\".format(\n resource[\"error\"]))\n else:\n print(\"{} because it is of type {} and not tabular\".format(\n resource[\"error\"], resource[\"datatype\"][1]))\n elif len(resource.keys()) == 0:\n return\n else:\n print(\"=> Installing\", args['dataset'])\n name = f\"socrata-{socrata_id}\"\n create_socrata_dataset(engine, name, resource)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n elif args['dataset'].startswith('rdataset') and not data_sets_scripts:\n print(\"=> Installing\", args['dataset'])\n rdataset = args['dataset'].split('-')\n update_rdataset_catalog()\n package, dataset_name = rdataset[1], rdataset[2]\n create_rdataset(engine, package, dataset_name)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n else:\n message = \"Run retriever.datasets() to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine",
"def datasets(self, datasets):\n\n self._datasets = datasets",
"def update_from_json(self, json_string):\n parsed = json.loads(json_string)\n self.add_packages(parsed.pop('packages', []))\n self.data(parsed)"
]
| [
"0.6195561",
"0.6150888",
"0.61424214",
"0.60152555",
"0.5971217",
"0.59472746",
"0.5840958",
"0.58245456",
"0.58092654",
"0.58033806",
"0.57662797",
"0.5756459",
"0.5742945",
"0.57381874",
"0.5732792",
"0.5702144",
"0.5700371",
"0.5700371",
"0.5693736",
"0.56893134",
"0.5635628",
"0.5605677",
"0.5590064",
"0.55886",
"0.5579042",
"0.55509025",
"0.55432314",
"0.55271393",
"0.5518702",
"0.5515259"
]
| 0.6503668 | 0 |
Install datasets into hdf5. | def install_hdf5(dataset,
file='hdf5.h5',
table_name='{db}_{table}',
data_dir=DATA_DIR,
debug=False,
use_cache=True,
hash_value=None):
args = {
'command': 'install',
'dataset': dataset,
'engine': 'hdf5',
'file': file,
'table_name': table_name,
'data_dir': data_dir,
'hash_value': hash_value
}
return _install(args, use_cache, debug) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')",
"def setup(self, ds):\n pass",
"def init_hdf5_file(datasets, init_start, init_end, hdf5_file):\n schema = tokio.connectors.hdf5.SCHEMA.get(SCHEMA_VERSION)\n for dataset_name, dataset in datasets.items():\n hdf5_dataset_name = schema.get(dataset_name)\n if hdf5_dataset_name is None:\n if '/_' not in dataset_name:\n warnings.warn(\"Dataset key %s is not in schema\" % dataset_name)\n continue\n if hdf5_dataset_name not in hdf5_file:\n # attempt to convert dataset into a timeseries\n timeseries = hdf5_file.to_timeseries(dataset_name=hdf5_dataset_name)\n\n # if dataset -> timeseries failed, create and commit a new, empty timeseries\n if timeseries is None:\n timeseries = tokio.timeseries.TimeSeries(dataset_name=hdf5_dataset_name,\n start=init_start,\n end=init_end,\n timestep=dataset.timestep,\n num_columns=dataset.dataset.shape[1])\n hdf5_file.commit_timeseries(timeseries=timeseries)\n print(\"Initialized %s in %s with size %s\" % (\n hdf5_dataset_name,\n hdf5_file.name,\n timeseries.dataset.shape))",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')",
"def initialize_data(self , station = '', datasets = {} ): \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n self.out_name = self.out_dir + '/' + self.station + '_CEUAS_premerged_v0.nc'\n\n self.observations_table_vars = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units', 'source_id']\n\n \"\"\" Loading the econding of the tables created from the harvester script and to be applied again \"\"\"\n self.encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n self.encodings['era5fb'] = np.load('era5fb_encodings_all.npy' , allow_pickle = True ).item() \n self.dic_type_attributes = np.load('dic_type_attributes.npy',allow_pickle= True).item()\n \n self.era5fb_columns = self.dic_type_attributes['era5fb'].keys()\n\n self.obstab_nans_filled = False \n\n data['cdm_tables'] = {} \n \n \"\"\" Loop over all the datasets \n k: name of the dataset \n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ] \"\"\" \n for k,v in self.datasets.items() :\n data[k] = {}\n for F in v:\n \n logging.info(' Dataset ::: *** %s %s ' , k , F ) \n \n data[k][F] = {}\n\n h5py_file = h5py.File(F, 'r')\n data[k][F]['h5py_file'] = h5py_file \n \n a = h5py_file['recordtimestamp']\n \n data[k][F]['recordtimestamp'] = a\n data[k][F]['recordindex'] = h5py_file['recordindex']\n data[k][F]['dateindex'] = h5py_file['dateindex']\n a = h5py_file['recordtimestamp']\n data[k][F]['max_date'] = max(a)\n data[k][F]['min_date'] = min(a)\n \n data[k][F]['counter'] = 0\n\n #######\n # HEADER TABLE\n #######\n head_tab = h5py_file['header_table']\n logging.info('*** header_table')\n data[k][F]['header_table'] = {}\n for var in head_tab.keys():\n if ('string' in var or 'hdrlen' in var): continue\n try: \n data[k][F]['header_table'][var] = (np.array(head_tab[var][:])).astype(self.dic_type_attributes['header_table'][var]['type'] )\n except:\n print('failed convertion type header' , k , ' ' , F , ' ' , var )\n \n ####### \n # STATION CONFIGURATION\n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'station_configuration' , decode_times = False )\n data[k][F]['station_configuration'] = d.to_dataframe()\n logging.debug('Done with %s station_configuration' , str(k) )\n d.close()\n\n ####### \n # SOURCE CONFIGURATION \n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'source_configuration' , decode_times = False )\n data[k][F]['source_configuration'] = d\n logging.debug('Done with %s source_configuration' , str(k) )\n d.close()\n\n\n data['cdm_tables'] = {}\n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\"\n for t in self.standard_cdm: # [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n if t not in data['cdm_tables'].keys():\n #data['cdm_tables'][t] = ''\n cdm = xr.open_dataset(F , engine = 'h5netcdf' , group = t )\n data['cdm_tables'][t] = cdm \n\n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n\n self.data = data\n\n \"\"\" Making all date_times \"\"\"\n self.make_all_datetime()",
"def makecldf(args):\n with_dataset(args, Dataset._install)",
"def setup(self):\n\n folder_name, file_name, url, md5 = self.resource\n dataset_folder = os.path.join(self.data_root, folder_name)\n if not os.path.exists(dataset_folder):\n sh_utils.download_and_extract_archive(url, dataset_folder, md5, file_name)\n\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = tv_datasets.ImageFolder(\n root=dataset_folder, transform=test_transform\n )\n self.images_only_dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()",
"def make_data_hf(input_, label_, config):\n # Check the check dir, if not, create one\n if not os.path.isdir(os.path.join(os.getcwd(),config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(),config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir + '/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir + '/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n #checkimage(input_[1])\n hf.create_dataset('input', data=input_)\n hf.create_dataset('label', data=label_)",
"def setup(self):\n print(\"Looking for \", self.filename)\n if os.path.exists(self.filename):\n n, ext = os.path.splitext(self.filename)[:2]\n if ext == \".h5\" or ext == \".hdf5\":\n with h5py.File(self.filename, \"r\") as file:\n keys = list(file.keys())\n self.data = file[keys[0]].value\n print(\"Behavior Data length is \", self.data.shape[2])\n\n else:\n raise FileNotFoundError",
"def get_data(data_dir, hdf5):\r\n\r\n # Get the filenames of the lists containing image paths and labels.\r\n train_file, val_file = build_dataset_index(data_dir)\r\n\r\n # Check if (creating and) loading from hdf5 database is desired.\r\n if hdf5:\r\n # Create folder to store dataset.\r\n if not os.path.exists('hdf5'):\r\n os.makedirs('hdf5')\r\n # Check if hdf5 databases already exist and create them if not.\r\n if not os.path.exists('hdf5/tiny-imagenet_train.h5'):\r\n from tflearn.data_utils import build_hdf5_image_dataset\r\n print ' Creating hdf5 train dataset.'\r\n build_hdf5_image_dataset(train_file, image_shape=(64, 64), mode='file', output_path='hdf5/tiny-imagenet_train.h5', categorical_labels=True, normalize=True)\r\n\r\n if not os.path.exists('hdf5/tiny-imagenet_val.h5'):\r\n from tflearn.data_utils import build_hdf5_image_dataset\r\n print ' Creating hdf5 val dataset.'\r\n build_hdf5_image_dataset(val_file, image_shape=(64, 64), mode='file', output_path='hdf5/tiny-imagenet_val.h5', categorical_labels=True, normalize=True)\r\n\r\n # Load training data from hdf5 dataset.\r\n h5f = h5py.File('hdf5/tiny-imagenet_train.h5', 'r')\r\n X = h5f['X']\r\n Y = h5f['Y']\r\n\r\n # Load validation data.\r\n h5f = h5py.File('hdf5/tiny-imagenet_val.h5', 'r')\r\n X_test = h5f['X']\r\n Y_test = h5f['Y'] \r\n\r\n # Load images directly from disk when they are required.\r\n else:\r\n from tflearn.data_utils import image_preloader\r\n X, Y = image_preloader(train_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True, filter_channel=True)\r\n X_test, Y_test = image_preloader(val_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True, filter_channel=True)\r\n\r\n # Randomly shuffle the dataset.\r\n X, Y = shuffle(X, Y)\r\n\r\n return X, Y, X_test, Y_test",
"def hdf5(dirs: T.Dict[str, Path], env: T.Mapping[str, str]):\n\n if os.name == \"nt\":\n if \"ifort\" in env[\"FC\"]:\n msg = \"\"\"\nFor Windows with Intel compiler, use HDF5 binaries from HDF Group.\nhttps://www.hdfgroup.org/downloads/hdf5/\nlook for filename like hdf5-1.12.0-Std-win10_64-vs14-Intel.zip\n \"\"\"\n elif \"gfortran\" in env[\"FC\"]:\n msg = \"\"\"\nFor MSYS2 on Windows, just use MSYS2 HDF5.\nInstall from the MSYS2 terminal like:\npacman -S mingw-w64-x86_64-hdf5\nreference: https://packages.msys2.org/package/mingw-w64-x86_64-hdf5\n \"\"\"\n else:\n msg = \"\"\"\nFor Windows, use HDF5 binaries from HDF Group.\nhttps://www.hdfgroup.org/downloads/hdf5/\nInstead of this, it is generally best to use MSYS2 or Windows Subsystem for Linux\n \"\"\"\n raise SystemExit(msg)\n\n hdf5_name = \"hdf5\"\n install_dir = dirs[\"prefix\"] / hdf5_name\n source_dir = dirs[\"workdir\"] / hdf5_name\n\n git_update(source_dir, HDF5_GIT, tag=HDF5_TAG)\n\n cmd = [\n \"./configure\",\n f\"--prefix={install_dir}\",\n \"--enable-fortran\",\n \"--enable-build-mode=production\",\n ]\n\n subprocess.check_call(nice + cmd, cwd=source_dir, env=env)\n\n cmd = [\"make\", \"-C\", str(source_dir), \"-j\", \"install\"]\n subprocess.check_call(nice + cmd)",
"def _install(args, use_cache, debug):\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n if args['dataset'].endswith('.zip') or args.get('hash_value'):\n path_to_archive = args['dataset']\n if args.get('hash_value'):\n path_to_archive = os.path.join(\n PROVENANCE_DIR, args['dataset'],\n '{}-{}.zip'.format(args['dataset'], args['hash_value']))\n if not os.path.exists(path_to_archive):\n print('The committed file does not exist.')\n engine = install_committed(path_to_archive,\n engine,\n force=args.get('force', False))\n return engine\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n print(\"=> Installing\", data_sets_script.name)\n try:\n if engine.name == \"HDF5\":\n sqlite_opts = {\n 'command': 'install',\n 'dataset': data_sets_script,\n 'engine': 'sqlite',\n 'file': (args[\"file\"].split(\".\"))[0] + \".db\",\n 'table_name': args[\"table_name\"],\n 'data_dir': args[\"data_dir\"]\n }\n sqlite_engine = choose_engine(sqlite_opts)\n data_sets_script.download(sqlite_engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n engine.script_table_registry = OrderedDict()\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n elif args['dataset'].startswith('socrata') and not data_sets_scripts:\n socrata_id = args['dataset'].split('-', 1)[1]\n resource = find_socrata_dataset_by_id(socrata_id)\n\n if \"error\" in resource.keys():\n if resource[\"datatype\"][0] == \"map\":\n print(\"{} because map type datasets are not supported\".format(\n resource[\"error\"]))\n else:\n print(\"{} because it is of type {} and not tabular\".format(\n resource[\"error\"], resource[\"datatype\"][1]))\n elif len(resource.keys()) == 0:\n return\n else:\n print(\"=> Installing\", args['dataset'])\n name = f\"socrata-{socrata_id}\"\n create_socrata_dataset(engine, name, resource)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n elif args['dataset'].startswith('rdataset') and not data_sets_scripts:\n print(\"=> Installing\", args['dataset'])\n rdataset = args['dataset'].split('-')\n update_rdataset_catalog()\n package, dataset_name = rdataset[1], rdataset[2]\n create_rdataset(engine, package, dataset_name)\n if args['command'] == 'download':\n return engine\n else:\n script_list = SCRIPT_LIST()\n script = get_script(args['dataset'])\n script.download(engine, debug=debug)\n script.engine.final_cleanup()\n else:\n message = \"Run retriever.datasets() to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine",
"def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()",
"def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']",
"def test_hdf5_load_all():\n skip_if_no_h5py()\n import h5py\n\n # save random data to HDF5\n handle, filename = tempfile.mkstemp()\n dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1),\n num_examples=10, dim=5,\n num_classes=3)\n with h5py.File(filename, 'w') as f:\n f.create_dataset('X', data=dataset.get_design_matrix())\n f.create_dataset('y', data=dataset.get_targets())\n\n # instantiate Train object\n trainer = yaml_parse.load(load_all_yaml % {'filename': filename})\n trainer.main_loop()\n\n # cleanup\n os.remove(filename)",
"def load_datasets(in_h5_path, partition='train'):\n\tif partition == 'train':\n\t\tx_train = HDF5Matrix(datapath=in_h5_path, dataset=\"train/X_train\")\n\t\ty_train = HDF5Matrix(datapath=in_h5_path, dataset=\"train/y_train\")\n\t\treturn x_train, y_train\n\telif partition == 'test':\n\t\tx_test = HDF5Matrix(datapath=in_h5_path, dataset=\"test/X_test\")\n\t\ty_test = HDF5Matrix(datapath=in_h5_path, dataset=\"test/y_test\")\n\t\treturn x_test, y_test\n\telse:\n\t\tprint(\"Invalid 'partition' parameter: Valid values: ['train', 'test']\")",
"def setup():\n print('...')\n # Make sure dirs exist\n for directory in [DATA_DIR, DATA_INPUT_DIR, DATA_OUTPUT_DIR]:\n os.makedirs(directory, exist_ok=True)",
"def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):\n n_total = n_train + n_valid + n_test\n splits = create_splits(n_train, n_valid, n_test)\n hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)\n vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))\n hdf5_file.create_dataset('encoded_images', shape=(n_total,),\n dtype=vlen_dtype)\n hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)\n hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')",
"def main(dataset):\n # Save everything 'MNE_DATA' dir ... defaults to ~/mne_data\n mne_data_dir = mne.get_config(key='MNE_DATA', default=False)\n if not mne_data_dir:\n mne.set_config('MNE_DATA', str(DEFAULT_DATA_DIR))\n DEFAULT_DATA_DIR.mkdir(exist_ok=True)\n mne_data_dir = DEFAULT_DATA_DIR\n else:\n mne_data_dir = Path(mne_data_dir)\n\n ds_names = DATASET_OPTIONS.keys() if not dataset else (dataset,)\n\n for ds_name in ds_names:\n print('\\n----------------------')\n ds_path = mne_data_dir / ds_name\n _download(ds_name=ds_name, ds_path=ds_path)",
"def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")",
"def setup(self):\n EGS5.setup(self)\n if not len(self.inputs):\n raise Exception(\"Missing required input LHE file.\")",
"def main(config):\n save_main_dir = config[\"save_main_dir\"]\n\n if not os.path.isdir(save_main_dir):\n print(\"{} does not exist, creating it now...\", save_main_dir)\n pathlib.Path(save_main_dir).mkdir(parents=True, exist_ok=True)\n\n app = inviwopy.app\n network = app.network\n \n # Resize the canvas to improve rendering speed, only affects visual output\n if config[\"should_resize\"]:\n ivw_helpers.set_canvas_sizes(128, 128)\n\n hdf5_path = os.path.join(save_main_dir, config[\"hdf5_name\"])\n with h5py.File(hdf5_path, mode='w', libver='latest') as hdf5_file:\n hdf5_file.swmr_mode = True\n create_hdf_storage(hdf5_file, config)\n count = {\"train\": 0, \"val\": 0}\n for set_type in 'train', 'val':\n capture_lf_samples(hdf5_file, set_type, config, network, count) \n print(\"Finished writing to HDF5 in {}\".format(hdf5_path))",
"def create_data_hdu(self, dataset_size, config=None):\n if not config: config = self.config\n t0 = time.time()\n cols = []\n\n re_pattern = \"(\\d+)([A-Z])\" # match all numbers and uppercase letters\n counter = 0\n for name in config['columns']:\n\n column_entry = config['columns'][name]\n\n format = column_entry.get('format', None)\n format_match = re.search(re_pattern, format)\n dtype = self.fits_dtype[format_match.group(2)]\n unit = column_entry.get('unit', None)\n # self.logger.debug(\"dtype and unit for {}: {} {}\".format(name, dtype, unit))\n dim = column_entry.get('dim', None)\n\n if dim:\n dim_np = dim.strip(\")\").strip(\"(\")\n dim_np = dim_np.split(\",\")\n dim_np = [int(d) for d in dim_np]\n # self.logger.debug(\"dim for {}: {}\".format(name, dim_np))\n # print(\"name: {}, format: {}, unit: {}, dim: {}\".format(name, format, unit, dim))\n if BACKEND == 'pyfits' and pyfits.__version__ >= 3.4 and dim:\n final_shape = [dataset_size] + dim_np[::-1]\n elif BACKEND == 'astropy' and dim:\n final_shape = [dataset_size] + dim_np[::-1]\n else:\n final_shape = dataset_size\n array = np.zeros(final_shape) #, dtype=dtype)\n # print(\"{}, Final shape: {}, dtype: {}, array: {}\".format(name, final_shape, dtype, array))\n cols.append(pyfits.Column(name=name, format=format, unit=unit, dim=dim, array=array))\n counter += 1\n\n table_data_hdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))\n existing_keys = table_data_hdu.header.keys()\n for data_line in config['header_data']:\n\n card = pyfits.Card().fromstring(data_line)\n card.verify()\n if card.keyword in existing_keys:\n continue\n # del table_data_hdu.header[card.keyword]\n table_data_hdu.header.append(card)\n\n self.hdus.append(table_data_hdu)\n self.logger.debug(\"Took {:.4f} seconds to generated data HDU\".format(time.time() - t0))\n return table_data_hdu",
"def make_data(config, data, label):\n if not os.path.isdir(os.path.join(os.getcwd(), config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(), config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)",
"def load_dataset():\n\n train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n \n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes",
"def _create_fit_datasets(self):\n\n if self.h5_guess is None:\n warn('Need to guess before fitting!')\n return\n\n self.h5_fit = create_empty_dataset(self.h5_guess, loop_fit32, 'Fit')\n write_simple_attrs(self._h5_group, {'fit method': 'pycroscopy functional'})\n\n # This is necessary comparing against new runs to avoid re-computation + resuming partial computation\n write_simple_attrs(self.h5_fit, self._parms_dict)\n write_simple_attrs(self.h5_fit, {'Loop_fit_method': \"pycroscopy functional\", 'last_pixel': 0})"
]
| [
"0.65313053",
"0.6297535",
"0.6233249",
"0.62266046",
"0.6179072",
"0.6178218",
"0.6154918",
"0.6144979",
"0.6131246",
"0.61283696",
"0.61283696",
"0.60190755",
"0.5967659",
"0.59319526",
"0.58979344",
"0.5892333",
"0.5878183",
"0.58725953",
"0.5870886",
"0.5807656",
"0.5686695",
"0.5643302",
"0.56365913",
"0.56331384",
"0.5577022",
"0.5571264",
"0.5565024",
"0.55407983",
"0.55339044",
"0.55287355"
]
| 0.65487957 | 0 |
Convert Python permissions to AWS API permissions. The permissions model for the API makes more sense for a web service but is overly verbose for working with in Python. This and the setter allow transforming to/from the API syntax. The Python code should consume the allowed_groups and allowed_users lists directly. | def aws_permissions(self):
perms = []
for g in self.allowed_groups:
perms.append({"Group": g})
for i in self.allowed_users:
perms.append({"UserId": i})
return perms | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def aws_permissions(self, perms):\n for perm in perms:\n group = perm.get(\"Group\")\n if group:\n self.allowed_groups.append(group)\n\n user = perm.get(\"UserId\")\n if user:\n self.allowed_users.append(user)",
"def set_permissions(self, permissions):\n\n\t\tif Platform.PLATFORM_POSIX == self.__platform.get_platform():\n\t\t\tif permissions.__class__ == str and re.match('([-r][-w][-xsStT]){3,3}', permissions):\n\t\t\t\tself.__permissions = 0\n\t\t\t\tif permissions[0] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IRUSR\n\t\t\t\tif permissions[1] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWUSR\n\t\t\t\tif permissions[2] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXUSR\n\t\t\t\tif permissions[3] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IRGRP\n\t\t\t\tif permissions[4] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWGRP\n\t\t\t\tif permissions[5] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXGRP\n\t\t\t\tif permissions[6] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IROTH\n\t\t\t\tif permissions[7] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWOTH\n\t\t\t\tif permissions[8] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXOTH\n\t\t\t\t\t\n\t\t\telif permissions.__class__ == str and re.match('(0)?[0-7]{3,3}', permissions):\n\t\t\t\tif len(permissions) == 3:\n\t\t\t\t\tpermissions = '0' + permissions\n\t\t\t\tself.__permissions = octstr_to_int(permissions)\n\t\t\t\n\t\t\telif permissions.__class__ == int and 0 <= permissions <= 511:\n\t\t\t\tself.__permissions = permissions\n\t\t\t\n\t\t\telse:\n\t\t\t\traise PermissionsInvalidError()\n\n\t\telif Platform.PLATFORM_WINDOWS == self.__platform.get_platform():\n\t\t\tif permissions.__class__ == str and re.match('[-r][-w]', permissions):\n\t\t\t\tself.__permissions = 0\n\t\t\t\tif permissions[0] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IREAD\n\t\t\t\tif permissions[1] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWRITE\n\t\t\telif permissions.__class__ == int and 0 <= permissions <= 511:\n\t\t\t\tself.__permissions = permissions\n\t\t\telse:\n\t\t\t\traise PermissionsInvalidError() \n\t\telse:\n\t\t\traise PlatformNotSupportedError()",
"def grant_from_json(self, permissions_json):\n permissions = json.loads(permissions_json)\n users_to_grant = User.objects.filter(username__in=permissions[0])\n groups_to_grant = Group.objects.filter(name__in=permissions[1])\n self.grant_from_permissions_list([users_to_grant, groups_to_grant])",
"def put(self):\n token = self.access_token_from_authorization_header()\n\n data = self.convert_argument_to_json()\n\n permissions = data['permissions']\n\n if len(permissions) is not 4:\n raise tornado.web.HTTPError(400, 'Some permissions are missing. Permissions count must be 4.')\n\n for ix, permission in enumerate(permissions):\n\n try:\n permission = int(permission)\n\n if permission not in [0, 1]:\n raise Exception('Permission must be either of 0 or 1.')\n\n permissions[ix] = int(permission)\n\n except Exception as ex:\n raise tornado.web.HTTPError(400, 'Permission must be integer')\n\n with self.session_scope() as session:\n token = convert_uuid_or_400(token)\n\n token = session.query(AuthToken).filter(AuthToken.uid == token).one_or_none()\n\n user = token.auth\n updated_permission = bitarray()\n\n updated_permission.extend(permissions)\n\n user.permissions = updated_permission.to01()\n\n session.flush()\n\n response = user.to_json()\n self.write(response)",
"def permissions(self) -> 'outputs.PermissionsResponse':\n return pulumi.get(self, \"permissions\")",
"def update_admin_permission(self) -> None:\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()",
"def permissionsDefinitions(self):\n\n ### for the records:\n ### this method contains lots of generation logic. in fact this\n ### should move over to the WorkflowGenerator.py and reduce here in\n ### just deliver the pure data\n ### the parser should really just parse to be as independent as possible\n\n # permissions_mapping (abbreviations for lazy guys)\n # keys are case insensitive\n\n # STATE_PERMISSION_MAPPING in TaggedValueSupport.py now\n # contains the handy mappings from 'access' to 'Access contents\n # information' and so.\n\n state = self.state\n tagged_values = state.getTaggedValues()\n permission_definitions = []\n\n for tag_name, tag_value in tagged_values.items():\n # list of tagged values that are NOT permissions\n if tag_name in self.non_permissions:\n # short check if its registered, registry complains in log.\n tgvRegistry.isRegistered(tag_name, state.classcategory,\n silent=True)\n continue\n tag_name = tag_name.strip()\n\n # look up abbreviations if any\n permission = STATE_PERMISSION_MAPPING.get(tag_name.lower(),\n tag_name or '')\n\n if not tag_value:\n log.debug(\"Empty tag value, treating it as a reset \"\n \"for acquisition, so acquisition=0.\")\n permission_definitions.append({'permission' : permission,\n 'roles' : [],\n 'acquisition' : 0})\n continue\n\n # split roles-string into list\n raw_roles = tag_value.replace(';', ',')\n roles = [str(r.strip()) for r in raw_roles.split(',') if r.strip()]\n\n # verify if this permission is acquired\n nv = 'acquire'\n acquisition = 0\n if nv in roles:\n acquisition = 1\n roles.remove(nv)\n\n permission = utils.processExpression(permission, asString=False)\n permission_definitions.append(\n {'permission' : permission,\n 'roles' : roles,\n 'acquisition' : acquisition}\n )\n\n # If View was defined but Access was not defined, the Access\n # permission should be generated with the same rights defined\n # for View\n\n has_access = 0\n has_view = 0\n view = {}\n for permission_definition in permission_definitions:\n if (permission_definition.get('permission', None) ==\n STATE_PERMISSION_MAPPING['access']):\n has_access = 1\n if (permission_definition.get('permission', None) ==\n STATE_PERMISSION_MAPPING['view']):\n view = permission_definition\n has_view = 1\n if has_view and not has_access:\n permission = STATE_PERMISSION_MAPPING['access']\n permission_definitions.append({'permission': permission,\n 'roles': view['roles'],\n 'acquisition': view['acquisition']})\n return permission_definitions",
"def grant_from_permissions_list(self, permissions_list):\n self.users_allowed.add(*permissions_list[0])\n self.groups_allowed.add(*permissions_list[1])",
"def PermissionsFileProcessor(input_file):\n access_value_msg = GetApiMessage('Dataset').AccessValueListEntry\n try:\n permissions_array = []\n permissions_from_file = yaml.load(input_file[0])\n permissions_from_file = permissions_from_file.get('access', None)\n if not permissions_from_file or not isinstance(permissions_from_file, list):\n raise PermissionsFileError(\n 'Error parsing permissions file: no access list defined in file')\n for access_yaml in permissions_from_file:\n permission = encoding.PyValueToMessage(access_value_msg, access_yaml)\n if _ValidatePermission(permission):\n permissions_array.append(permission)\n else:\n raise PermissionsFileError(('Error parsing permissions file:'\n ' invalid permission definition'\n ' [{}]'.format(permission)))\n\n return sorted(permissions_array, key=lambda x: x.role)\n except yaml.YAMLParseError as ype:\n raise PermissionsFileError('Error parsing permissions file [{}]'.format(\n ype))",
"def get_group_permissions(self, obj=None):\n pass",
"def get_permissions(self):\n if self.action == 'update' and self.action == 'delete':\n permission_classes = [IsBlackListedToken, IsValidGroupUser]\n else:\n permission_classes = [IsBlackListedToken, ]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action == 'update' and self.action == 'delete':\n permission_classes = [IsBlackListedToken, IsValidGroupUser]\n else:\n permission_classes = [IsBlackListedToken, ]\n return [permission() for permission in permission_classes]",
"def permission(guild_id: int, permissions: list):\n\n def wrapper(cmd):\n if not getattr(cmd, \"__permissions__\", None):\n cmd.__permissions__ = {}\n cmd.__permissions__[guild_id] = permissions\n return cmd\n\n return wrapper",
"def test_iam_permissions(\n self,\n ) -> Callable[\n [iam_policy_pb2.TestIamPermissionsRequest],\n iam_policy_pb2.TestIamPermissionsResponse,\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"test_iam_permissions\" not in self._stubs:\n self._stubs[\"test_iam_permissions\"] = self.grpc_channel.unary_unary(\n \"/google.iam.v1.IAMPolicy/TestIamPermissions\",\n request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,\n response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,\n )\n return self._stubs[\"test_iam_permissions\"]",
"def test_iam_permissions(\n self,\n ) -> Callable[\n [iam_policy_pb2.TestIamPermissionsRequest],\n iam_policy_pb2.TestIamPermissionsResponse,\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"test_iam_permissions\" not in self._stubs:\n self._stubs[\"test_iam_permissions\"] = self.grpc_channel.unary_unary(\n \"/google.iam.v1.IAMPolicy/TestIamPermissions\",\n request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,\n response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,\n )\n return self._stubs[\"test_iam_permissions\"]",
"def PermissionSet(self) -> _n_6_t_0:",
"def __upgrade_group_permissions(self, group: Group, permissions_list: list, action: str) -> list:\n messages = []\n\n for codename in permissions_list:\n getattr(group.permissions, action)(Permission.objects.get(codename=codename))\n\n if not len(permissions_list) == 0:\n messages.append(f'\\t{action}ed {permissions_list}')\n\n return messages",
"async def get_manipulation_permissions(self, requester: Requester,\n model: Model) -> Tuple[\n ManipulationPermissions, Dict[str, Any]]:\n raise NotImplementedError",
"def copy_permissions(self, source):\n self.grant_from_permissions_list((source.users_allowed.all(),\n source.groups_allowed.all()))",
"def test__parse_allow(input_data):\n output = parse_allow(input_data)\n vampytest.assert_instance(output, Permission)\n return output",
"def get_permissions(self):\n if self.action in [\"list\"]:\n permission_classes = [permissions.UserOrPlaylistIsAuthenticated]\n elif self.action in [\"create\", \"set_display_name\", \"push_attendance\"]:\n permission_classes = [\n permissions.PlaylistIsAuthenticated\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.BaseIsParamsVideoRoleThroughPlaylist\n ]\n elif self.action in [\n \"partial_update\",\n \"retrieve\",\n ]:\n permission_classes = [\n permissions.IsTokenPlaylistRouteObjectRelatedVideo\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.BaseIsParamsVideoRoleThroughPlaylist\n ]\n elif self.action in [\"list_attendances\"]:\n permission_classes = [\n permissions.IsTokenInstructor\n | permissions.IsTokenAdmin\n # With standalone site, admin can access\n | permissions.IsParamsVideoAdminThroughOrganization\n | permissions.IsParamsVideoAdminOrInstructorThroughPlaylist\n ]\n elif self.action is None:\n if self.request.method not in self.allowed_methods:\n raise MethodNotAllowed(self.request.method)\n permission_classes = self.permission_classes\n else:\n # When here it means we forgot to define a permission for a new action\n # We enforce the permission definition in this method to have a clearer view\n raise NotImplementedError(f\"Action '{self.action}' is not implemented.\")\n return [permission() for permission in permission_classes]",
"def set_permission(StackId=None, IamUserArn=None, AllowSsh=None, AllowSudo=None, Level=None):\n pass",
"def setpermissions(self, lvl):\n\n admingroup = Group.objects.get(name=self.comicsite.admin_group_name())\n participantsgroup = Group.objects.get(name=self.comicsite.participants_group_name())\n everyonegroup = Group.objects.get(name=\"everyone\")\n\n\n\n self.persist_if_needed()\n if lvl == self.ALL:\n assign_perm(\"view_ComicSiteModel\",admingroup,self)\n assign_perm(\"view_ComicSiteModel\",participantsgroup,self)\n assign_perm(\"view_ComicSiteModel\",everyonegroup,self)\n elif lvl == self.REGISTERED_ONLY:\n\n assign_perm(\"view_ComicSiteModel\",admingroup,self)\n assign_perm(\"view_ComicSiteModel\",participantsgroup,self)\n remove_perm(\"view_ComicSiteModel\",everyonegroup,self)\n elif lvl == self.ADMIN_ONLY:\n\n assign_perm(\"view_ComicSiteModel\",admingroup,self)\n remove_perm(\"view_ComicSiteModel\",participantsgroup,self)\n remove_perm(\"view_ComicSiteModel\",everyonegroup,self)\n else:\n raise ValueError(\"Unknown permissions level '\"+ lvl +\"'. I don't know which groups to give permissions to this object\")",
"def post(self):\n user_id = request.args.get('user_id')\n permisison_id = request.args.get('perm_id')\n settingsid = request.args.get('settings_id')\n permissionvalue = request.args.get('perm_value')\n return update_permissions(user_id, perm_id=permisison_id, settings_id=settingsid,\n permissions_value=permissionvalue)",
"def can(self, permissions: Union[str, List]) -> bool:",
"def get_assign_permission(userid, group):",
"def get_permissions(self):\n permissions = [IsAdminUser]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action =='retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def get_all_permissions(self, obj=None):",
"def main(argv):\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"--groupId\", help=\"Group email address\")\n parser.add_option(\n \"--whoCanInvite\",\n help=\"Possible values: ALL_MANAGERS_CAN_INVITE, \" \"ALL_MEMBERS_CAN_INVITE\",\n )\n parser.add_option(\n \"--whoCanJoin\",\n help=\"Possible values: ALL_IN_DOMAIN_CAN_JOIN, \"\n \"ANYONE_CAN_JOIN, CAN_REQUEST_TO_JOIN, \"\n \"CAN_REQUEST_TO_JOIN\",\n )\n parser.add_option(\n \"--whoCanPostMessage\",\n help=\"Possible values: ALL_IN_DOMAIN_CAN_POST, \"\n \"ALL_MANAGERS_CAN_POST, ALL_MEMBERS_CAN_POST, \"\n \"ANYONE_CAN_POST, NONE_CAN_POST\",\n )\n parser.add_option(\n \"--whoCanViewGroup\",\n help=\"Possible values: ALL_IN_DOMAIN_CAN_VIEW, \"\n \"ALL_MANAGERS_CAN_VIEW, ALL_MEMBERS_CAN_VIEW, \"\n \"ANYONE_CAN_VIEW\",\n )\n parser.add_option(\n \"--whoCanViewMembership\",\n help=\"Possible values: ALL_IN_DOMAIN_CAN_VIEW, \"\n \"ALL_MANAGERS_CAN_VIEW, ALL_MEMBERS_CAN_VIEW, \"\n \"ANYONE_CAN_VIEW\",\n )\n (options, args) = parser.parse_args()\n\n if options.groupId is None:\n print(\"Give the groupId for the group\")\n parser.print_help()\n return\n\n settings = {}\n\n if (\n options.whoCanInvite\n or options.whoCanJoin\n or options.whoCanPostMessage\n or options.whoCanPostMessage\n or options.whoCanViewMembership\n ) is None:\n print(\"No access parameters given in input to update access permissions\")\n parser.print_help()\n else:\n settings = {\n \"whoCanInvite\": options.whoCanInvite,\n \"whoCanJoin\": options.whoCanJoin,\n \"whoCanPostMessage\": options.whoCanPostMessage,\n \"whoCanViewGroup\": options.whoCanViewGroup,\n \"whoCanViewMembership\": options.whoCanViewMembership,\n }\n\n # Set up a Flow object to be used if we need to authenticate.\n FLOW = flow_from_clientsecrets(\n CLIENT_SECRETS,\n scope=\"https://www.googleapis.com/auth/apps.groups.settings\",\n message=MISSING_CLIENT_SECRETS_MESSAGE,\n )\n\n storage = Storage(\"groupsettings.dat\")\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n print(\"invalid credentials\")\n # Save the credentials in storage to be used in subsequent runs.\n credentials = run_flow(FLOW, storage)\n\n # Create an httplib2.Http object to handle our HTTP requests and authorize it\n # with our good Credentials.\n http = httplib2.Http()\n http = credentials.authorize(http)\n\n service = build(\"groupssettings\", \"v1\", http=http)\n\n access_settings(service=service, groupId=options.groupId, settings=settings)"
]
| [
"0.63265866",
"0.57607347",
"0.5758299",
"0.57271314",
"0.57171214",
"0.57127684",
"0.5686844",
"0.5601842",
"0.5593251",
"0.55737066",
"0.5557996",
"0.5557996",
"0.55541414",
"0.5542022",
"0.5542022",
"0.5533457",
"0.5517421",
"0.55076104",
"0.5475774",
"0.5434879",
"0.5431917",
"0.5423467",
"0.5416213",
"0.54051375",
"0.54021156",
"0.5399991",
"0.5373006",
"0.537097",
"0.5361757",
"0.53336895"
]
| 0.64832705 | 0 |
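A minimal, self-contained sketch of the round trip described in the row above, combining the getter shown as the document with a setter in the style of the first negative; the standalone Permissions class name and layout are assumptions added for illustration, not part of the dataset:

    class Permissions:
        def __init__(self):
            # flat Python-side lists, which the query says callers should use directly
            self.allowed_groups = []
            self.allowed_users = []

        @property
        def aws_permissions(self):
            # Python lists -> verbose AWS API syntax
            perms = [{"Group": g} for g in self.allowed_groups]
            perms += [{"UserId": u} for u in self.allowed_users]
            return perms

        @aws_permissions.setter
        def aws_permissions(self, perms):
            # AWS API syntax -> flat Python lists
            for perm in perms:
                if "Group" in perm:
                    self.allowed_groups.append(perm["Group"])
                elif "UserId" in perm:
                    self.allowed_users.append(perm["UserId"])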
Find the root of the repo, which contains a .git folder | def find_repo_root():
path = os.getcwd()
while ".git" not in set(os.listdir(path)) and path != "/":
path = os.path.dirname(path)
if path == "/":
raise Exception("No repo found, stopping at /")
return path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def repo_root() -> str:\n path = os.path.realpath(os.curdir)\n\n while True:\n if os.path.exists(os.path.join(path, \"setup.py\")):\n return path\n path = os.path.realpath(os.path.join(path, \"..\"))",
"def get_git_root():\n path = os.getcwd()\n git_repo = git.Repo(path, search_parent_directories=True)\n git_root = git_repo.git.rev_parse(\"--show-toplevel\")\n return git_root",
"def get_git_root(path):\n\tgit_repo = git.Repo(path, search_parent_directories=True)\n\tgit_root = git_repo.git.rev_parse(\"--show-toplevel\")\n\treturn git_root",
"def find_git_repository(self, path):\n while path is not None:\n git_path = os.path.join(path,'.git')\n if os.path.exists(git_path) and os.path.isdir(git_path):\n return path\n path = os.path.dirname(path)\n return None",
"def _find_repo() -> str:\n\tstart = os.path.abspath(os.getcwd())\n\tcurrent = start\n\twhile current != \"/\":\n\t\trepo = os.path.join(current, \".repo\")\n\t\tif os.path.exists(repo):\n\t\t\tLOGGER.debug(\"Found .repo at %s\", repo)\n\t\t\treturn repo\n\t\tcurrent = os.path.dirname(current)\n\traise RepoNotFoundError(\"Not .repo found in any directory along {}\".format(start))",
"def repo_root_path() -> str:\n\n global __REPO_ROOT\n if __REPO_ROOT:\n return __REPO_ROOT\n\n path = os.path.normpath(os.getcwd())\n while os.path.split(path)[1]:\n if is_repo_root(path):\n break\n\n path = os.path.split(path)[0]\n else:\n # fallback to the location of this file if the CWD is not in the\n # repo root:\n path = os.path.normpath(os.path.dirname(__file__))\n while os.path.split(path)[1]:\n if is_repo_root(path):\n break\n\n path = os.path.split(path)[0]\n else:\n print(\"Could not find repo root!\")\n sys.exit(1)\n\n __REPO_ROOT = path\n return __REPO_ROOT",
"def getGitPath() -> osp:\n current_dir = osp.dirname(osp.realpath(__file__))\n git_dir = osp.dirname(osp.dirname(current_dir))\n return git_dir",
"def get_repo_root_from_file_in_repo(path_to_file_in_repo):\n try:\n repo = Repo(path=path_to_file_in_repo, search_parent_directories=True)\n\n submodules_root = repo.git.rev_parse(\"--show-superproject-working-tree\")\n if submodules_root:\n return submodules_root\n\n git_root = repo.git.rev_parse(\"--show-toplevel\")\n return git_root\n except InvalidGitRepositoryError:\n return None",
"def get_repository_dir():\n expected = os.path.abspath(__file__).rsplit('/', 2)[0]\n\n # get_path verifies the existance of these directories\n get_path(expected, 'data')\n get_path(expected, 'latex')\n\n return expected",
"def gitroot(dir=\"\"):\n # Supress errors from Git\n git_cmd = \"git rev-parse --show-toplevel \" + dir + \" 2> \" + os.devnull\n if dir:\n original_cwd = os.getcwd()\n os.chdir(dir)\n try:\n sub_out = subprocess.check_output(git_cmd, shell=True)\n cmd_out = sub_out.decode().rstrip(). splitlines()[0]\n except:\n cmd_out = \"\"\n if dir:\n os.chdir(original_cwd)\n return cmd_out",
"def get_git_dir(tree):\n\n return os.path.join(tree, \".git\")",
"def git_dir():\n return subprocess.check_output([\"git\", \"rev-parse\", \"--git-dir\"]).decode().strip()",
"def get_repo():\n call_dir = os.getcwd()\n return Repo(call_dir, search_parent_directories=True)",
"def var_REPO_ROOT(self):\n return rh.git.find_repo_root()",
"def get_repo_dir():\n dirname, _ = os.path.split(os.path.abspath(__file__))\n dirname = os.path.dirname(dirname)\n logging.debug(\"Repo dir is %s\", dirname)\n return dirname",
"def rel_cwd():\n return os.path.relpath(os.getcwd(), git_toplevel())",
"def ensure_git_root():\n root = get_root()\n if root is None:\n error(\"Not in a git repository.\", exit=True)\n if os.getcwd() != root:\n error(\"Must call from the top folder of the git repository\",\n exit=True)",
"def get_project_root(repo=None):\n if repo is None:\n repo = get_repo()\n return repo.git.rev_parse(u\"--show-toplevel\")",
"def get_root(directory=None):\n cmd = 'git rev-parse --show-toplevel'\n try:\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n except CalledProcessError:\n return None\n return output.strip()",
"def GetGitSourceDirectory(root):\n git_source_directory = set()\n popen_out = os.popen(\"cd %s && git ls-files --full-name .\" %\n pipes.quote(root))\n for line in popen_out.readlines():\n dir_name = os.path.join(root, os.path.dirname(line))\n # Add the directory as well as all the parent directories.\n while dir_name != root:\n git_source_directory.add(dir_name)\n dir_name = os.path.dirname(dir_name)\n git_source_directory.add(root)\n return git_source_directory",
"def find_git_dir(directory):\n directory = os.path.abspath(directory)\n if not os.path.exists(directory):\n return \"\"\n\n for _ in range(10):\n path = os.path.join(directory, \".git\")\n if os.path.exists(path):\n return directory\n\n if directory == \"/\":\n return \"\"\n\n directory = os.path.abspath(os.path.join(directory, os.pardir))\n\n return \"\"",
"def findRepositoryByBackTracking():\n \n cLookBack = '.'\n while(True):\n cDir = os.path.abspath(cLookBack)\n print(\"Searching in %s\" % cDir)\n if os.path.isdir( os.path.join(cDir, DB_SUBFOLDER) ):\n return cDir\n else:\n if os.path.abspath(cLookBack) == os.path.abspath(cLookBack + '/..'):\n return os.path.abspath('.')\n cLookBack = cLookBack + '/..'\n \n return cDir",
"def _find_root() -> pathlib.Path:\n cwd = pathlib.Path.cwd()\n while not (\n pathlib.Path(cwd, \"pyproject.toml\").exists() or\n pathlib.Path(cwd, \"poetry.lock\").exists() or\n pathlib.Path(\"/\") == cwd\n ):\n cwd = cwd.parent\n return cwd",
"def get_git_tree(target):\n\n root = is_system_root(target)\n is_file = os.path.isfile(target)\n folder = os.path.dirname(target) if is_file else target\n if os.path.exists(os.path.join(folder, \".git\")):\n return folder\n else:\n if root:\n return None\n else:\n return get_git_tree(os.path.dirname(folder))",
"def find_root():\n\n curdir = os.path.curdir\n fs_root = \"/\"\n # Do as build/envsetup.sh does\n # if this files exists, we're at root\n root_clue = \"build/core/envsetup.mk\"\n found = False\n while not found and not os.path.samefile(fs_root, curdir):\n if os.path.exists(os.path.join(curdir, root_clue)):\n found = True\n break\n curdir = os.path.join(os.path.pardir, curdir)\n return curdir if found else None",
"def repo_dir(path=None, max_levels=100):\n # Start from a path, and iterate until we find the repo root.\n path = path or _get_caller_path()\n path, children = _repo_dir_and_children(path, max_levels=max_levels)\n return path",
"def navigate_to_git_root() -> bool:\n dir_climb_count = 0\n continue_dir_traverse = True\n while continue_dir_traverse:\n if not Utils.contains_dir('.git'):\n print(f\"Current dir {os.getcwd()} is not a Git repository.\")\n # Change directory up one level.\n os.chdir(\"../\")\n dir_climb_count += 1\n else:\n print(f\"Current dir {os.getcwd()} is a recognized Git repository.\")\n return True\n\n if dir_climb_count > 3:\n continue_dir_traverse = False\n\n if not Utils.contains_dir('.git'):\n logging.error('Unable to locate Git repository.')\n\n return False",
"def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)",
"def _find_reporoot(self, reporoot_opt, relnotessubdir_opt):\n reporoot = os.path.abspath(reporoot_opt)\n # When building on RTD.org the root directory may not be\n # the current directory, so look for it.\n try:\n return repo.Repo.discover(reporoot).path\n except Exception:\n pass\n\n for root in ('.', '..', '../..'):\n if os.path.exists(os.path.join(root, relnotessubdir_opt)):\n return root\n\n raise Exception(\n 'Could not discover root directory; tried: %s' % ', '.join([\n os.path.abspath(root) for root in ('.', '..', '../..')\n ])\n )",
"def path(self) -> str:\n self.__verify_repo_initialized()\n return str(self._repo_path.parent)"
]
| [
"0.8146076",
"0.81069934",
"0.79744226",
"0.7808931",
"0.773586",
"0.76759857",
"0.7481751",
"0.74334836",
"0.73678917",
"0.7341794",
"0.73145497",
"0.72178036",
"0.71953976",
"0.7154531",
"0.7116067",
"0.70113045",
"0.6963013",
"0.6949734",
"0.69328886",
"0.67888033",
"0.6772206",
"0.6747813",
"0.67344713",
"0.67261815",
"0.66514355",
"0.6591543",
"0.6590347",
"0.6566839",
"0.65657514",
"0.6507304"
]
| 0.89188886 | 0 |
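A pathlib-based sketch of the same walk-up-until-.git idea as the document in the row above; the function name, default argument, and error message here are illustrative assumptions rather than the dataset's own code:

    from pathlib import Path

    def find_repo_root(start=None):
        # walk from the starting directory up to the filesystem root
        start = Path(start or Path.cwd()).resolve()
        for candidate in (start, *start.parents):
            # .git is usually a directory, but can be a file for worktrees/submodules
            if (candidate / ".git").exists():
                return candidate
        raise RuntimeError("No repo found above %s" % start)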
Do all the relevant response 1D and 2D hists for one eta bin. Can optionally impose a maximum pt cut on L1 jets (to avoid problems with saturation), and a cut on the number of PU vertices. | def plot_checks(inputfile, outputfile, absetamin, absetamax, max_pt, pu_min, pu_max):
print "Doing eta bin: %g - %g, max L1 jet pt: %g" % (absetamin, absetamax, max_pt)
# Input tree
tree_raw = inputfile.Get("valid")
# Output folders
output_f = outputfile.mkdir('eta_%g_%g' % (absetamin, absetamax))
output_f_hists = output_f.mkdir("Histograms")
# Eta cut string
eta_cutStr = " TMath::Abs(eta)<%g && TMath::Abs(eta) > %g " % (absetamax, absetamin)
# Pt cut string
pt_cutStr = "pt < %g" % max_pt
# PU cut string
pu_cutStr = "numPUVertices <= %f && numPUVertices >= %f" % (pu_max, pu_min)
# Avoid L1 saturated jets cut (from 2017 any l1 jet with a saturated tower is auto given pt=1024GeV)
avoidSaturation_cut = "pt < 1023.1"
cutStr = " && ".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])
# Draw response (pT^L1/pT^Gen) for all pt bins
tree_raw.Draw("rsp>>hrsp_eta_%g_%g(100,0,5)" % (absetamin, absetamax), cutStr)
hrsp_eta = ROOT.gROOT.FindObject("hrsp_eta_%g_%g" % (absetamin, absetamax))
hrsp_eta.SetTitle(";response (p_{T}^{L1}/p_{T}^{Ref});")
if absetamin < 2.9:
fit_result = hrsp_eta.Fit("gaus", "QER", "",
hrsp_eta.GetMean() - hrsp_eta.GetRMS(),
hrsp_eta.GetMean() + hrsp_eta.GetRMS())
else:
peak = hrsp_eta.GetBinCenter(hrsp_eta.GetMaximumBin())
fit_result = hrsp_eta.Fit("gaus", "QER", "",
peak - (0.5 * hrsp_eta.GetRMS()),
peak + (0.5 * hrsp_eta.GetRMS()))
# mean = hrsp_eta.GetFunction("gaus").GetParameter(1)
# err = hrsp_eta.GetFunction("gaus").GetParError(1)
output_f_hists.WriteTObject(hrsp_eta)
# nb_pt, pt_min, pt_max = 63, 0, 252 # for GCT/Stage 1
nb_pt, pt_min, pt_max = 512, 0, 1024 # for Stage 2
nb_rsp, rsp_min, rsp_max = 100, 0, 5
# Draw rsp (pT^L1/pT^Gen) Vs GenJet pT
tree_raw.Draw("rsp:ptRef>>h2d_rsp_gen(%d,%g,%g,%d,%g,%g)" % (nb_pt, pt_min, pt_max, nb_rsp, rsp_min, rsp_max), cutStr)
h2d_rsp_gen = ROOT.gROOT.FindObject("h2d_rsp_gen")
h2d_rsp_gen.SetTitle(";p_{T}^{Ref} [GeV];response (p_{T}^{L1}/p_{T}^{Ref})")
output_f_hists.WriteTObject(h2d_rsp_gen)
h2d_rsp_gen_norm = cu.norm_vertical_bins(h2d_rsp_gen)
output_f_hists.WriteTObject(h2d_rsp_gen_norm)
# Draw rsp (pT^L1/pT^Gen) Vs L1 pT
tree_raw.Draw("rsp:pt>>h2d_rsp_l1(%d,%g,%g,%d,%g,%g)" % (nb_pt, pt_min, pt_max, nb_rsp, rsp_min, rsp_max), cutStr)
h2d_rsp_l1 = ROOT.gROOT.FindObject("h2d_rsp_l1")
h2d_rsp_l1.SetTitle(";p_{T}^{L1} [GeV];response (p_{T}^{L1}/p_{T}^{Ref})")
output_f_hists.WriteTObject(h2d_rsp_l1)
h2d_rsp_l1_norm = cu.norm_vertical_bins(h2d_rsp_l1)
output_f_hists.WriteTObject(h2d_rsp_l1_norm)
# Draw pT^Gen Vs pT^L1
tree_raw.Draw("pt:ptRef>>h2d_gen_l1(%d,%g,%g,%d,%g,%g)" % (nb_pt, pt_min, pt_max, nb_pt, pt_min, pt_max), cutStr)
h2d_gen_l1 = ROOT.gROOT.FindObject("h2d_gen_l1")
h2d_gen_l1.SetTitle(";p_{T}^{Ref} [GeV];p_{T}^{L1} [GeV]")
output_f_hists.WriteTObject(h2d_gen_l1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_rsp_eta(inputfile, outputfile, eta_bins, pt_min, pt_max, pt_var, pu_min, pu_max):\n\n gr_rsp_eta = ROOT.TGraphErrors()\n\n # Input tree\n tree_raw = inputfile.Get(\"valid\")\n\n # Output folders\n output_f = outputfile.GetDirectory('eta_%g_%g' % (eta_bins[0], eta_bins[-1]))\n output_f_hists = None\n if not output_f:\n output_f = outputfile.mkdir('eta_%g_%g' % (eta_bins[0], eta_bins[-1]))\n output_f_hists = output_f.mkdir(\"Histograms\")\n else:\n output_f_hists = output_f.GetDirectory(\"Histograms\")\n\n # Go through eta bins, get response hist, fit with Gaussian and add to\n # the overall graph\n for i, eta in enumerate(eta_bins[:-1]):\n absetamin = eta\n absetamax = eta_bins[i + 1] # Eta cut string\n\n # Cut strings\n eta_cutStr = \"TMath::Abs(eta) < %f && TMath::Abs(eta) > %f\" % (absetamax, absetamin)\n pt_cutStr = \"%s < %g && %s > %g\" % (pt_var, pt_max, pt_var, pt_min)\n pu_cutStr = \"numPUVertices <= %f && numPUVertices >= %f\" % (pu_max, pu_min)\n avoidSaturation_cut = \"pt < 1023.1\"\n cutStr = \" && \".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])\n print cutStr\n\n nb_rsp = 100\n rsp_min, rsp_max = 0, 5\n rsp_name = 'hrsp_eta_%g_%g_%s_%g_%g' % (absetamin, absetamax, pt_var, pt_min, pt_max)\n tree_raw.Draw(\"rsp>>%s(%d,%g,%g)\" % (rsp_name, nb_rsp, rsp_min, rsp_max), cutStr)\n h_rsp = ROOT.gROOT.FindObject(rsp_name)\n h_rsp.SetTitle(\";response (p_{T}^{L1}/p_{T}^{Ref});\")\n\n print 'Integral', h_rsp.Integral()\n\n if h_rsp.Integral() <= 0:\n print \"No entries - skipping\"\n continue\n\n # Fit with Gaussian\n peak = h_rsp.GetBinCenter(h_rsp.GetMaximumBin())\n if absetamin < 2.9:\n fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\",\n h_rsp.GetMean() - h_rsp.GetRMS(),\n h_rsp.GetMean() + h_rsp.GetRMS())\n else:\n fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\",\n peak - (0.5 * h_rsp.GetRMS()),\n peak + (0.5 * h_rsp.GetRMS()))\n\n mean = h_rsp.GetMean()\n err = h_rsp.GetMeanError()\n\n check_fit = True\n if check_fit:\n if int(fit_result) == 0:\n mean = h_rsp.GetFunction(\"gaus\").GetParameter(1)\n err = h_rsp.GetFunction(\"gaus\").GetParError(1)\n else:\n print \"cannot fit with Gaussian - using raw mean instead\"\n\n output_f_hists.WriteTObject(h_rsp)\n\n # add to graph\n N = gr_rsp_eta.GetN()\n print absetamin, \"-\", absetamax, mean, err\n gr_rsp_eta.SetPoint(N, 0.5 * (absetamin + absetamax), mean)\n gr_rsp_eta.SetPointError(N, 0.5 * (absetamax - absetamin), err)\n\n gr_rsp_eta.SetTitle(\";|#eta^{L1}|; <response> = <p_{T}^{L1}/p_{T}^{Ref}>\")\n gr_rsp_eta.SetName(\"gr_rsp_eta_%g_%g_%s_%g_%g\" % (eta_bins[0], eta_bins[-1], pt_var, pt_min, pt_max))\n output_f.WriteTObject(gr_rsp_eta)",
"def get_response_with_weights(Response,zenith,azimuth,deg=True,binsize=5,cut=60.0,lookup=True):\n\n if lookup == True:\n # look up, no weighting\n rsp_mean = get_response_from_pixelhit_vector(Response,zenith,azimuth,binsize=binsize,cut=cut)\n\n else:\n \n # calculate the weighting for neighbouring pixels using their angular distance\n # also returns the indices of which pixels to be used for response averaging\n widx = get_response_weights_vector(zenith,azimuth,binsize,cut=cut)\n # This is a vectorised function so that each entry gets its own weighting\n # at the correct positions of the input angles ([:, None] is the same as \n # column-vector multiplcation of a lot of ones)\n\n # check for negative weights and indices and remove\n widx[1][widx[0][:,0,:] < 0] = 0.\n widx[1][widx[0][:,1,:] < 0] = 0.\n for i in range(4):\n widx[0][i,0,widx[0][i,0,:] < 0] = 0.\n widx[0][i,1,widx[0][i,1,:] < 0] = 0.\n \n # one energy bin\n #print(Response.shape,len(Response.shape))\n if len(Response.shape) < 4:\n rsp0 = Response[widx[0][0,1,:],widx[0][0,0,:],:]*widx[1][0,:][:, None]\n rsp1 = Response[widx[0][1,1,:],widx[0][1,0,:],:]*widx[1][1,:][:, None]\n rsp2 = Response[widx[0][2,1,:],widx[0][2,0,:],:]*widx[1][2,:][:, None]\n rsp3 = Response[widx[0][3,1,:],widx[0][3,0,:],:]*widx[1][3,:][:, None]\n # with energy matrix included\n elif len(Response.shape) >= 4:\n rsp0 = Response[widx[0][0,1,:],widx[0][0,0,:],:,:,:]*widx[1][0,:][:, None, None, None]\n rsp1 = Response[widx[0][1,1,:],widx[0][1,0,:],:,:,:]*widx[1][1,:][:, None, None, None]\n rsp2 = Response[widx[0][2,1,:],widx[0][2,0,:],:,:,:]*widx[1][2,:][:, None, None, None]\n rsp3 = Response[widx[0][3,1,:],widx[0][3,0,:],:,:,:]*widx[1][3,:][:, None, None, None]\n else:\n print('How this should really not happen ...')\n \n rsp_mean = rsp0 + rsp1 + rsp2 + rsp3\n\n # return response\n return rsp_mean",
"def create_pt_hist(hist, binning, binning_uflow, pt_bin_edges, pt_bin_edges_uflow, variable_bin_edges):\n # THIS IS A HORRIBLE HACK BECAUSE I DIDNT FILL MY HISTS\n all_pt_bins = list(np.append(pt_bin_edges_uflow[:-1], pt_bin_edges))\n all_pt_bins.append(8000)\n # print(all_pt_bins)\n nbins_pt = len(all_pt_bins)-1\n h_new = ROOT.TH1D(\"hpt\"+cu.get_unique_str(), \"\", nbins_pt, array('d', all_pt_bins))\n for pt_ind in range(1, h_new.GetNbinsX()+1):\n this_sum = 0\n this_err_sq = 0\n this_pt = all_pt_bins[pt_ind-1]\n # ARGH THIS IS SO FRUSTRATING\n this_binning = binning if this_pt >= pt_bin_edges[0] else binning_uflow\n for var_ind, var in enumerate(variable_bin_edges[:-1]):\n bin_num = this_binning.GetGlobalBinNumber(var, this_pt)\n this_sum += hist.GetBinContent(bin_num)\n this_err_sq += hist.GetBinError(bin_num)**2\n h_new.SetBinContent(pt_ind, this_sum)\n h_new.SetBinError(pt_ind, math.sqrt(this_err_sq))\n return h_new",
"def setBinning(self, doJpsiee=False):\n\n # Binning as defined in TP framework\n coarse_eta_bins = [-2.47,-1.52,-1.37,-0.60,0.00,0.60,1.37,1.52,2.47] # 9 items\n\n coarse_et_bins = [4.,20.,30.,40.,50.,60.,150.] # 7 items\n\n default_eta_bins = [-2.47,-2.37,-2.01,-1.81,-1.52,-1.37,-1.15,-0.80,-0.60,-0.10,0.00,\n 0.10, 0.60, 0.80, 1.15, 1.37, 1.52, 1.81, 2.01, 2.37, 2.47] # 21 items\n\n default_et_bins = [4.,7.,10.,15.,20.,25.,30.,35.,40.,45.,50.,60.,80.,150.] # 14 items\n\n etabins = [-2.47,-2.37,-2.01,-1.81,-1.52,-1.37,-1.15,-0.8,-0.6,-0.1,\n 0.0,0.1,0.6,0.8,1.15,1.37,1.52,1.81,2.01,2.37,2.47] # 21 items\n\n # TH2 with variable bin x-Axis, but constant bin y-Axis takes only Double_t arrays\n etbins_Zee = [0.,2.,4.,6.,8.,10.,\n 12.,14.,16.,18.,20.,22.,24.,26.,28.,\n 30.,32.,34.,36.,38.,40.,42.,44.,46.,48.,50.,55.,60.,65.,70.,100.] # 31 items\n\n etbins_Jpsiee = [ 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5,\n 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5,\n 10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,\n 15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,\n 20,20.5,21,21.5,22,22.5,23,23.5,24,24.5,\n 25,25.5] # 52 items\n\n\n if doJpsiee:\n self._nEtbins=51\n self._etbins = etbins_Jpsiee[0:self._nEtbins+1]\n else:\n self._nEtbins=30\n self._etbins = etbins_Zee[0:self._nEtbins+1]\n\n # Define the binning\n self._nEtabins=20\n self._ndefaultEtbins=13\n self._ndefaultEtabins=20\n self._ncoarseEtbins=6\n self._ncoarseEtabins=8\n\n #Fill the arrays\n self._etabins = etabins[0:self._nEtabins+1]\n self._defaultEtbins = default_et_bins[0:self._ndefaultEtbins+1]\n self._defaultEtabins = default_eta_bins[0:self._ndefaultEtabins+1]\n self._coarseEtbins = coarse_et_bins[0:self._ncoarseEtbins+1]\n self._coarseEtabins = coarse_eta_bins[0:self._ncoarseEtabins+1]",
"def ANN_efficiency_vs_PU(title, x_data, pT, CSV, model, ANN_Cuts, Ratio_Cuts, CSV_Cuts, bins, y_max, pT_Cut=200, BG=False, DrawTitle=False):\n assert x_data.shape[1]==21, \"x_data does not contain PV. Make sure it is made from a PU sample and has shape (x, 21).\"\n\tassert x_data.shape[0] == len(pT) == len(CSV), \"data inputs need to have the same length\"\n\tassert len(ANN_Cuts) == len(Ratio_Cuts) == len(CSV_Cuts) == len(bins)-1, \"cuts need to have the same length and be compatible with amount of bins\"\n\n ran = (0,80)\n nbins = 80\n import array\n\tif BG:\n\t\tbins_ = array.array('d',[0.0, 11.0]+range(19,41,8)+[42.0, 52.0, 80])\n\telse:\n \tbins_ = array.array('d',[0.0, 11.0]+range(15,41,4)+[42.0, 52.0, 58.0, 65.0, 80])\n\n\tif pT_Cut >= 1200:\n\t\tbins_ = array.array('d',[0.0, 20.0, 40.0, 80.0])\n\n\n #make histograms of efficiency vs PU\n AllJets_Hist = rt.TH1D(\"AllJets\",\"AllJets\",nbins,ran[0],ran[1])\n ANN_Hist = rt.TH1D(\"ANN\",\"ANN\",nbins,ran[0],ran[1])\n Ratio_Hist = rt.TH1D(\"Ratio\",\"Ratio\",nbins,ran[0],ran[1])\n CSV_Hist = rt.TH1D(\"CSV\",\"CSV\",nbins,ran[0],ran[1])\n\n\tAllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_Hist = ANN_Hist.Rebin(len(bins_)-1,\"ANN\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n \n\tpred_y = model.predict(ANN_functional_shape(x_data))\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\t\n\tfor i,pT_value in enumerate(pT):\n\t\t\tif pT_value < pT_Cut: continue\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJets_Hist.Fill(x_data[i,-1])\n\t\t\tif CSV[i] >= CSV_Cuts[bin_numbers[i]]: CSV_Hist.Fill(x_data[i,-1])\n\t if pred_y[i] >= ANN_Cuts[bin_numbers[i]]: ANN_Hist.Fill(x_data[i,-1])\n\t\t\tif x_data[i,12] != 0:\n\t\t\t\tL_R = x_data[i,15]/float(x_data[i,12])\n\t\t\t\tif L_R >= Ratio_Cuts[bin_numbers[i]]: Ratio_Hist.Fill(x_data[i,-1])\n\t \n\t'''\t\t\n AllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_Hist = ANN_Hist.Rebin(len(bins_)-1,\"ANN\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n\t'''\n #Make Graphs and draw them\n canvas = rt.TCanvas('canvas','canvas',600,600)\n\tif DrawTitle == False: rt.gStyle.SetOptTitle(0)\n legend = rt.TLegend(0.1,0.9,0.35,0.75)\n ANN_Graph = rt.TGraphAsymmErrors()\n Ratio_Graph = rt.TGraphAsymmErrors()\n CSV_Graph = rt.TGraphAsymmErrors()\n if DrawTitle: Ratio_Graph.SetTitle(title+\"_vs_PU_pT{}{}\".format('jet',pT_Cut))\n ANN_Graph.Divide(ANN_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n Ratio_Graph.Divide(Ratio_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n CSV_Graph.Divide(CSV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n ANN_Graph.SetLineColor(3)\n Ratio_Graph.SetLineColor(2)\n CSV_Graph.SetLineColor(4)\n legend.AddEntry(ANN_Graph, \"ANN\", \"LEP\")\n legend.AddEntry(Ratio_Graph, \"L4/L1\", \"LEP\")\n legend.AddEntry(CSV_Graph, \"CSV\", \"LEP\")\n Ratio_Graph.GetXaxis().SetTitle(\"#PV\")\n if BG:\n\t\tRatio_Graph.GetYaxis().SetTitle('mistag rate')\n\telse:\t\n\t\tRatio_Graph.GetYaxis().SetTitle('efficiency')\n Ratio_Graph.GetYaxis().SetTitleOffset(1.5)\n\tRatio_Graph.SetMinimum(0.)\n Ratio_Graph.SetMaximum(y_max)\n Ratio_Graph.Draw()\n ANN_Graph.Draw(\"SAME\")\n CSV_Graph.Draw(\"SAME\")\n legend.Draw()\n canvas.SaveAs('Thesis_Plots/'+title+\"_vs_PU_pT{}{}.png\".format('jet',pT_Cut))",
"def binned_AUC(func_predict, X, y, X_kin, VARS_kin, pt_edges, eta_edges, label):\n\n AUC = np.zeros((len(pt_edges)-1, len(eta_edges)-1))\n\n\n # ** Compute predictions **\n if type(X) is list: # Evaluate one by one\n y_pred = np.zeros(len(X))\n for k in range(len(y_pred)):\n y_pred[k] = func_predict(X[k])\n else:\n y_pred = func_predict(X)\n\n\n # Loop over bins\n for i in range(len(pt_edges) - 1):\n for j in range(len(eta_edges) - 1):\n\n pt_range = [ pt_edges[i], pt_edges[i+1]]\n eta_range = [eta_edges[j], eta_edges[j+1]]\n\n # Indices\n ind = np.logical_and(aux.pick_ind(X_kin[:, VARS_kin.index('trk_pt')], pt_range),\n aux.pick_ind(X_kin[:, VARS_kin.index('trk_eta')], eta_range))\n\n print(f'\\nEvaluate classifier <{label}> ...')\n print(f'*** pT = [{pt_range[0]:.3f},{pt_range[1]:.3f}], eta = [{eta_range[0]:.3f},{eta_range[1]:.3f}] ***')\n \n if np.sum(ind) > 0: # Do we have any events in this cell\n\n # Evaluate metric\n met = aux.Metric(y_true = y[ind], y_soft = y_pred[ind])\n print('AUC = {:.5f}'.format(met.auc))\n AUC[i,j] = met.auc\n\n else:\n print('No events found in this (eta,pt) cell!')\n \n # Evaluate total performance\n met = aux.Metric(y_true = y, y_soft = y_pred)\n fig,ax = plot_auc_matrix(AUC, pt_edges, eta_edges)\n ax.set_title('{}: Integrated AUC = {:.3f}'.format(label, met.auc))\n\n return fig,ax,met",
"def eta(self):\r\n #Make a list for the output\r\n h = [0] * self._len_h\r\n\r\n if self._is_ts:\r\n # Loop over channels\r\n for i in range(self._len_h):\r\n data = self.data[i]\r\n u = np.unique(self.events[i])\r\n event_types = u[np.unique(self.events[i]) != 0]\r\n h[i] = np.empty((event_types.shape[0], self.len_et),\r\n dtype=complex)\r\n\r\n # This offset is used to pull the event indices below, but we\r\n # have to broadcast it so the shape of the resulting idx+offset\r\n # operation below gives us the (nevents, len_et) array we want,\r\n # per channel.\r\n offset = np.arange(self.offset,\r\n self.offset + self.len_et)[:, np.newaxis]\r\n # Loop over event types\r\n for e_idx in range(event_types.shape[0]):\r\n idx = np.where(self.events[i] == event_types[e_idx])[0]\r\n event_trig = data[idx + offset]\r\n #Correct baseline by removing the first point in the series\r\n #for each channel:\r\n if self._correct_baseline:\r\n event_trig -= event_trig[0]\r\n\r\n h[i][e_idx] = np.mean(event_trig, -1)\r\n\r\n #In case the input events are an Events:\r\n else:\r\n #Get the indices necessary for extraction of the eta:\r\n add_offset = np.arange(self.offset,\r\n self.offset + self.len_et)[:, np.newaxis]\r\n\r\n idx = (self.events.time / self.sampling_interval).astype(int)\r\n\r\n #Make a list for the output\r\n h = [0] * self._len_h\r\n\r\n # Loop over channels\r\n for i in range(self._len_h):\r\n #If this is a list with one element:\r\n if self._len_h == 1:\r\n event_trig = self.data[0][idx + add_offset]\r\n #Otherwise, you need to index straight into the underlying data\r\n #array:\r\n else:\r\n event_trig = self.data.data[i][idx + add_offset]\r\n\r\n h[i] = np.mean(event_trig, -1)\r\n\r\n h = np.array(h).squeeze()\r\n return ts.TimeSeries(data=h,\r\n sampling_interval=self.sampling_interval,\r\n t0=self.offset * self.sampling_interval,\r\n time_unit=self.time_unit)",
"def runHisto1EneAndPyMcaFit(self, **kws):\n self.histo1_energy(**kws)\n self.x = self.h1e.bin_center\n self.y = self.h1e.histogram\n self.fitSplitPVoigt(**kws)",
"def make_eta(eta_step_bucket, npart, hist_rule=\"square-root\"):\n\n lowbound = np.min(eta_step_bucket)\n upbound = np.max(eta_step_bucket) + 1e-10\n pts = len(eta_step_bucket)\n if hist_rule == \"square-root\":\n hist_num = int(np.sqrt(pts))\n elif hist_rule == \"sturges\":\n hist_num = int(np.log2(pts)) + 1\n elif hist_rule == \"rice-rule\":\n hist_num = int(2 * pts ** (1 / 3))\n eta_hist = np.zeros(hist_num)\n eta_hist, bins = np.histogram(\n np.array(eta_step_bucket), bins=np.linspace(lowbound, upbound, num=hist_num + 1)\n )\n # plt.figure()\n # _=plt.hist(np.array(eta_step_bucket),bins=np.linspace(lowbound, upbound, num=hist_num+1))\n # plt.title('Input eta histogram')\n eta_hist = eta_hist / np.sum(eta_hist)\n\n # make cdf\n eta_cdf = np.zeros(eta_hist.shape)\n eta_cdf[0] = eta_hist[0]\n for j in range(1, hist_num):\n eta_cdf[j] = eta_hist[j] + eta_cdf[j - 1]\n eta_cdf = np.concatenate((np.zeros(1), eta_cdf))\n\n # make eta\n x = np.random.rand(npart)\n eta_sampled = np.interp(x, eta_cdf, bins)\n # plt.figure()\n # _=plt.hist(eta_sampled,bins=np.linspace(lowbound, upbound, num=hist_num+1))\n # plt.title('Sampled eta histogram')\n return eta_sampled",
"def plot_rsp_pt(inputfile, outputfile, absetamin, absetamax, pt_bins, pt_var, pt_max, pu_min, pu_max):\n\n # Input tree\n tree_raw = inputfile.Get(\"valid\")\n\n # Output folders\n output_f = outputfile.GetDirectory('eta_%g_%g' % (absetamin, absetamax))\n output_f_hists = None\n if not output_f:\n output_f = outputfile.mkdir('eta_%g_%g' % (absetamin, absetamax))\n output_f_hists = output_f.mkdir(\"Histograms\")\n else:\n output_f_hists = output_f.GetDirectory(\"Histograms\")\n\n gr_rsp_pt = ROOT.TGraphErrors()\n\n # Cut strings\n eta_cutStr = \"TMath::Abs(eta) < %f && TMath::Abs(eta) > %f\" % (absetamax, absetamin)\n # keep the pt < pt_max to safeguard against staurated L1 jets\n pt_cutStr = \"%s < %g && pt < %g\" % (pt_var, pt_bins[-1], pt_max)\n pu_cutStr = \"numPUVertices <= %f && numPUVertices >= %f\" % (pu_max, pu_min)\n avoidSaturation_cut = \"pt < 1023.1\"\n cutStr = \" && \".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])\n\n n_rsp_bins = 100\n rsp_min = 0\n rsp_max = 5\n\n pt_array = array('d', pt_bins)\n\n # First make a 2D plot\n h2d_rsp_pt = ROOT.TH2D(\"h2d_rsp_%s_%g_%g\" % (pt_var, absetamin, absetamax),\n \"%g < |#eta| < %g;p_{T};response\" % (absetamin, absetamax),\n len(pt_bins) - 1, pt_array,\n n_rsp_bins, rsp_min, rsp_max)\n tree_raw.Draw(\"rsp:%s>>h2d_rsp_%s_%g_%g\" % (pt_var, pt_var, absetamin, absetamax), cutStr)\n\n output_f_hists.WriteTObject(h2d_rsp_pt)\n\n # Now for each pt bin, do a projection on 1D hist of response and fit a Gaussian\n print pt_bins\n for i, (pt_min, pt_max) in enumerate(pairwise(pt_bins)):\n h_rsp = h2d_rsp_pt.ProjectionY(\"rsp_%s_%g_%g\" % (pt_var, pt_min, pt_max), i + 1, i + 1)\n print i, pt_min, pt_max\n\n if h_rsp.Integral() <= 0:\n print \"No entries - skipping\"\n continue\n\n # Fit with Gaussian\n mean = h_rsp.GetMean()\n err = h_rsp.GetMeanError()\n\n peak = h_rsp.GetBinCenter(h_rsp.GetMaximumBin())\n # if h_rsp.GetRMS() < 0.2:\n # fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\", peak - h_rsp.GetRMS(), peak + h_rsp.GetRMS())\n # else:\n # fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\", peak - 0.5*h_rsp.GetRMS(), peak + 0.5*h_rsp.GetRMS())\n fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\", peak - h_rsp.GetRMS(), peak + h_rsp.GetRMS())\n # fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\", mean - h_rsp.GetRMS(), mean + h_rsp.GetRMS())\n\n output_f_hists.WriteTObject(h_rsp)\n\n # TODO: better check against Gaussian fit - are peaks ~ similar?\n # if int(fit_result) == 0 and check_gaus_fit(h_rsp):\n if int(fit_result) == 0 and abs(h_rsp.GetFunction(\"gaus\").GetParameter(1) - peak) / peak < 0.1:\n mean = h_rsp.GetFunction(\"gaus\").GetParameter(1)\n err = h_rsp.GetFunction(\"gaus\").GetParError(1)\n # Add the Gaussian to the total graph\n N = gr_rsp_pt.GetN()\n gr_rsp_pt.SetPoint(N, 0.5 * (pt_min + pt_max), mean)\n gr_rsp_pt.SetPointError(N, 0.5 * (pt_max - pt_min), err)\n else:\n print \"Cannot fit Gaussian in plot_rsp_pt, using raw mean instead\"\n\n # Save the graph\n gr_rsp_pt.SetTitle(\"%g < |#eta^{L1}| < %g;p_{T}; <response> = <p_{T}^{L1}/p_{T}^{Ref}>\" % (absetamin, absetamax))\n gr_rsp_pt.SetName(\"gr_rsp_%s_eta_%g_%g\" % (pt_var, absetamin, absetamax))\n\n output_f.WriteTObject(gr_rsp_pt)",
"def fit_hist(self, h, iters = 20):\n N = np.sum(h)\n for iter in range(iters):\n\n term1, term2, term3, term4 = 0,0,0,0\n\n for i in range(h.shape[0]):\n term1 += (math.log(i+1) * h[i])\n term2 += h[i]*(math.pow(i/self.k_alp[1],self.k_alp[0]))*(math.log((i+1)/self.k_alp[1]))\n term3 += h[i]*(math.pow(i/self.k_alp[1],self.k_alp[0]))\n term4 += h[i]*(math.pow(i/self.k_alp[1],self.k_alp[0]))*((math.log((i+1)/self.k_alp[1]))**2)\n # print(term1,term2,term3,term4)\n\n dL_dk = (N / self.k_alp[0]) - (N * math.log(self.k_alp[1])) + term1 - term2\n dL_dalpha = (self.k_alp[0] / self.k_alp[1]) * (term3 - N)\n d2L_dk2 = -(N / (self.k_alp[0] ** 2)) - term4\n d2L_dalpha2 = (self.k_alp[0] / (self.k_alp[1] ** 2)) * (N - ((self.k_alp[0] + 1) * term3))\n d2L_dkdalpha = ((1 / self.k_alp[1]) * term3) + ((self.k_alp[0]/self.k_alp[1])*term2) - (N/self.k_alp[1])\n # print(dL_dk,dL_dalpha, d2L_dk2,d2L_dalpha2,d2L_dkdalpha)\n\n self.k_alp = self.k_alp + \\\n np.dot(np.linalg.inv(np.array([[d2L_dk2, d2L_dkdalpha],[d2L_dkdalpha, d2L_dalpha2]])) ,\n np.array([-dL_dk, -dL_dalpha]))",
"def selection_correction_method1(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break\n return h_in, h_out",
"def selection_correction_method1_v2(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break",
"def getHistPta(self, name, **kwargs):\n hists = []\n format_string = \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[jetpT]:02d}TrkPt{0[trkpT]:02d}\"\n\n for i in range(self._range[0], self._range[1]):\n if \"dir\" in kwargs:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": kwargs[\"dir\"],\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"jetpT\": i,\n \"trkpT\": j,\n }\n )\n ).Clone()\n for j in range(0, 11)\n ] # Get jT histograms from file an array\n else:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": self._directory,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"jetpT\": i,\n \"trkpT\": j,\n }\n )\n ).Clone()\n for j in range(0, 11)\n ] # Get jT histograms from file an array\n hists.append(hist)\n # print('{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}'.format({'dir':self._directory, 'histname':name,'NFin':self._NFIN,'pT':1}))\n\n # Get Jet Pt bins\n jetPt = parse_jet_pt_bins(hist)\n\n # Get Track pt Bins\n trkPt = parse_jet_pt_bins(search=\"constituent\")\n\n # print(len(hist))\n # print(hist)\n # print(jetPt)\n for hist, N, bgN in zip(hists, self._measN, self._measBgN):\n for h in hist:\n h.Sumw2()\n # print(\"Rebinning {} by {} in set {} that has {} bins\".format(h.GetTitle(), self._rebin, self._name, h.GetNbinsX()))\n h.Rebin(self._rebin)\n print(kwargs)\n if self.properties.get(\"isWeight\", False):\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n else:\n if kwargs.get(\"isBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 1)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 1)\n h.Scale(1.0 / bgN, \"width\")\n print(\"{} is bg\".format(name))\n else:\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n h.Scale(1.0 / N, \"width\")\n\n h.SetMarkerStyle(self.properties.get(\"style\", 24))\n h.SetMarkerSize(0.5)\n h.SetLineColor(1)\n\n if kwargs.get(\"jetpt\", False):\n return hist, jetPt, trkPt\n else:\n return hist",
"def fillHistograms(self, params, hists, mode = INTENS):\n\t\tif mode.IS_THEO and not self.hasTheo:\n\t\t\tprint \"No theory loaded, cannot fill histogram\"\n\t\tif not len(hists) == self.nSect:\n\t\t\traise IndexError(\"Histogram number mismatch\")\n\t\tcorrAmp = self.getCorrectedAmplitudes(params)\n\t\tfor s in range(self.nSect):\n\t\t\tcount = 0\n\t\t\tstart = self.borders[s ]\n\t\t\tstop = self.borders[s+1]\n\t\t\tfor i in range(start, stop):\n\t\t\t\tampl = corrAmp[2*i] + 1.j * corrAmp[2*i+1]\n\t\t\t\tnorm = self.norms[i]\n\t\t\t\tcoma = np.zeros((2,2))\n\t\t\t\tjac = np.zeros((2))\n\t\t\t\tcoma[0,0] = self.coma[2*i ,2*i ]\n\t\t\t\tcoma[0,1] = self.coma[2*i ,2*i+1]\n\t\t\t\tcoma[1,0] = self.coma[2*i+1,2*i ]\n\t\t\t\tcoma[1,1] = self.coma[2*i+1,2*i+1]\n\t\t\t\tif mode == INTENS:\n\t\t\t\t\tval = abs(ampl)**2\n\t\t\t\t\tjac[0] = 2*ampl.real\n\t\t\t\t\tjac[1] = 2*ampl.imag\n\t\t\t\telif mode == INTENSNORM:\n\t\t\t\t\tval = abs(ampl)**2/norm\n\t\t\t\t\tjac[0] = 2*ampl.real/norm\n\t\t\t\t\tjac[1] = 2*ampl.imag/norm\n\t\t\t\telif mode == REAL:\n\t\t\t\t\tval = ampl.real\n\t\t\t\t\tjac[0] = 1.\n\t\t\t\telif mode == IMAG:\n\t\t\t\t\tval = ampl.imag\n\t\t\t\t\tjac[1] = 1.\n\t\t\t\telif mode == REIMCORRELATION:\n\t\t\t\t\tval = coma[0,1]\n\t\t\t\telif mode == PHASE:\n\t\t\t\t\tval = phase(ampl)\n\t\t\t\t\tif ampl.real == 0.:\n\t\t\t\t\t\tif ampl.imag > 0.:\n\t\t\t\t\t\t\tjac[0] = -1./ampl.imag\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tjac[0] = 1./ampl.imag\n\t\t\t\t\telse:\n\t\t\t\t\t\tcommon = 1. + ampl.imag**2/ampl.real**2\n\t\t\t\t\t\tjac[0] = -ampl.imag/ampl.real**2/common\n\t\t\t\t\t\tjac[1] = 1./ampl.real/common\n\t\t\t\telif mode == INTENSTHEO:\n\t\t\t\t\tval = abs(self.theo[i])**2\n\t\t\t\telif mode == REALTHEO:\n\t\t\t\t\tval = self.theo[i].real\n\t\t\t\telif mode == IMAGTHEO:\n\t\t\t\t\tval = self.theo[i].imag\n\t\t\t\telif mode == PHASETHEO:\n\t\t\t\t\tval = phase(self.theo[i])\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Unknown mode '\" + mode + \"'\")\n\t\t\t\terr = np.dot(jac, np.dot(coma,jac))**.5\n\t\t\t\thists[s].SetBinContent(self.bin3pi+1, count + 1, val)\n\t\t\t\thists[s].SetBinError(self.bin3pi+1, count + 1, err)\n\t\t\t\tcount += 1",
"def _step_EM(\n self, X, indices_ones, pi, alpha_1, alpha_2, tau_1, tau_2, n1, n2\n ):\n\n eps_1 = max(1e-4 / n1, 1e-9)\n eps_2 = max(1e-4 / n2, 1e-9)\n nq, nl = self.n_row_clusters, self.n_column_clusters\n\n ########################## E-step ##########################\n u = X.dot(tau_2) # Shape is (n1,nl)\n v = X.T.dot(tau_1) # Shape is (n2,nq)\n\n # Update of tau_1 with sparsity trick.\n l_tau_1 = (\n (\n (u.reshape(n1, 1, nl))\n * (self._np.log(pi) - self._np.log(1 - pi)).reshape(1, nq, nl)\n ).sum(2)\n + self._np.log(alpha_1.reshape(1, nq))\n + (self._np.log(1 - pi) @ tau_2.T).sum(1)\n )\n\n # For computationnal stability reasons 1.\n l_tau_1 -= l_tau_1.max(axis=1).reshape(n1, 1)\n tau_1 = self._np.exp(l_tau_1)\n tau_1 /= tau_1.sum(axis=1).reshape(n1, 1) # Normalize.\n\n # For computationnal stability reasons 2.\n tau_1[tau_1 < eps_1] = eps_1\n tau_1 /= tau_1.sum(axis=1).reshape(n1, 1) # Re-Normalize.\n\n # Update of tau_2 with sparsity trick.\n l_tau_2 = (\n (\n (v.reshape(n2, nq, 1))\n * (self._np.log(pi) - self._np.log(1 - pi)).reshape(1, nq, nl)\n ).sum(1)\n + self._np.log(alpha_2.reshape(1, nl))\n + (tau_1 @ self._np.log(1 - pi)).sum(0)\n )\n\n # For computationnal stability reasons 1.\n l_tau_2 -= l_tau_2.max(axis=1).reshape(n2, 1)\n tau_2 = self._np.exp(l_tau_2)\n tau_2 /= tau_2.sum(axis=1).reshape(n2, 1) # Normalize.\n\n # For computationnal stability reasons 2.\n tau_2[tau_2 < eps_2] = eps_2\n tau_2 /= tau_2.sum(axis=1).reshape(n2, 1) # Re-Normalize.\n ########################## M-step ##########################\n alpha_1 = tau_1.mean(0)\n alpha_2 = tau_2.mean(0)\n pi = (\n tau_1[indices_ones[0]].reshape(-1, nq, 1)\n * tau_2[indices_ones[1]].reshape(-1, 1, nl)\n ).sum(0) / (tau_1.sum(0).reshape(nq, 1) * tau_2.sum(0).reshape(1, nl))\n return pi, alpha_1, alpha_2, tau_1, tau_2",
"def ANN_efficiency_vs_PU_pT_PV(title, x_data, pT, CSV, model_noPT, model_withPT, model_withPV, ANN_noPT_Cuts, ANN_withPT_Cuts, ANN_withPV_Cuts, Ratio_Cuts, CSV_Cuts, bins, y_max, pT_Cut=200, BG=False, DrawTitle=False, LargeLegend=False):\n assert x_data.shape[1]==21, \"x_data does not contain PV. Make sure it is made from a PU sample and has shape (x, 21).\"\n\tassert x_data.shape[0] == len(pT) == len(CSV), \"data inputs need to have the same length\"\n\tassert len(ANN_noPT_Cuts) == len(ANN_withPT_Cuts) == len(ANN_withPV_Cuts) == len(Ratio_Cuts) == len(CSV_Cuts) == len(bins)-1, \"cuts need to have the same length and be compatible with amount of bins\"\n\n ran = (0,80)\n nbins = 80\n import array\n\tif BG:\n\t\tbins_ = array.array('d',[0.0, 11.0]+range(19,41,8)+[42.0, 52.0, 80])\n\telse:\n \tbins_ = array.array('d',[0.0, 11.0]+range(15,41,4)+[42.0, 52.0, 58.0, 65.0, 80])\n\n\tif pT_Cut >= 1200:\n\t\tbins_ = array.array('d',[0.0, 20.0, 40.0, 80.0])\n\n\n #make histograms of efficiency vs PU\n AllJets_Hist = rt.TH1D(\"AllJets\",\"AllJets\",nbins,ran[0],ran[1])\n ANN_noPT_Hist = rt.TH1D(\"ANN_noPT\",\"ANN_noPT\",nbins,ran[0],ran[1])\n\tANN_withPT_Hist = rt.TH1D(\"ANN_withPT\",\"ANN_withPT\",nbins,ran[0],ran[1])\n\tANN_withPV_Hist = rt.TH1D(\"ANN_withPV\",\"ANN_withPV\",nbins,ran[0],ran[1])\n Ratio_Hist = rt.TH1D(\"Ratio\",\"Ratio\",nbins,ran[0],ran[1])\n CSV_Hist = rt.TH1D(\"CSV\",\"CSV\",nbins,ran[0],ran[1])\n\n\tAllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_noPT_Hist = ANN_noPT_Hist.Rebin(len(bins_)-1,\"ANN_noPT\",bins_)\n\tANN_withPT_Hist = ANN_withPT_Hist.Rebin(len(bins_)-1,\"ANN_withPT\",bins_)\n\tANN_withPV_Hist = ANN_withPV_Hist.Rebin(len(bins_)-1,\"ANN_withPV\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n \n\tpred_y_noPT = model_noPT.predict(ANN_functional_shape(x_data))\n\tpred_y_withPT = model_withPT.predict(ANN_functional_shape(x_data)+[pT/200.])\n\tpred_y_withPV = model_withPV.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\t\n\tfor i,pT_value in enumerate(pT):\n\t\t\tif pT_value < pT_Cut: continue\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJets_Hist.Fill(x_data[i,-1])\n\t\t\tif CSV[i] >= CSV_Cuts[bin_numbers[i]]: CSV_Hist.Fill(x_data[i,-1])\n\t if pred_y_noPT[i] >= ANN_noPT_Cuts[bin_numbers[i]]: ANN_noPT_Hist.Fill(x_data[i,-1])\n\t\t\tif pred_y_withPT[i] >= ANN_withPT_Cuts[bin_numbers[i]]: ANN_withPT_Hist.Fill(x_data[i,-1])\n\t\t\tif pred_y_withPV[i] >= ANN_withPV_Cuts[bin_numbers[i]]: ANN_withPV_Hist.Fill(x_data[i,-1])\n\n\t\t\tif x_data[i,12] != 0:\n\t\t\t\tL_R = x_data[i,15]/float(x_data[i,12])\n\t\t\t\tif L_R >= Ratio_Cuts[bin_numbers[i]]: Ratio_Hist.Fill(x_data[i,-1])\n \n\t#Make Graphs and draw them\n canvas = rt.TCanvas('canvas','canvas',600,600)\n\tif DrawTitle == False: rt.gStyle.SetOptTitle(0)\n\tif LargeLegend:\n\t\tlegend = rt.TLegend(0.1,0.9,0.4,0.7)\n\telse:\n \tlegend = rt.TLegend(0.1,0.9,0.35,0.75)\n ANN_noPT_Graph = rt.TGraphAsymmErrors()\n\tANN_withPT_Graph = rt.TGraphAsymmErrors()\n\tANN_withPV_Graph = rt.TGraphAsymmErrors()\n Ratio_Graph = rt.TGraphAsymmErrors()\n CSV_Graph = rt.TGraphAsymmErrors()\n if DrawTitle: Ratio_Graph.SetTitle(title+\"_vs_PU_pT{}{}\".format('jet',pT_Cut))\n ANN_noPT_Graph.Divide(ANN_noPT_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n\tANN_withPT_Graph.Divide(ANN_withPT_Hist,AllJets_Hist,\"cl=0.683 b(1,1) 
mode\")\n\tANN_withPV_Graph.Divide(ANN_withPV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n Ratio_Graph.Divide(Ratio_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n CSV_Graph.Divide(CSV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n ANN_noPT_Graph.SetLineColor(3)\n ANN_withPT_Graph.SetLineColor(6)\n\tANN_withPV_Graph.SetLineColor(7)\n\tRatio_Graph.SetLineColor(2)\n CSV_Graph.SetLineColor(4)\n #legend.AddEntry(ANN_noPT_Graph, \"ANN without p_{T}/PV\", \"LEP\")\n\tlegend.AddEntry(ANN_noPT_Graph, \"ANN without p_{T}\", \"LEP\")\n legend.AddEntry(ANN_withPT_Graph, \"ANN with p_{T}\", \"LEP\")\n\t#legend.AddEntry(ANN_withPV_Graph, \"ANN with PV\", \"LEP\")\n\tlegend.AddEntry(Ratio_Graph, \"L4/L1\", \"LEP\")\n legend.AddEntry(CSV_Graph, \"CSV\", \"LEP\")\n Ratio_Graph.GetXaxis().SetTitle(\"#PV\")\n if BG:\n\t\tRatio_Graph.GetYaxis().SetTitle('mistag rate')\n\telse:\t\n\t\tRatio_Graph.GetYaxis().SetTitle('efficiency')\n Ratio_Graph.GetYaxis().SetTitleOffset(1.5)\n\tRatio_Graph.SetMinimum(0.)\n Ratio_Graph.SetMaximum(y_max)\n Ratio_Graph.Draw()\n ANN_noPT_Graph.Draw(\"SAME\")\n\tANN_withPT_Graph.Draw(\"SAME\")\n\t#ANN_withPV_Graph.Draw(\"SAME\")\n CSV_Graph.Draw(\"SAME\")\n legend.Draw()\n canvas.SaveAs('Thesis_Plots/'+title+\"_vs_PU_pT{}{}.png\".format('jet',pT_Cut))",
"def build_muons_from_HZZ4mu_event(t, evt_num,\n eta_bin=[0, 2.4],\n pT_bin=[5, 200],\n d0_bin=[0, 1], # cm.\n inv_m_bin=[105, 140],\n dR_max=0.002,\n verbose=False):\n bad_muons = (None, None, None, None)\n # t.GetEntry(evt_num)\n \n if not passed_H4mu_evt_selection(t, inv_m_min=inv_m_bin[0], inv_m_max=inv_m_bin[1]):\n return bad_muons\n\n rec_ndcs_ls = list(t.lep_Hindex) # Elements that correspond to 4 leptons which build Higgs candidate.\n lep_genindex_ls = list(t.lep_genindex)\n\n # if not validate_lep_genindex(lep_genindex_ls):\n # continue\n # We have a good lep_genindex: at least 4 leptons have been matched. \n # Could be 5 or more leps.\n gen_ndcs_ls = get_ndcs_gen(rec_ndcs_ls, lep_genindex_ls)\n # Now gen_ndcs_ls should be the same length as rec_ndcs_ls:\n assert len(rec_ndcs_ls) == len(gen_ndcs_ls)\n \n mu_ls = make_muon_ls(t, rec_ndcs_ls, gen_ndcs_ls)\n \n all_muons_passed = apply_kinem_selections(mu_ls, inv_mass_bin=inv_m_bin, eta_bin=eta_bin, pT_bin=pT_bin,\n d0_bin=d0_bin, dR_max=dR_max)\n if not all_muons_passed:\n return bad_muons\n\n # NOW event is good.\n # n_evts_passed += 1\n\n # Return the 4 good muons.\n assert len(mu_ls) == 4\n return tuple(mu_ls)",
"def test_ds_1d(i, num_bins):\n np.random.seed(2191+i)\n simulated_dataset = simulate_direction(num_bins, ntaxa=47, nsamples=int(360/num_bins), Sigma_trace=1)\n X, K, sigma, mu = simulated_dataset\n y = np.zeros((X.shape[0]*X.shape[1], np.shape(X)[2])) #reformat data for model\n for i in range(len(X)):\n for j in range(len(X[0])):\n y[X.shape[1]*i+j] = X[i,j]\n no_struc = 1\n one_dim = fitModel_1d_util(y)\n for i in range(2):\n print([one_d_AIC(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n #for i in range(2):\n # print([one_d_AWE(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n #print(\"silhouette\")\n #for i in range(len(one_dim[1])):\n # mixing, sigma, delta, Q, Q_edge, edge_mean, mu, likelihoods, iterations = one_dim[1][i]\n # print(silhouette(mixing, sigma, mu, y))\n two_dim = fitModel_2d_util(y)\n for i in range(2):\n print([one_d_AIC(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n print([AIC(two_dim[1][i], y) for i in range(len(two_dim[1]))])\n #one_dim_scores = one_dim[0] #Scores start at 2 bins\n #two_dim_scores = two_dim[0]\n selection = 1 #if selection is negative just assume i'm referring to the 2d case\n return simulated_dataset, one_dim, two_dim, selection",
"def doSPEC(self, nbBins=4, n=2): \n # --------------------------------------------------------------------------------------------- #\n # Determine the energy edges for the calculation \n eEdges = np.logspace( np.log10(self.emin), np.log10(self.emax), nbBins+1)\n tmpEmin, tmpEmax = self.emin, self.emax\n\n # --------------------------------------------------------------------------------------------- #\n # Initilialize the result storage FITS file\n fitsNnam = os.path.join(self.workpath, 'SPECresults.fits')\n prihdr = fits.Header() \n prihdr.set('USER-CRE', 'A.Loh', 'File generator')\n prihdr.set('FT1', self.ft1, 'Data file')\n prihdr.set('FT2', self.ft2, 'Spacecraft file')\n prihdr.set('START', self.metstart, 'Start time (MET)')\n prihdr.set('STOP', self.metstop, 'Stop time (MET)')\n prihdr.set('EMIN', tmpEmin, 'Min Energy (MeV)')\n prihdr.set('EMAX', tmpEmax, 'Max energy (MeV)')\n prihdr.set('RA', self.ra, 'RA (deg)')\n prihdr.set('DEC', self.dec, 'Dec (deg)')\n prihdr.set('DT', dt, 'Dec (deg)')\n prihdr.set('SHIFT', shift, 'Dec (deg)')\n hdu = fits.PrimaryHDU(header=prihdr) # hdu table that will be filled\n hdu.writeto(fitsNnam)\n ct = []\n emin = []\n emax = []\n flux = []\n fluxerr = []\n index = []\n indexerr = []\n ts = []\n upperlim = []\n status = []\n hdus = fits.open(fitsNnam)\n\n # --------------------------------------------------------------------------------------------- #\n # Loop through the energy bins and launch a likelihood fit\n # Split the calculation and create all the required files\n for count in xrange(nbBins):\n self.emin = eEdges[count]\n self.emax = eEdges[count + 1]\n self.suffix = '_'+str(count)\n ct.append(count)\n emin.append(self.emin)\n emax.append(self.emax)\n flux.append(-1)\n fluxerr.append(-1)\n index.append(-1)\n indexerr.append(-1)\n ts.append(-1)\n upperlim.append(-1)\n status.append('pending')\n\n fil = os.path.join( self.workpath, 'tmp_'+str(count)+'.py' )\n tmp = open(fil, 'w')\n if self.mode == 'binned':\n tmp.write(\"import algamma; import os; a=algamma.algamma(); a.ft1='{}';\\\n a.ft2='{}'; a.emin={}; a.emax={}; a.suffix='_{}';\\\n a.workpath='{}'; a._gtSelect(); a._gtMktime();\\\n a._gtLtcube(); a._gtBincube(); a._gtExpmap(); a._gtSrcmap();\\\n a._gtLike(); a._uppLim()\\nif os.path.isfile(a.outgtlike):\\n\\\n \\ta.rmGt(); a._specResults(); os.remove('{}')\".format(self.ft1, self.ft2, \n self.emin, self.emax, count, self.workpath, fil))\n elif self.mode == 'unbinned':\n tmp.write(\"import algamma; import os; from astropy.io import fits; a=algamma.algamma();\\\n a.ft1='{}'; a.ft2='{}'; a.mode='unbinned'; a.model='{}'; a.emin={};\\\n a.emax={}; a.suffix='_{}'; a.workpath='{}'; a._gtSelect(); a._gtMktime();\\\n a._gtLtcube(); a._gtExpmap(); a._gtLike(); a._uppLim()\\nif os.path.isfile(a.outgtlike):\\n\\\n \\ta.rmGt(); a._specResults(); os.remove('{}')\"\n .format(self.ft1, self.ft2, self.lcmodel, self.emin, self.emax,\n count, self.workpath, fil))\n else:\n pass\n tmp.close()\n\n # --------------------------------------------------------------------------------------------- #\n # Create the fits file\n c1 = fits.Column(name='count', array=ct, format='I')\n c2 = fits.Column(name='emin', array=tstart, format='D')\n c3 = fits.Column(name='emax', array=emax, format='D')\n c4 = fits.Column(name='flux', array=flux, format='D')\n c5 = fits.Column(name='fluxerr', array=fluxerr, format='D')\n c6 = fits.Column(name='index', array=flux, format='D')\n c7 = fits.Column(name='indexerr', array=fluxerr, format='D')\n c8 = fits.Column(name='ts', array=ts, 
format='D')\n c9 = fits.Column(name='upperlim', array=upperlim, format='D')\n c10 = fits.Column(name='status', array=status, format='10A')\n hdus.append(fits.BinTableHDU.from_columns([c1, c2, c3, c4, c5, c6, c7, c8, c9, c10], name='SPECTRUM'))\n hdus.writeto(fitsNnam, clobber=True)\n hdus.close()\n\n # --------------------------------------------------------------------------------------------- #\n # Run the analysis\n while fits.getdata(fitsNnam, ext=1)['status'].count('pending').sum() > 0:\n nbRun = fits.getdata(fitsNnam, ext=1)['status'].count('running').sum()\n while nbRun == n: \n # Limit the number of parallel runs\n time.sleep(10)\n try:\n nbRun = fits.getdata(fitsNnam, ext=1)['status'].count('running').sum()\n except:\n time.sleep(30)\n nbRun = fits.getdata(fitsNnam, ext=1)['status'].count('running').sum()\n ctRun = fits.getdata(fitsNnam, ext=1)['count'][fits.getdata(fitsNnam, ext=1)['status'] == 'pending']\n\n # Launch the file\n hdu = fits.open(fitsNnam)\n hdu['SPECTRUM'].data['status'][ctRun[0]] = 'running'\n hdu.writeto(fitsNnam, clobber=True)\n hdu.close()\n fil = os.path.join( self.workpath, 'tmp_'+str(ctRun[0])+'.py' )\n os.popen(\"nohup python {} &\".format(fil))\n\n return",
"def pingjiazhibiao(result):\n import math\n list_ed_normal = []\n list_es_normal = []\n list_ed_true = []\n list_es_true = []\n # these definations are for statistic\n ed_pred_all, es_pred_all,ed_true_all,es_true_all,ed_match,es_match,ed_normal,es_normal,ed_nomiss,es_nomiss= 0,0,0,0,0,0,0,0,0,0\n total_error_ed,total_error_es = 0,0\n sample_missimg_num = 0\n a4cdDict = {}\n a4csDict = {}\n for i in range(-5,7):\n a4cdDict[i] = 0\n a4csDict[i] = 0\n for i in result:\n pred = i[0]\n ed_pred = pred[0]\n es_pred = pred[1]\n if ed_pred == [] or es_pred == []:\n sample_missimg_num += 1\n true = i[1]\n ed_true = true[0]\n es_true = true[1]\n\n # avoid many to one\n ed_pred.sort()\n es_pred.sort()\n deleteAmong10frames(ed_pred)\n deleteAmong10frames(es_pred)\n \n for j in ed_pred:\n ed_pred_all += 1\n for t in ed_true:\n if math.fabs(j - t) < 6:\n ed_normal += 1\n total_error_ed += math.fabs(t - j)\n a4cdDict[j-t]+=1\n break\n # all - normal = FP\n # normal is TP\n a4cdDict[6] = ed_pred_all-ed_normal\n\n for j in es_pred:\n es_pred_all += 1\n for t in es_true:\n if math.fabs(j - t) < 6:\n es_normal += 1\n total_error_es += math.fabs(t - j)\n a4csDict[j-t]+=1\n break\n a4csDict[6] = es_pred_all-es_normal\n for j in ed_true:\n ed_true_all += 1\n for t in ed_pred:\n if math.fabs(t - j) < 6:\n ed_nomiss += 1\n break\n\n for j in es_true:\n es_true_all += 1\n for t in es_pred:\n if math.fabs(t - j) < 6:\n es_nomiss += 1\n break\n # aFD precision recall \n ed_result = total_error_ed / ed_normal,(ed_normal / ed_pred_all),(ed_nomiss / ed_true_all)\n es_result = total_error_es / es_normal,(es_normal / es_pred_all),(es_nomiss / es_true_all)\n return ed_result,a4cdDict, es_result,a4csDict, sample_missimg_num / len(result)",
"def ANN_binned_tagged_jets_hist(datalist, model, discriminant_cuts, CSV_cuts, bins, nbins, mode=\"pT_jet\",Save=False,addFeature=False):\n title = \"binned_tagged_jets_vs_\"+mode\n\tdiscriminant = \"ANN\"\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n print \"working on\",datatitle\n ran = data[4]\n\t\tCSV = data[2]\n\t\tpT = data[1]\n\t\tx_data = data[0]\n AllJetsHistlist.append(rt.TH1D(datatitle+\"_AllJets\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(datatitle+\"_CSV\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(datatitle+\"_Discriminant\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n\t\n\t\tif addFeature == False:\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\t\telif addFeature == \"pT\":\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\t\telif addFeature == \"PV\":\n\t\t\tassert x_data.shape[1] == 21, \"wrong x_data format: PV cannot be found\"\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\t\telse:\n\t\t\tprint \"invalid feature input\"\n\t\t\treturn None\n\t\tbin_numbers = ANN_bin_selection(pT,bins)\n\n\t for i,pT_value in enumerate(pT):\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJetsHistlist[n].Fill(pT_value)\n\t if pred_y[i] >= discriminant_cuts[bin_numbers[i]]: DiscriminantHistlist[n].Fill(pT_value)\n\t if CSV[i] >= CSV_cuts[bin_numbers[i]]: CSVHistlist[n].Fill(pT_value)\n\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n canvaslist.append(rt.TCanvas(datatitle+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(datatitle+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+datatitle+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()",
"def analyze(self, event):\n '''\n\tif not (event.HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ or event.HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ or event.HLT_IsoTkMu24 or event.HLT_IsoMu24):\n\t self.out.fillBranch(\"pass_selection\",0)\n return True\n '''\n\telectrons = Collection(event, \"Electron\")\n muons = Collection(event, \"Muon\")\n jets = Collection(event, \"Jet\")\n Z = ROOT.TLorentzVector()\n\n\ttight_muons = []\n\tgoodmuons_pt = []\n goodmuons_eta = [] \n\n\tif (len(muons)<=1):\n\t\tself.out.fillBranch(\"pass_selection\",0)\n return True\n\tfor i in range(0,len(muons)):\n #if (muons[i].eta) < 2.4 and (muons[i].mediumId) and (muons[i].pfIsoId)>=3:\n\t if (muons[i].eta) < 2.4 and (muons[i].mediumId):\n\t if (muons[i].pt) <= 25:\n continue\n\t\tfor j in range(i+1,len(muons)):\n \t\t #if (muons[j].eta) < 2.4 and (muons[j].mediumId) and (muons[j].pfIsoId)>=3:\n\t if (muons[j].eta) < 2.4 and (muons[j].mediumId):\n\t if (muons[j].pt) <= 20:\n\t\t\t continue\n\t\t if (muons[i].charge + muons[j].charge == 0):\n\t\t\t Z = muons[i].p4() + muons[j].p4()\n\t\t\t if (Z.M() > 76 and Z.M() < 106):\n\t\t\t\tself.out.fillBranch(\"pass_selection\",1)\n\t \t\tself.out.fillBranch(\"z_pt\",Z.Pt())\n\t\t\t\tself.out.fillBranch(\"z_mass\",Z.M())\n\t\t\t\tself.out.fillBranch(\"z_phi\",Z.Phi())\n\t\t\t\ttight_muons.append(muons[i]) \n\t\t\t\ttight_muons.append(muons[j])\n\t\n\tif len(tight_muons) < 2:\n\t self.out.fillBranch(\"pass_selection\",0)\n\t return True\n\n ngoodmuons = 0\n ngoodmuons = len(tight_muons)\n\tif ngoodmuons != 2:\n print(ngoodmuons)\n\n goodmuons_pt.append(tight_muons[0].pt)\n goodmuons_pt.append(tight_muons[1].pt)\n goodmuons_eta.append(tight_muons[0].eta)\n goodmuons_eta.append(tight_muons[1].eta) \n \n self.out.fillBranch(\"muon_pt\",goodmuons_pt)\n self.out.fillBranch(\"muon_eta\",goodmuons_eta) \n \n\tngoodjets = 0\n goodjets_pt = []\n\tgoodjets_id = []\n\tgoodjets_phi = []\n\tgoodjets_dphi_zjet = []\n\n\tfor k in range(0,len(jets)):\n #print(4)\n\t if abs(jets[k].eta) > 2.4:\n continue\n #print(5) \n\t if jets[k].pt < 30:\n\t\tcontinue\n\t #print(6)\n\t pass_lepton_dr_cut = True\n\n\t for i in range(0,len(tight_muons)):\n\t\t#if deltaR(muons[tight_muons[i]].eta,muons[tight_muons[i]].phi,jets[k].eta,jets[k].phi) < 0.4:\n if deltaR(tight_muons[i].eta,tight_muons[i].phi,jets[k].eta,jets[k].phi) < 0.4:\n\t pass_lepton_dr_cut = False\n\n\t if not pass_lepton_dr_cut:\n\t\tcontinue\n\n ngoodjets += 1\n goodjets_pt.append(jets[k].pt)\n\t #goodjets_id.append(jets[k].jetId)\n\t goodjets_phi.append(jets[k].phi)\t \n\t #goodjets_dphi_zjet.append(deltaPhi(Z.Phi(),jets[k].phi)) \n\n if ngoodjets != len(goodjets_pt):\n print(error)\n\n self.out.fillBranch(\"jet_pt\",goodjets_pt)\n\t#self.out.fillBranch(\"jet_id\",goodjets_id)\n\tself.out.fillBranch(\"jet_phi\",goodjets_phi)\n\t#self.out.fillBranch(\"dphi_zjet\",goodjets_dphi_zjet)\n\t'''\n\tif(njet!=0):\n\t print(njet)\n '''\n\tif hasattr(event,\"Generator_weight\"):\n self.out.fillBranch(\"gen_weight\",event.Generator_weight)\n else:\n self.out.fillBranch(\"gen_weight\",0)\n\treturn True",
"def frequencyEstimator(ctd, ladcp, bathy, rho_neutral, strain,\\\n wl_min=100, wl_max=500, full_set=False):\n \n U, V, p_ladcp = oc.loadLADCP(ladcp)\n S, T, p_ctd, lat, lon = oc.loadCTD(ctd)\n \n \n Ek, Ep, Etotal, eta_power,\\\n Upow, Vpow, UVkx, eta_kx,\\\n N2mean, wl_min, wl_max,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec =\\\n internal_wave_energy(ctd, ladcp,\\\n rho_neutral,\\\n bathy, strain, wl_min=wl_min, wl_max=wl_max)\n \n eta_power_export = np.vstack(eta_power)\n eta_kx_export = np.vstack(eta_kx)\n Up_export = np.vstack(Upow)\n Vp_export = np.vstack(Vpow)\n UVkx_export = np.vstack(UVkx)\n \n\n np.savetxt('eta_power.csv',eta_power_export)\n np.savetxt('eta_kx.csv',eta_kx_export)\n np.savetxt('Upow.csv',Up_export)\n np.savetxt('Vpow.csv',Vp_export)\n np.savetxt('UVkx.csv',UVkx_export)\n\n\n \n \n # look for wavenumber maxes\n \n \n # Use ratios to solve for internal frequncys\n f = np.nanmean(gsw.f(lat))\n \n omega = f*np.sqrt(Etotal/(Ek-Ep))\n\n m = np.mean((wl_min, wl_max))\n m = (2*np.pi)/m\n kh = (m/np.sqrt(np.abs(N2mean)))*(np.sqrt(omega**2 - f**2))\n mask = kh == 0\n kh[mask]= np.nan\n lambdaH = 1e-3*(2*np.pi)/kh\n \n # get mean spectra\\\n \n eta_mean = []\n for station in eta_power:\n eta_mean.append(np.nanmean(station, axis=0))\n \n eta_mean = np.vstack(eta_mean).T\n \n \n aspect = kh/m \n \n file2save = pd.DataFrame(lambdaH)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('lambdaH.xlsx')\n file2save = pd.DataFrame(kh)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('Kh.xlsx')\n file2save = pd.DataFrame(omega)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('omega.xlsx')\n file2save = pd.DataFrame(aspect)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('aspect.xlsx')\n \n np.savetxt('eta_mean.csv', eta_mean)\n \n \n np.savetxt('kh.csv', kh)\n np.savetxt('lamdah.csv', lambdaH)\n np.savetxt('omega.csv', omega)\n \n if full_set:\n return lambdaH, kh, omega, N2mean,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec, aspect\n \n else:\n return lambdaH, kh, omega, N2mean",
"def haperfluxMany(inputlist, maplist, radius, rinner, router, galactic=True, decimal=True, noise_model=0):\n\n ## Names and frequencies of the sample maps included in this repo.\n\n freqlist = ['30','44','70','100','143','217','353','545','857','1249','1874','2141','2998','3331','4612','4997','11992','16655','24983','24983','24983','33310']\n freqval = [28.405889, 44.072241,70.421396,100.,143.,217.,353.,545.,857.,1249.,1874.,2141.,2141.,2998.,2998.,3331.,4612.,4997.,11992.,16655.,24983.,24983.,24983.,33310.]\n band_names = [\"akari9\", \"dirbe12\",\"iras12\",\"wise12\",\"akari18\",\"iras25\",\"iras60\",\"akari65\",\"akari90\",\"dirbe100\",\"iras100\",\"akari140\",\"dirbe140\",\"akari160\",\"dirbe240\",\"planck857\", \"planck545\"]\n\n k0 = 1.0\n k1 = rinner/radius\n k2 = router/radius\n apcor = ((1 - (0.5)**(4*k0**2))-((0.5)**(4*k1**2) - (0.5)**(4*k2**2)))**(-1)\n\n # 'galactic' overrules 'decimal'\n if (galactic==True):\n dt=[('sname',np.dtype('S13')),('glon',np.float32),('glat',np.float32)]\n targets = np.genfromtxt(inputlist, delimiter=\",\",dtype=dt)\n\n ns = len(targets['glat'])\n\n fd3 = -1\n fd_err3 = -1\n\n fn = np.genfromtxt(maplist, delimiter=\" \", dtype='str')\n nmaps = len(fn)\n ## Initialize the arrays which will hold the results\n fd_all = np.zeros((ns,nmaps))\n fd_err_all = np.zeros((ns,nmaps))\n fd_bg_all = np.zeros((ns,nmaps))\n\n # Start the actual processing: Read-in the maps.\n for ct2 in range(0,nmaps):\n xtmp_data, xtmp_head = hp.read_map(fn[ct2], h=True, verbose=False, nest=False)\n freq = dict(xtmp_head)['FREQ']\n units = dict(xtmp_head)['TUNIT1']\n freq_str = str(freq)\n idx = freqlist.index(str(freq))\n currfreq = int(freq)\n\n if (radius == None):\n radval = fwhmlist[idx]\n else:\n radval = radius\n\n\n for ct in range(0,ns):\n\n glon = targets['glon'][ct]\n glat = targets['glat'][ct]\n\n fd_all[ct,ct2], fd_err_all[ct,ct2], fd_bg_all[ct,ct2] = \\\n haperflux(inmap= xtmp_data, freq= currfreq, lon=glon, lat=glat, aper_inner_radius=radius, aper_outer_radius1=rinner, \\\n aper_outer_radius2=router,units=units, noise_model=noise_model)\n\n if (np.isfinite(fd_err_all[ct,ct2]) == False):\n fd_all[ct,ct2] = -1\n fd_err_all[ct,ct2] = -1\n else:\n if radius==None:\n fd_all[ct,ct2] = fd_all[ct,ct2]*apcor\n fd_err_all[ct,ct2] = fd_err_all[ct,ct2]*apcor\n\n return fd_all, fd_err_all, fd_bg_all",
"def get_response_weights_vector(zenith,azimuth,binsize=5,cut=57.4):\n\n # assuming useful input:\n # azimuthal angle is periodic in the range [0,360[\n # zenith ranges from [0,180[ \n # checking azimuth range (can be exactly 360?)\n azimuth[azimuth == 360] -= 0.01\n \n # check which pixel (index) was hit on regular grid\n hit_pixel_zi = np.floor(zenith/binsize).astype(int)\n hit_pixel_ai = np.floor(azimuth/binsize).astype(int)\n\n # and which pixel centre\n hit_pixel_z = (hit_pixel_zi+0.5)*binsize\n hit_pixel_a = (hit_pixel_ai+0.5)*binsize\n\n # check which zeniths are beyond threshold\n bad_idx = np.where(hit_pixel_z > cut) \n \n # calculate nearest neighbour pixels indices\n za_idx = np.array([[np.floor(azimuth/binsize+0.5),np.floor(zenith/binsize+0.5)],\n [np.floor(azimuth/binsize+0.5),np.floor(zenith/binsize-0.5)],\n [np.floor(azimuth/binsize-0.5),np.floor(zenith/binsize+0.5)],\n [np.floor(azimuth/binsize-0.5),np.floor(zenith/binsize-0.5)]]).astype(int)\n\n # take care of bounds at zenith (azimuth is allowed to be -1!)\n (za_idx[:,1,:])[np.where(za_idx[:,1,:] < 0)] += 1\n (za_idx[:,1,:])[np.where(za_idx[:,1,:] >= 180/binsize)] = int(180/binsize-1)\n # but azimuth may not be larger than range [0,360/binsize[\n (za_idx[:,0,:])[np.where(za_idx[:,0,:] >= 360/binsize)] = 0\n \n # and pixel centres of neighbours\n azimuth_neighbours = (za_idx[:,0]+0.5)*binsize\n zenith_neighbours = (za_idx[:,1]+0.5)*binsize\n\n # calculate angular distances to neighbours\n dists = angular_distance(azimuth_neighbours,zenith_neighbours,azimuth,zenith)\n\n # inverse weighting to get impact of neighbouring pixels\n n_in = len(zenith)\n weights = (1/dists)/np.sum(1/dists,axis=0).repeat(4).reshape(n_in,4).T\n # if pixel is hit directly, set weight to 1.0\n weights[np.isnan(weights)] = 1\n # set beyond threshold weights to zero\n weights[:,bad_idx] = 0\n\n return za_idx,weights",
"def get_response_from_pixelhit_vector(Response,zenith,azimuth,binsize=5,cut=60):\n\n # assuming useful input:\n # azimuthal angle is periodic in the range [0,360[\n # zenith ranges from [0,180[\n\n # checking azimuth range (can be exactly 360?)\n azimuth[azimuth == 360] -= 0.01\n \n # check which pixel (index) was hit on regular grid\n hit_pixel_zi = np.floor(zenith/binsize).astype(int)\n hit_pixel_ai = np.floor(azimuth/binsize).astype(int)\n\n #print('hit_pixel_zi',hit_pixel_zi)\n #print('hit_pixel_ai',hit_pixel_ai)\n \n # and which pixel centre\n hit_pixel_z = (hit_pixel_zi+0.5)*binsize\n hit_pixel_a = (hit_pixel_ai+0.5)*binsize\n\n #print('hit_pixel_z',hit_pixel_z)\n #print('hit_pixel_a',hit_pixel_a)\n \n \n # check which zeniths are beyond threshold\n bad_idx = np.where(hit_pixel_z > cut)\n\n # set hit pixels to output array\n za_idx = np.array([hit_pixel_zi,hit_pixel_ai]).astype(int)\n\n #print(za_idx)\n # weight array includes ones and zeros only (no neighbouring pixels included)\n # bad_idx get zeros (outside range)\n weights = np.ones(len(zenith))\n weights[bad_idx] = 0\n\n # check for negative weights and indices and remove\n weights[za_idx[0,:] < 0] = 0.\n weights[za_idx[1,:] < 0] = 0.\n za_idx[0,za_idx[0,:] < 0] = 0.\n za_idx[1,za_idx[1,:] < 0] = 0.\n \n # get responses at pixels\n rsp = Response[za_idx[0,:],za_idx[1,:],:]*weights[:,None]\n\n return rsp",
"def lsh_attention_single_head(query, value, n_buckets, n_hashes,\n causal_mask=True,\n length_norm=False):\n\n qdim, vdim = query.shape[-1], value.shape[-1]\n chunk_size = n_hashes * n_buckets\n\n seqlen = query.shape[0]\n\n with nn.stochastic(jax.random.PRNGKey(0)):\n rng = nn.make_rng()\n\n buckets = hash_vectors(\n query, rng, num_buckets=n_buckets, num_hashes=n_hashes)\n # buckets should be (seq_len)\n assert buckets.shape[-1] == n_hashes * seqlen\n\n total_hashes = n_hashes\n\n # create sort and unsort\n ticker = jax.lax.tie_in(query, jnp.arange(n_hashes * seqlen))\n buckets_and_t = seqlen * buckets + (ticker % seqlen)\n buckets_and_t = jax.lax.stop_gradient(buckets_and_t)\n # ticker = jnp.tile(jnp.reshape(ticker, [1, -1]), [batch_size, 1])\n sbuckets_and_t, sticker = jax.lax.sort_key_val(\n buckets_and_t, ticker, dimension=-1)\n _, undo_sort = jax.lax.sort_key_val(sticker, ticker, dimension=-1)\n sbuckets_and_t = jax.lax.stop_gradient(sbuckets_and_t)\n sticker = jax.lax.stop_gradient(sticker)\n undo_sort = jax.lax.stop_gradient(undo_sort)\n\n st = (sticker % seqlen)\n\n sqk = jnp.take(query, st, axis=0)\n sv = jnp.take(value, st, axis=0)\n\n bkv_t = jnp.reshape(st, (chunk_size, -1))\n bqk = jnp.reshape(sqk, (chunk_size, -1, qdim))\n bv = jnp.reshape(sv, (chunk_size, -1, vdim))\n bq = bqk\n bk = bqk\n\n if length_norm:\n bk = length_normalized(bk)\n\n # get previous chunks\n bk = look_one_back(bk)\n bv = look_one_back(bv)\n bkv_t = look_one_back(bkv_t)\n\n # compute dot product attention\n dots = jnp.einsum('hie,hje->hij', bq, bk) * (qdim ** 0.5)\n\n if causal_mask:\n # apply causal mask\n # TODO(yitay): This is not working yet\n # We don't need causal reformer for any task YET.\n pass\n\n dots_logsumexp = logsumexp(dots, axis=-1, keepdims=True)\n slogits = jnp.reshape(dots_logsumexp, [-1])\n dots = jnp.exp(dots - dots_logsumexp)\n\n x = jnp.matmul(dots, bv)\n x = jnp.reshape(x, [-1, qdim])\n\n # Unsort\n o = permute_via_gather(x, undo_sort, sticker, axis=0)\n logits = permute_via_sort(slogits, sticker, undo_sort, axis=0)\n logits = jnp.reshape(logits, [total_hashes, seqlen, 1])\n probs = jnp.exp(logits - logsumexp(logits, axis=0, keepdims=True))\n o = jnp.reshape(o, [n_hashes, seqlen, qdim])\n out = jnp.sum(o * probs, axis=0)\n out = jnp.reshape(out, [seqlen, qdim])\n\n return out",
"def bottle_split_bottleExpansion((B1,P1,nuW,nuEF,nuEB,TE), (n1,n2), pts): \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n # Now do the population bottleneck event.\n phi = dadi.Integration.one_pop(phi, xx, B1, P1)\n\n # grow the ancient population\n\n\n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs",
"def main_eaftest(point_ind, permutations=10240, alpha=0.05):\n \n # ------ Attainment indicator values information ------\n \n npoints = len(point_ind)\n nvars = len(point_ind[0]) # Número de execuções total\n nruns = nvars // 2 # Número de execuções de 1 algo\n \n print \"- Attainment indicator values information:\"\n print \" * Number of points:\", npoints\n print \" * Joint executions:\", nvars, \"(%d + %d)\" % (nruns, nruns)\n print\n \n assert nvars % 2 == 0, \"Number of total joint executions must be even.\"\n assert nvars <= 64, \"Not implemented with more than 64 joint executions.\"\n \n # ------ Test Statistic ------\n\n print \"- Computing the test statistic...\" \n stat2 = libaft.ksstat(point_ind, order=2)\n print \" * Test statistic = %d/%d\" % (stat2, nruns)\n print \" = %f\" % (stat2 / float(nruns)), '\\n'\n \n # ------ Estimate null distribution ------\n\n print \"- Using %d random permutations to estimate null distribution.\" % permutations\n print \" Please be patient...\"\n maxdist = np.zeros(permutations, dtype=np.int32) # Max distance array\n rtime = time.time()\n \n masks = bintools.make_masks(permutations, nvars, seed=64)\n for i, maxd in enumerate(KERNEL.runkernel(point_ind, masks)):\n maxdist[i] = maxd\n if (i+1) % (permutations//20) == 0:\n print \" %6d permutations, %7.3f sec\" % (i+1, time.time()-rtime)\n print \" * Time elapsed: %7.3f\" % (time.time()-rtime)\n \n # Compute null distribution from max distance array\n tail = np.bincount(maxdist, minlength=nruns+1)\n print \" * Non-normalized null distribution:\"\n print tail\n print\n \n # ------ Accept/reject null hypothesis ------\n \n # NB: -1 resulta das diferentes convenções para a definição de valor crítico\n crit = criticalvalue(tail, alpha * permutations) - 1\n pval = pvalue(tail, stat2) / float(permutations)\n \n print \"- Null hypothesis decision:\" \n print \" * Critical value = %d/%d\" % (crit, nruns)\n print \" = %f\" % (crit / float(nruns))\n print \" * p-value = %f\" % pval\n if pval <= alpha:\n print \" <= alpha (%s)\\n\" % alpha\n print \" * Decision: REJECT the null hypothesis\"\n else:\n print \" > alpha (%s)\\n\" % alpha\n print \" * Decision: do NOT REJECT the null hypothesis\"\n print"
]
| [
"0.57946926",
"0.54707724",
"0.5410318",
"0.5387399",
"0.53183043",
"0.5195197",
"0.51609874",
"0.5160073",
"0.511659",
"0.5098509",
"0.5075455",
"0.5061853",
"0.5058991",
"0.5058988",
"0.5052867",
"0.5048729",
"0.5021096",
"0.4992996",
"0.496611",
"0.49569523",
"0.49510986",
"0.49467552",
"0.49455422",
"0.49392575",
"0.49357417",
"0.4930496",
"0.4922314",
"0.489895",
"0.4892217",
"0.487354"
]
| 0.61818737 | 0 |
Plot graph of response in bins of eta. If the response hist for each bin exists already, then we use that; if not, we make the hist. pt_min and pt_max are provided so that the graph can be made for a given pt interval. pt_var is the variable to bin on (pt or ptRef). | def plot_rsp_eta(inputfile, outputfile, eta_bins, pt_min, pt_max, pt_var, pu_min, pu_max):
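    # Build a TGraphErrors of mean response vs |eta|: for each eta bin, draw the
    # response histogram from the "valid" tree (applying the pt, PU and
    # saturation cuts), fit a Gaussian (around mean +/- RMS for |eta| < 2.9,
    # otherwise around the histogram peak), and add the fitted mean (falling
    # back to the raw mean if the fit fails) as one graph point. All histograms
    # and the final graph are written to the output file.
    # Hypothetical example call (fin/fout are open ROOT TFile objects; the bin
    # edges and cut values below are illustrative only):
    #   plot_rsp_eta(fin, fout, eta_bins=[0, 0.435, 0.783, 1.131], pt_min=30,
    #                pt_max=250, pt_var="ptRef", pu_min=0, pu_max=99)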
    gr_rsp_eta = ROOT.TGraphErrors()
    # Input tree
    tree_raw = inputfile.Get("valid")
    # Output folders
    output_f = outputfile.GetDirectory('eta_%g_%g' % (eta_bins[0], eta_bins[-1]))
    output_f_hists = None
    if not output_f:
        output_f = outputfile.mkdir('eta_%g_%g' % (eta_bins[0], eta_bins[-1]))
        output_f_hists = output_f.mkdir("Histograms")
    else:
        output_f_hists = output_f.GetDirectory("Histograms")
    # Go through eta bins, get response hist, fit with Gaussian and add to
    # the overall graph
    for i, eta in enumerate(eta_bins[:-1]):
        absetamin = eta
        absetamax = eta_bins[i + 1]  # Eta cut string
        # Cut strings
        eta_cutStr = "TMath::Abs(eta) < %f && TMath::Abs(eta) > %f" % (absetamax, absetamin)
        pt_cutStr = "%s < %g && %s > %g" % (pt_var, pt_max, pt_var, pt_min)
        pu_cutStr = "numPUVertices <= %f && numPUVertices >= %f" % (pu_max, pu_min)
        avoidSaturation_cut = "pt < 1023.1"
        cutStr = " && ".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])
        print cutStr
        nb_rsp = 100
        rsp_min, rsp_max = 0, 5
        rsp_name = 'hrsp_eta_%g_%g_%s_%g_%g' % (absetamin, absetamax, pt_var, pt_min, pt_max)
        tree_raw.Draw("rsp>>%s(%d,%g,%g)" % (rsp_name, nb_rsp, rsp_min, rsp_max), cutStr)
        h_rsp = ROOT.gROOT.FindObject(rsp_name)
        h_rsp.SetTitle(";response (p_{T}^{L1}/p_{T}^{Ref});")
        print 'Integral', h_rsp.Integral()
        if h_rsp.Integral() <= 0:
            print "No entries - skipping"
            continue
        # Fit with Gaussian
        peak = h_rsp.GetBinCenter(h_rsp.GetMaximumBin())
        if absetamin < 2.9:
            fit_result = h_rsp.Fit("gaus", "QER", "",
                                   h_rsp.GetMean() - h_rsp.GetRMS(),
                                   h_rsp.GetMean() + h_rsp.GetRMS())
        else:
            fit_result = h_rsp.Fit("gaus", "QER", "",
                                   peak - (0.5 * h_rsp.GetRMS()),
                                   peak + (0.5 * h_rsp.GetRMS()))
        mean = h_rsp.GetMean()
        err = h_rsp.GetMeanError()
        check_fit = True
        if check_fit:
            if int(fit_result) == 0:
                mean = h_rsp.GetFunction("gaus").GetParameter(1)
                err = h_rsp.GetFunction("gaus").GetParError(1)
            else:
                print "cannot fit with Gaussian - using raw mean instead"
        output_f_hists.WriteTObject(h_rsp)
        # add to graph
        N = gr_rsp_eta.GetN()
        print absetamin, "-", absetamax, mean, err
        gr_rsp_eta.SetPoint(N, 0.5 * (absetamin + absetamax), mean)
        gr_rsp_eta.SetPointError(N, 0.5 * (absetamax - absetamin), err)
    gr_rsp_eta.SetTitle(";|#eta^{L1}|; <response> = <p_{T}^{L1}/p_{T}^{Ref}>")
    gr_rsp_eta.SetName("gr_rsp_eta_%g_%g_%s_%g_%g" % (eta_bins[0], eta_bins[-1], pt_var, pt_min, pt_max))
    output_f.WriteTObject(gr_rsp_eta) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_pt_hist(hist, binning, binning_uflow, pt_bin_edges, pt_bin_edges_uflow, variable_bin_edges):\n # THIS IS A HORRIBLE HACK BECAUSE I DIDNT FILL MY HISTS\n all_pt_bins = list(np.append(pt_bin_edges_uflow[:-1], pt_bin_edges))\n all_pt_bins.append(8000)\n # print(all_pt_bins)\n nbins_pt = len(all_pt_bins)-1\n h_new = ROOT.TH1D(\"hpt\"+cu.get_unique_str(), \"\", nbins_pt, array('d', all_pt_bins))\n for pt_ind in range(1, h_new.GetNbinsX()+1):\n this_sum = 0\n this_err_sq = 0\n this_pt = all_pt_bins[pt_ind-1]\n # ARGH THIS IS SO FRUSTRATING\n this_binning = binning if this_pt >= pt_bin_edges[0] else binning_uflow\n for var_ind, var in enumerate(variable_bin_edges[:-1]):\n bin_num = this_binning.GetGlobalBinNumber(var, this_pt)\n this_sum += hist.GetBinContent(bin_num)\n this_err_sq += hist.GetBinError(bin_num)**2\n h_new.SetBinContent(pt_ind, this_sum)\n h_new.SetBinError(pt_ind, math.sqrt(this_err_sq))\n return h_new",
"def plot_rsp_pt(inputfile, outputfile, absetamin, absetamax, pt_bins, pt_var, pt_max, pu_min, pu_max):\n\n # Input tree\n tree_raw = inputfile.Get(\"valid\")\n\n # Output folders\n output_f = outputfile.GetDirectory('eta_%g_%g' % (absetamin, absetamax))\n output_f_hists = None\n if not output_f:\n output_f = outputfile.mkdir('eta_%g_%g' % (absetamin, absetamax))\n output_f_hists = output_f.mkdir(\"Histograms\")\n else:\n output_f_hists = output_f.GetDirectory(\"Histograms\")\n\n gr_rsp_pt = ROOT.TGraphErrors()\n\n # Cut strings\n eta_cutStr = \"TMath::Abs(eta) < %f && TMath::Abs(eta) > %f\" % (absetamax, absetamin)\n # keep the pt < pt_max to safeguard against staurated L1 jets\n pt_cutStr = \"%s < %g && pt < %g\" % (pt_var, pt_bins[-1], pt_max)\n pu_cutStr = \"numPUVertices <= %f && numPUVertices >= %f\" % (pu_max, pu_min)\n avoidSaturation_cut = \"pt < 1023.1\"\n cutStr = \" && \".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])\n\n n_rsp_bins = 100\n rsp_min = 0\n rsp_max = 5\n\n pt_array = array('d', pt_bins)\n\n # First make a 2D plot\n h2d_rsp_pt = ROOT.TH2D(\"h2d_rsp_%s_%g_%g\" % (pt_var, absetamin, absetamax),\n \"%g < |#eta| < %g;p_{T};response\" % (absetamin, absetamax),\n len(pt_bins) - 1, pt_array,\n n_rsp_bins, rsp_min, rsp_max)\n tree_raw.Draw(\"rsp:%s>>h2d_rsp_%s_%g_%g\" % (pt_var, pt_var, absetamin, absetamax), cutStr)\n\n output_f_hists.WriteTObject(h2d_rsp_pt)\n\n # Now for each pt bin, do a projection on 1D hist of response and fit a Gaussian\n print pt_bins\n for i, (pt_min, pt_max) in enumerate(pairwise(pt_bins)):\n h_rsp = h2d_rsp_pt.ProjectionY(\"rsp_%s_%g_%g\" % (pt_var, pt_min, pt_max), i + 1, i + 1)\n print i, pt_min, pt_max\n\n if h_rsp.Integral() <= 0:\n print \"No entries - skipping\"\n continue\n\n # Fit with Gaussian\n mean = h_rsp.GetMean()\n err = h_rsp.GetMeanError()\n\n peak = h_rsp.GetBinCenter(h_rsp.GetMaximumBin())\n # if h_rsp.GetRMS() < 0.2:\n # fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\", peak - h_rsp.GetRMS(), peak + h_rsp.GetRMS())\n # else:\n # fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\", peak - 0.5*h_rsp.GetRMS(), peak + 0.5*h_rsp.GetRMS())\n fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\", peak - h_rsp.GetRMS(), peak + h_rsp.GetRMS())\n # fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\", mean - h_rsp.GetRMS(), mean + h_rsp.GetRMS())\n\n output_f_hists.WriteTObject(h_rsp)\n\n # TODO: better check against Gaussian fit - are peaks ~ similar?\n # if int(fit_result) == 0 and check_gaus_fit(h_rsp):\n if int(fit_result) == 0 and abs(h_rsp.GetFunction(\"gaus\").GetParameter(1) - peak) / peak < 0.1:\n mean = h_rsp.GetFunction(\"gaus\").GetParameter(1)\n err = h_rsp.GetFunction(\"gaus\").GetParError(1)\n # Add the Gaussian to the total graph\n N = gr_rsp_pt.GetN()\n gr_rsp_pt.SetPoint(N, 0.5 * (pt_min + pt_max), mean)\n gr_rsp_pt.SetPointError(N, 0.5 * (pt_max - pt_min), err)\n else:\n print \"Cannot fit Gaussian in plot_rsp_pt, using raw mean instead\"\n\n # Save the graph\n gr_rsp_pt.SetTitle(\"%g < |#eta^{L1}| < %g;p_{T}; <response> = <p_{T}^{L1}/p_{T}^{Ref}>\" % (absetamin, absetamax))\n gr_rsp_pt.SetName(\"gr_rsp_%s_eta_%g_%g\" % (pt_var, absetamin, absetamax))\n\n output_f.WriteTObject(gr_rsp_pt)",
"def plot_checks(inputfile, outputfile, absetamin, absetamax, max_pt, pu_min, pu_max):\n\n print \"Doing eta bin: %g - %g, max L1 jet pt: %g\" % (absetamin, absetamax, max_pt)\n\n # Input tree\n tree_raw = inputfile.Get(\"valid\")\n\n # Output folders\n output_f = outputfile.mkdir('eta_%g_%g' % (absetamin, absetamax))\n output_f_hists = output_f.mkdir(\"Histograms\")\n\n # Eta cut string\n eta_cutStr = \" TMath::Abs(eta)<%g && TMath::Abs(eta) > %g \" % (absetamax, absetamin)\n # Pt cut string\n pt_cutStr = \"pt < %g\" % max_pt\n # PU cut string\n pu_cutStr = \"numPUVertices <= %f && numPUVertices >= %f\" % (pu_max, pu_min)\n # Avoid L1 saturated jets cut (from 2017 any l1 jet with a saturated tower is auto given pt=1024GeV)\n avoidSaturation_cut = \"pt < 1023.1\"\n cutStr = \" && \".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])\n\n # Draw response (pT^L1/pT^Gen) for all pt bins\n tree_raw.Draw(\"rsp>>hrsp_eta_%g_%g(100,0,5)\" % (absetamin, absetamax), cutStr)\n hrsp_eta = ROOT.gROOT.FindObject(\"hrsp_eta_%g_%g\" % (absetamin, absetamax))\n hrsp_eta.SetTitle(\";response (p_{T}^{L1}/p_{T}^{Ref});\")\n if absetamin < 2.9:\n fit_result = hrsp_eta.Fit(\"gaus\", \"QER\", \"\",\n hrsp_eta.GetMean() - hrsp_eta.GetRMS(),\n hrsp_eta.GetMean() + hrsp_eta.GetRMS())\n else:\n peak = hrsp_eta.GetBinCenter(hrsp_eta.GetMaximumBin())\n fit_result = hrsp_eta.Fit(\"gaus\", \"QER\", \"\",\n peak - (0.5 * hrsp_eta.GetRMS()),\n peak + (0.5 * hrsp_eta.GetRMS()))\n\n # mean = hrsp_eta.GetFunction(\"gaus\").GetParameter(1)\n # err = hrsp_eta.GetFunction(\"gaus\").GetParError(1)\n output_f_hists.WriteTObject(hrsp_eta)\n\n # nb_pt, pt_min, pt_max = 63, 0, 252 # for GCT/Stage 1\n nb_pt, pt_min, pt_max = 512, 0, 1024 # for Stage 2\n nb_rsp, rsp_min, rsp_max = 100, 0, 5\n\n # Draw rsp (pT^L1/pT^Gen) Vs GenJet pT\n tree_raw.Draw(\"rsp:ptRef>>h2d_rsp_gen(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_rsp, rsp_min, rsp_max), cutStr)\n h2d_rsp_gen = ROOT.gROOT.FindObject(\"h2d_rsp_gen\")\n h2d_rsp_gen.SetTitle(\";p_{T}^{Ref} [GeV];response (p_{T}^{L1}/p_{T}^{Ref})\")\n output_f_hists.WriteTObject(h2d_rsp_gen)\n\n h2d_rsp_gen_norm = cu.norm_vertical_bins(h2d_rsp_gen)\n output_f_hists.WriteTObject(h2d_rsp_gen_norm)\n\n # Draw rsp (pT^L1/pT^Gen) Vs L1 pT\n tree_raw.Draw(\"rsp:pt>>h2d_rsp_l1(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_rsp, rsp_min, rsp_max), cutStr)\n h2d_rsp_l1 = ROOT.gROOT.FindObject(\"h2d_rsp_l1\")\n h2d_rsp_l1.SetTitle(\";p_{T}^{L1} [GeV];response (p_{T}^{L1}/p_{T}^{Ref})\")\n output_f_hists.WriteTObject(h2d_rsp_l1)\n\n h2d_rsp_l1_norm = cu.norm_vertical_bins(h2d_rsp_l1)\n output_f_hists.WriteTObject(h2d_rsp_l1_norm)\n\n # Draw pT^Gen Vs pT^L1\n tree_raw.Draw(\"pt:ptRef>>h2d_gen_l1(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_pt, pt_min, pt_max), cutStr)\n h2d_gen_l1 = ROOT.gROOT.FindObject(\"h2d_gen_l1\")\n h2d_gen_l1.SetTitle(\";p_{T}^{Ref} [GeV];p_{T}^{L1} [GeV]\")\n output_f_hists.WriteTObject(h2d_gen_l1)",
"def make_eta(eta_step_bucket, npart, hist_rule=\"square-root\"):\n\n lowbound = np.min(eta_step_bucket)\n upbound = np.max(eta_step_bucket) + 1e-10\n pts = len(eta_step_bucket)\n if hist_rule == \"square-root\":\n hist_num = int(np.sqrt(pts))\n elif hist_rule == \"sturges\":\n hist_num = int(np.log2(pts)) + 1\n elif hist_rule == \"rice-rule\":\n hist_num = int(2 * pts ** (1 / 3))\n eta_hist = np.zeros(hist_num)\n eta_hist, bins = np.histogram(\n np.array(eta_step_bucket), bins=np.linspace(lowbound, upbound, num=hist_num + 1)\n )\n # plt.figure()\n # _=plt.hist(np.array(eta_step_bucket),bins=np.linspace(lowbound, upbound, num=hist_num+1))\n # plt.title('Input eta histogram')\n eta_hist = eta_hist / np.sum(eta_hist)\n\n # make cdf\n eta_cdf = np.zeros(eta_hist.shape)\n eta_cdf[0] = eta_hist[0]\n for j in range(1, hist_num):\n eta_cdf[j] = eta_hist[j] + eta_cdf[j - 1]\n eta_cdf = np.concatenate((np.zeros(1), eta_cdf))\n\n # make eta\n x = np.random.rand(npart)\n eta_sampled = np.interp(x, eta_cdf, bins)\n # plt.figure()\n # _=plt.hist(eta_sampled,bins=np.linspace(lowbound, upbound, num=hist_num+1))\n # plt.title('Sampled eta histogram')\n return eta_sampled",
"def plot_histogram(self,ax=None,**kwargs):\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n probs,bins,patches = ax.hist(self.scores_list,normed=True,label=\"Sample\",**kwargs)\n ax.vlines(self.xhat,*ax.get_ylim(),label='Mean',color='r')\n ax.legend()\n return ax,probs,bins",
"def hist(self, ax=None, nburnt=0, xlabel=None):\n # creating figure if not given as input\n fig = None\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # histogram of value of samples\n bins = np.arange(self._minval - 0.5 * self._step,\n self._maxval + 1.5 * self._step,\n self._step)\n samples = self.samples[nburnt:self.nsample]\n ax.hist(samples, bins=bins, normed=True, label='sampled distribution')\n\n # prior (uniform) distribution\n if self._maxval > self._minval:\n x = 2 * [self._minval] + 2 * [self._maxval]\n y = [0.0] + 2 * [1.0 / (self._maxval - self._minval)] + [0.0]\n ax.plot(x, y, '-', lw=2, color='grey', label='prior distribution')\n\n # legend, labels and title\n ax.legend(loc='upper right', fontsize=10, framealpha=0.8)\n ax.set_xlabel(self.name if not xlabel else xlabel)\n ax.set_ylabel('Probability density')\n ax.set_title('Nb of samples: {}'.format(len(samples)))\n ax.grid(True)\n\n # statistics\n s = \"Mean & std dev:\\n{:.3G} +/- {:.3G}\".format(np.mean(samples),\n np.std(samples))\n quantiles = np.percentile(samples, [2.5, 97.5])\n s += \"\\n95% confidence interval:\\n{:.3G}, {:.3G}\".format(*quantiles)\n ax.text(min(ax.get_xlim()), max(ax.get_ylim()), s,\n fontdict={'fontsize': 10},\n horizontalalignment='left',\n verticalalignment='top',\n bbox={'color': 'w', 'alpha': 0.8})\n\n if fig:\n fig.show()",
"def make_histogram(outpath, plotdata_y, bins=None, color='red',\n xlabel='', ylabel='', x_range=None):\n if bins is None:\n bins = get_optimum_bins(plotdata_y)\n pyplot.hist(plotdata_y, bins=bins, color=color, range=x_range)\n pyplot.grid(True, which='major', linestyle='-')\n pyplot.grid(True, which='minor')\n pyplot.xlabel(xlabel, fontsize=20)\n pyplot.ylabel(ylabel, fontsize=20)\n pyplot.tick_params(axis='both', which='major', labelsize=16)\n pyplot.tick_params(axis='both', which='minor', labelsize=8)\n pyplot.tight_layout()\n pyplot.savefig(outpath)\n pyplot.close()\n return outpath",
"def _make_histogram(\n dict_,\n data,\n bins=25,\n show_output=False,\n figsize=(10, 6),\n fontsize=15,\n plot_title=False,\n):\n indicator = dict_[\"ESTIMATION\"][\"indicator\"]\n\n treated = data[[indicator, \"prop_score\"]][data[indicator] == 1].values\n untreated = data[[indicator, \"prop_score\"]][data[indicator] == 0].values\n\n treated = treated[:, 1].tolist()\n untreated = untreated[:, 1].tolist()\n\n # Make the histogram using a list of lists\n fig = plt.figure(figsize=figsize)\n hist = plt.hist(\n [treated, untreated],\n bins=bins,\n weights=[\n np.ones(len(treated)) / len(treated),\n np.ones(len(untreated)) / len(untreated),\n ],\n density=0,\n alpha=0.55,\n label=[\"Treated\", \"Unreated\"],\n )\n\n if show_output is True:\n plt.tick_params(axis=\"both\", labelsize=14)\n plt.legend(loc=\"upper right\", prop={\"size\": 14})\n plt.xticks(np.arange(0, 1.1, step=0.1))\n plt.grid(axis=\"y\", alpha=0.25)\n plt.xlabel(\"$P$\", fontsize=fontsize)\n plt.ylabel(\"$f(P)$\", fontsize=fontsize)\n\n if plot_title is True:\n plt.title(r\"Support of $P(\\hat{Z})$ for $D=1$ and $D=0$\")\n\n else:\n plt.close(fig)\n\n return hist, treated, untreated",
"def histogram(arr, xlbl, xrng=None, nbins=20, alpha=1.):\n if xrng is None:\n xrng = (np.min(arr),np.max(arr))\n p = figure(plot_width=600, plot_height=400)\n # Histogram\n hist, edges = np.histogram(arr, range=xrng, density=True, bins=nbins)\n p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_color='blue', alpha=alpha)\n # Label\n p.xaxis.axis_label = xlbl\n # Show\n show(p)",
"def ANN_Make_Binned_ROC_histograms(title,model, x_data, pT, CSV, bins, PU_range='full',addFeature=False):\n nbins = 60\n\n ANN_hist_list = []\n CSV_hist_list = []\n for bin_ in range(len(bins)-1):\n ANN_hist_list.append(rt.TH1D(\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n\n\tif addFeature == False:\n\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\telif addFeature == \"pT\":\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\telif addFeature == \"PV\":\n\t\tassert x_data.shape[1] == 21, \"wrong x_data shape: PV cannot be found\"\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\telse:\n\t\tprint \"invalid feature selection\"\n\t\treturn None\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\n for n,particle in enumerate(x_data):\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n if bin_numbers[n] == -100: continue\n ANN_hist_list[int(bin_numbers[n])].Fill(pred_y[n])\n CSV_hist_list[int(bin_numbers[n])].Fill(CSV[n])\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in ANN_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)",
"def draw_histogram(xx, hist_ax, alpha=1.0, colorV=None, facecolor='#80D080', edgecolor=None, nbins=75,\n fontsize=8, linewidth=1, xlabel=None, ylabel=None, label=None):\n plt.sca(hist_ax)\n if colorV is None:\n n, bins, patches = hist_ax.hist(xx, nbins, histtype='stepfilled', alpha=alpha, linewidth=linewidth, label=label)\n plt.setp(patches, 'facecolor', facecolor)\n if edgecolor is not None:\n plt.setp(patches, 'edgecolor', edgecolor)\n else:\n n, bins, patches = hist_ax.hist(xx, nbins, alpha=alpha, linewidth=linewidth, label=label)\n\n if xlabel is not None:\n hist_ax.set_xlabel(xlabel, fontsize=fontsize)\n if ylabel is not None:\n hist_ax.set_ylabel(ylabel, fontsize=fontsize)\n return hist_ax",
"def end_hist(pulse, trap):\n all_trial_n, all_trial_n_ave = trap.sideband_cool_sch(pulse)\n n_max = np.amax(all_trial_n)\n hist_xar = sp.arange(n_max + 1) - 0.5\n \n # fig, ax = plt.subplots()\n plt.hist(all_trial_n[:, -1], bins = hist_xar)\n plt.xlabel('Phonon State')\n plt.ylabel('Distribution')\n # return ax",
"def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()",
"def hist(self,geo,pfile):\n\n # Create histogram of box data, rounding to nearest integers if temperature\n boxdata = self.img.flatten()\n imin = int(round(min(boxdata))) - 1\n imax = int(round(max(boxdata))) + 1\n ni = imax-imin+1 # number of bins to plot\n h = np.zeros(ni,dtype=int) # initialise with zeros\n for val in boxdata: # assign each image value to a bin\n i = int(round(val)) - imin \n h[i] += 1\n n = sum(h) # total number of values binned\n h = h * 100.0/n # convert no.in bins to %frequency\n plt.figure(WINDOW_HIST,figsize=(4,4))\n plt.clf()\n # Create title for histogram plot\n ttl = self.desc + '\\n' + \\\n 'Box: X=' + str(self.ix-self.mbox) + ':' \\\n + str(self.ix) + ':' \\\n + str(self.ix+self.mbox) + \\\n ', Y=' + str(self.iy-self.mbox) + ':' \\\n + str(self.iy) + ':' \\\n + str(self.iy+self.mbox)\n plt.title(ttl)\n plt.ylabel(\"% Frequency\")\n tdisp = self.label in ( 'T9', 'T10', 'TS' )\n if tdisp: plt.xlabel(\"Pixel Temperature [K]\")\n else: plt.xlabel(\"Pixel Value [0:255]\")\n xval = np.arange(imin,imax+1,dtype=int)\n # Set colour of histogram according to channel\n plt.bar(xval,h,color=plot_colours.get(self.label,'gray'))\n x0,x1 = plt.xlim()\n y0,y1 = plt.ylim()\n boxmean = np.mean(boxdata)\n boxsd = np.std(boxdata)\n midpix = self.img[self.mbox,self.mbox]\n plt.plot( boxmean+[0,0], [y0,y1], ':', color='black' )\n plt.errorbar ( boxmean, 0.9*y1, xerr=boxsd, color='black', \n capsize=4 )\n plt.plot ( midpix, 0.9*y1, 's', color='black', \n markerfacecolor='none' ) \n plt.tight_layout()\n if boxmean > 0.5 * ( x1 + x0 ): xt = x0 + 0.4 * ( x1 - x0 )\n else: xt = x0 + 0.95*(x1-x0)\n yt = y0 + 0.95*(y1-y0)\n yd = 0.05*(y1-y0)\n text = 'Mean = {:6.2f}'.format(boxmean)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n text = 'S.D. = {:6.2f}'.format(boxsd)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n text = 'NPix = {:6n}'.format(n)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n if tdisp: text = 'MidPix = {:6.2f}'.format(midpix)\n else: text = 'MidPix = {:6n}'.format(midpix)\n plt.text(xt,yt,text,ha=\"right\")\n if geo.cal:\n lat,lon,zen = geo.locate(self.ix,self.iy) \n text = 'Lat = {:6.2f}'.format(lat)\n yt -= yd\n plt.text(xt,yt,text,ha=\"right\") \n text = 'Lon = {:6.2f}'.format(lon)\n yt -= yd\n plt.text(xt,yt,text,ha=\"right\") \n if pfile: \n file = input ( \"Save to file (<CR>=hist.pdf): \" ) or \"hist.pdf\"\n plt.savefig(file)",
"def plot_hist(df, num_bins=8):\n df.hist(figsize=(24, 20), bins=num_bins)\n plt.axes",
"def plot_histogram(beta=3):\n m = 10 ** beta\n\n # generate m normal random variables of 100 points each\n X = np.random.randn(100, m)\n\n # take the maximum along the rows\n Z = np.max(X, axis=1)\n\n # plot the pdf with a gaussian kernel density estimate\n plt.subplot(121)\n sns.distplot(Z, kde=True)\n plt.title(r'Histogram of Z for $\\beta$ = {}'.format(beta))\n\n # plot the cdf and find t in relation with Q3)\n plt.subplot(122)\n plt.hist(Z, bins=25, normed=True, cumulative=True)\n plt.title(r'P[Z $\\leq$ t]$\\geq$0.9 for t$\\geq$%0.4f' % (np.sqrt(2*(np.log(m) + np.log(10)))))\n\n print('P[Z <= t] >= 0.9 for t >= %0.4f using the inverse cdf' % (norm.ppf(0.9 ** (1/m))))\n print('P[Z <= t] >= 0.9 for t >= %0.4f using the Chernoff bounding method'\n % (np.sqrt(2*(np.log(m) + np.log(10)))))\n\n # save the plot to file & show the plot\n plt.savefig('histogram_beta_{}.png'.format(beta))\n\n plt.show()",
"def var_plot(var1, var2, tag=[\"o\",\"o\"],varmin=None, varmax= None,\n nbin=100,xscale=\"log\",yscale=\"log\",xlabel=\"\"):\n\n if varmin == None:\n varmin = min(min(var1),min(var2))\n if varmax == None:\n varmax = max(max(var1),max(var2))\n print(\"min = \",varmin,\" max=\",varmax)\n\n mask1 = (var1<=varmax) & (var1>=varmin)\n mask2 = (var2<=varmax) & (var2>=varmin)\n\n fig, (axa, axb) = plt.subplots(nrows=1, ncols=2,figsize=(12,5))\n\n # First population\n n, bins, p = axa.hist(var1[mask1],bins=nbin,alpha=0.5,\n label=MyLabel(var1[mask1],tag[0]))\n\n # Second population\n axa.hist(var2[mask2],bins=bins,alpha=0.5,\n label=MyLabel(var2[mask2],tag[1]))\n\n # Decoration\n axa.set_xscale(xscale)\n axa.set_yscale(yscale)\n axa.set_xlabel(xlabel)\n axa.legend()\n\n # Ratio betwenn the two plots\n axb.hist(var2[mask2]/var1[mask1],bins=nbin,alpha=0.5,\n label=MyLabel(var2[mask2]/var1[mask1],tag[1]+\"/\"+tag[0]))\n axb.set_xlabel(xlabel+ \" ratio\")\n axb.set_yscale(yscale)\n axb.legend()",
"def plt_hist(pop, bin_limit=fit_range):\n plt.hist(pop, bins=range(0,bin_limit+1))\n plt.grid(True)\n plt.title('Distribution of Population')\n plt.show()",
"def histo ( self ,\n xbins = 20 , xmin = None , xmax = None ,\n ybins = 20 , ymin = None , ymax = None ,\n hpars = () , \n histo = None ,\n integral = False ,\n errors = False , \n density = False ) :\n \n \n histos = self.make_histo ( xbins = xbins , xmin = xmin , xmax = xmax ,\n ybins = ybins , ymin = ymin , ymax = ymax ,\n hpars = hpars ,\n histo = histo )\n\n # loop over the historgam bins \n for ix,iy,x,y,z in histo.items() :\n\n xv , xe = x.value() , x.error()\n yv , ye = y.value() , y.error()\n \n # value at the bin center \n c = self ( xv , yv , error = errors ) \n\n if not integral : \n histo[ix,iy] = c\n continue\n\n # integral over the bin \n v = self.integral( xv - xe , xv + xe , yv - ye , yv + ye )\n \n if errors :\n if 0 == c.cov2 () : pass\n elif 0 != c.value() and 0 != v : \n v = c * ( v / c.value() )\n \n histo[ix,iy] = v \n\n ## coovert to density historgam, if requested \n if density : histo = histo.density()\n \n return histo",
"def add_histogram(ax, x, bins=10):\n\n divider = make_axes_locatable(ax)\n zax = divider.append_axes(\"bottom\", \"30%\", pad=0.1)\n ax.figure.add_axes(zax, axisbg=ax.figure.get_facecolor())\n\n H = HistogrammPlot(ax=zax, bins=bins)\n H.plot(x)",
"def plot_hist(datasets, bins, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n plt.hist(data, bins=bins[idx], density=True, label=labels[idx], alpha=alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/hist_\"+\"_\".join(labels)+\".png\")\n plt.show()",
"def draw_histogram(data, # type: thelper.typedefs.ArrayType\n bins=50, # type: Optional[int]\n xlabel=\"\", # type: Optional[thelper.typedefs.LabelType]\n ylabel=\"Proportion\", # type: Optional[thelper.typedefs.LabelType]\n show=False, # type: Optional[bool]\n block=False, # type: Optional[bool]\n ): # type: (...) -> thelper.typedefs.DrawingType\n fig, ax = plt.subplots()\n ax.hist(data, density=True, bins=bins)\n if len(ylabel) > 0:\n ax.set_ylabel(ylabel)\n if len(xlabel) > 0:\n ax.set_xlabel(xlabel)\n ax.set_xlim(xmin=0)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return fig\n plt.pause(0.5)\n return fig, ax",
"def plot_histogram_assess(assess_input, figure_output):\n\n sns.set_style(\"white\")\n raw_auc = pd.read_table(assess_input, index_col=\"Motif\")\n raw_auc = raw_auc.drop_duplicates()\n # df = df.T.drop_duplicates().T\n raw_auc = raw_auc.sort(columns=\"MNCP\", axis=0, ascending=False)\n labels = raw_auc.index\n x = 10\n if len(labels) > 50:\n x = 15\n elif len(labels) < 10:\n x = 5\n f, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(x, 10), sharex=True)\n a = sns.barplot(x=labels, y=raw_auc[\"AUC\"],\n palette='colorblind', x_order=labels, ax=ax1)\n b = sns.barplot(x=labels, y=raw_auc[\"MNCP\"],\n palette=\"colorblind\", x_order=labels, ax=ax2)\n c = sns.barplot(x=labels, y=raw_auc[\"Spearman\"],\n palette=\"colorblind\", x_order=labels, ax=ax3)\n d = sns.barplot(x=labels, y=raw_auc[\"Pearson\"],\n palette=\"colorblind\", x_order=labels, ax=ax4)\n d.set_xticklabels(labels, rotation=90)\n\n sns.despine()\n f.savefig(figure_output + \".eps\", bbox_inches='tight')\n f.savefig(figure_output + \".png\", bbox_inches='tight')",
"def plotPosteriors(posteriors):\n for i,p in enumerate(posteriors):\n plt.hist(p,bins=20,histtype='stepfilled',alpha=0.5,\n density=True,label='Bin {0}'.format(i))\n plt.legend()\n plt.ylabel(\"Probability\")\n plt.xlabel(\"Posterior\")\n\n return",
"def plot_histogram(hs, bins, ax=None, labels=None, title=None, **bar_params):\r\n # identify how many histogram series:\r\n if len(hs) == len(bins) - 1:\r\n nhs = 1\r\n hs = [hs]\r\n else:\r\n nhs = len(hs)\r\n if labels == None:\r\n labels = ['' for i in range(nhs)]\r\n width = (bins[1]-bins[0])/nhs\r\n x = np.array(bins[0:-1])\r\n if ax==None:\r\n f, ax = plt.subplots()\r\n for i in range(nhs):\r\n ax.bar(x + width * (i+0.5), hs[i], width=width, label=labels[i], **bar_params)\r\n if labels[0] != '':\r\n plt.legend()\r\n if title!=None:\r\n plt.title(title)\r\n return ax",
"def plot_trait_histogram(params, traits):\n\n pylab.subplot(122).clear()\n pylab.xlabel(r\"$x$\")\n pylab.ylabel(r\"$I$\")\n pylab.hist(traits, 100, range = (0, params[\"max_trait\"]), normed = True, \n facecolor = \"black\")\n pylab.xlim(0, params[\"max_trait\"])\n pylab.ylim(0, params[\"population\"])\n ax = pylab.gca()\n ax.yaxis.major.formatter.set_powerlimits((0,0))\n pylab.draw()",
"def plot_win_hist(s: pd.Series, title: str, \n bins: int=20) -> Tuple[plt.Figure, plt.Axes]:\n fig, ax = plt.subplots()\n ax.hist(s, bins=bins)\n ax.set_title(title)\n fig.tight_layout()\n return fig, ax",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):",
"def double_hist(\n hyper_params_grid,\n do_change,\n fixed,\n hp2res,\n hp_labels,\n template_output,\n ax,\n fig,\n **kwargs,\n):\n logg = logging.getLogger(f\"c.{__name__}.double_hist\")\n logg.debug(f\"Start double_hist\")\n\n if ax is None or fig is None:\n fig, ax = plt.subplots()\n\n colors = [\"b\", \"g\", \"r\", \"c\", \"m\", \"y\", \"k\"]\n\n # two params to change\n la = do_change[0]\n lb = do_change[1]\n\n bin_width = 0.8\n # bin_pos is the CENTER of the bar\n bin_pos = [i + bin_width / 2 for i in range(len(hyper_params_grid[la]))]\n # sub length of a step for secondary param\n bin_step = bin_width / len(hyper_params_grid[lb])\n\n logg.debug(f\"bin_width {bin_width} bin_pos {bin_pos} bin_step {bin_step}\")\n\n hp_dict = fixed.copy()\n\n # iterate over the first\n for ia, val_a in enumerate(hyper_params_grid[la]):\n hp_dict[la] = val_a\n last_bars = []\n bar_labels = []\n # iterate over the second\n for ib, val_b in enumerate(hyper_params_grid[lb]):\n hp_dict[lb] = val_b\n hp_val = res_get(hp2res, hp_dict, hp_labels)\n bar_pos = ia + bin_step * ib + bin_step / 2\n bar_label = f\"{val_b}\"\n bar_labels.append(bar_label)\n c = colors[ib % len(colors)]\n new_bar = ax.bar(x=bar_pos, height=hp_val, width=bin_step, color=c)\n last_bars.append(new_bar)\n\n # we do the horror of last_bars to set the legend only once, as the lb\n # params are the same in all la bins\n for i, last_bar in enumerate(last_bars):\n last_bar.set_label(bar_labels[i])\n\n ax.set_xticks(bin_pos)\n # ax.set_xticklabels([x for x in hyper_params_grid[la]], rotation=90)\n ax.set_xticklabels([x for x in hyper_params_grid[la]], rotation=0)\n ax.set_xlabel(f\"{la} - {lb}\")\n ax.set_ylabel(f\"Loss\")\n ax.legend()\n\n title = f\"Loss for fixed params {fixed}\"\n max_title_len = 70\n wrapped_title = \"\\n\".join(wrap(title, max_title_len))\n ax.set_title(wrapped_title)\n\n # fig.savefig(\"./output/test_double_hist.png\")\n fig.savefig(template_output.format(\"double_hist\"))",
"def hist(self, bins):\n x = self.x\n plt.hist(x, bins)\n plt.xlabel('Observed Data')\n plt.ylabel('Frequency')\n plt.show()"
]
| [
"0.6409904",
"0.6375965",
"0.6235954",
"0.5931041",
"0.58610284",
"0.5844425",
"0.58327806",
"0.58259004",
"0.5734927",
"0.57153136",
"0.57044625",
"0.56913066",
"0.5640836",
"0.5631618",
"0.56309974",
"0.56043017",
"0.55923885",
"0.5573512",
"0.5562074",
"0.5520641",
"0.55038106",
"0.54969466",
"0.549367",
"0.5483649",
"0.5483026",
"0.5467652",
"0.5457102",
"0.54565614",
"0.5439215",
"0.5429776"
]
| 0.7293212 | 0 |
Find peak of hist, check against fitted gaus | def check_gaus_fit(hist):
s = ROOT.TSpectrum(1)
s.Search(hist, 1, "new")
peaks_buff = s.GetPositionX()
x_peak = peaks_buff[0]
return (abs(hist.GetFunction('gaus').GetParameter(1) - x_peak) / abs(x_peak)) < 0.1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hist_peak_search(hist, bins):\n\n ix = peakutils.indexes(-hist, thres = 0.15/max(-hist), min_dist = 2)\n peaks = list(bins[list(ix)])\n\n return peaks",
"def getFirstVallay(hist):\n hist = smooth(hist)\n safe_range = (len(hist)//100, len(hist)//4)\n start = max(int(round(np.argmax(hist[:int(len(hist)/4)]))), 10) # Start searching from maximum peak\n start = min(start, safe_range[1]) # start point should be smaller than 50 pixel\n start = max(safe_range[0], start) # start point should be bigger than 20\n limit = min(int(start * 1.5), 100) # Set limit of the first valley as 150% of maximum peak location\n max_val = 0\n for i in range(start, limit):\n if hist[i] > max_val:\n max_val = hist[i]\n if hist[i] <= max_val*0.5:\n return i\n return limit",
"def detect_peak(data):\n nonlocal last, ascent_dist, ascent_start\n if data > last:\n if ascent_start is None:\n ascent_start = last\n ascent_dist += 1\n else:\n if ascent_dist:\n peak = last\n ascent_dist = 0\n if (peak - ascent_start) > thresh:\n last = data\n ascent_start = None\n return peak\n ascent_start = None\n last = data\n return None",
"def hist_and_thresh(self):\n bins, occ, _ = self.histogram()\n self.thresh = np.mean(bins) # initial guess\n self.peaks_and_thresh() # in case peak calculation fails\n # if np.size(self.peak_indexes) == 2: # est_param will only find one peak if the number of bins is small\n # # set the threshold where the fidelity is max\n # self.search_fidelity(self.peak_centre[0], self.peak_widths[0] ,self.peak_centre[1])\n try: \n thresh = threshold_minimum(np.array(self.stats['Counts']), len(bins))\n int(np.log(thresh)) # if thresh <= 0 this gives ValueError\n self.thresh = thresh\n except (ValueError, RuntimeError, OverflowError): pass\n try:\n # atom is present if the counts are above threshold\n self.stats['Atom detected'] = [x // self.thresh for x in self.stats['Counts']]\n # self.fidelity, self. err_fidelity = np.around(self.get_fidelity(), 4) # this is a relatively slow operation\n except (ValueError, OverflowError): pass\n return bins, occ, self.thresh",
"def peak(self):\n pass",
"def getPeaksFromHist(orig_hist, width_thres=0, height_thres=0):\n if len(orig_hist) < 10:\n return []\n if width_thres == 0:\n width_thres = 5\n if height_thres == 0:\n height_thres = np.mean(orig_hist)\n\n hist = smooth(np.array(orig_hist), 20)\n hist[hist < height_thres] = height_thres\n hist = hist - min(hist)\n\n peak_hist1 = np.zeros((len(hist), 1))\n peak_list = []\n for i in range(4, len(hist) - 4):\n peak_hist1[i] = hist[i + 1] - hist[i - 1]\n\n for i in range(width_thres, len(peak_hist1) - width_thres):\n\n\n if all(peak_hist1[i - width_thres:i] > 0) and all(peak_hist1[i + 1:i + width_thres] < 0):\n peak_list.append(i)\n\n if len(peak_list) != 0:\n new_list = []\n derivate = np.zeros((len(peak_list), 1))\n for i in range(1, len(peak_list)):\n derivate[i] = peak_list[i] - peak_list[i - 1]\n i = 0\n while i < len(derivate) - 1:\n s = peak_list[i]\n c = 0\n if derivate[i + 1] == 1 and not i + 1 == len(derivate) - 1:\n j = i + 1\n while j <= len(derivate):\n if derivate[j] == 1 and not j + 1 == len(derivate):\n s += peak_list[j]\n c += 1\n j += 1\n else:\n s /= (c + 1)\n i = j\n break\n else:\n i += 1\n new_list.append(s)\n if i != len(derivate) - 1:\n new_list.append(peak_list[i])\n peak_list = new_list\n\n peak_list.sort()\n return peak_list",
"def find_optimal_threshold(self, hist):\n\n\n threshold = int((len(hist)-1)/2)\n ct = len(hist) - 1\n\n while True:\n if(ct < 1):\n break\n threshold1 = self.evalue(hist,0,threshold)\n threshold2 = self.evalue(hist,threshold,len(hist) - 2)\n nt = int((threshold1+threshold2)/2)\n ct = nt - threshold\n threshold = nt\n\n return threshold",
"def peak(data, fft_data=None):\n return np.max(np.abs(data))",
"def calculate_peak_prominence(data, index):\n current_peak = data[index]\n\n # ignore values at either end of the dataset or values that are not local maxima\n if (\n index == 0\n or index == len(data) - 1\n or data[index - 1] > current_peak\n or data[index + 1] > current_peak\n or (data[index - 1] == current_peak and data[index + 1] == current_peak)\n ):\n return 0\n\n # by definition, the prominence of the highest value in a dataset is equal to the value itself\n if current_peak == max(data):\n return np.log(current_peak)\n\n # find index of nearest maxima which is higher than the current peak\n higher_peaks_inds = [i for i, x in enumerate(data) if x > current_peak]\n\n right_peaks = [x for x in higher_peaks_inds if x > index]\n if right_peaks:\n closest_right_ind = min(right_peaks)\n else:\n closest_right_ind = np.inf\n\n left_peaks = [x for x in higher_peaks_inds if x < index]\n if left_peaks:\n closest_left_ind = max(left_peaks)\n else:\n closest_left_ind = -np.inf\n\n right_distance = closest_right_ind - index\n left_distance = index - closest_left_ind\n\n if (right_distance) > (left_distance):\n closest = closest_left_ind\n else:\n closest = closest_right_ind\n\n # find the value at the lowest point between the nearest higher peak (the key col)\n lo = min(closest, index)\n hi = max(closest, index)\n between_slice = data[lo:hi]\n key_col = min(between_slice)\n\n prominence = np.log(data[index] - key_col + 1)\n\n return prominence",
"def bls_peakfinder(results):\n maxima = find_peaks(results.power, distance=100)[0]\n\n top_power_inds = maxima[np.argsort(results.power[maxima])[::-1]]\n\n highest_peak = results.power[top_power_inds[0]]\n next_highest_peak = results.power[top_power_inds[1]]\n\n significance = highest_peak / next_highest_peak\n\n return top_power_inds, significance",
"def determine_peaks_and_limits(\n data, smoothed, limits,\n peak_prom, peak_height,\n valley_prom, valley_height,\n debug, smooth_window_size, outfile,\n skip_smooth,\n):\n mm = max(smoothed)\n peaks, props = find_peaks(smoothed, height=peak_height, prominence=peak_prom) # maxima (peaks positions)\n rpeaks, rprops = find_peaks([-i+mm for i in smoothed], height=valley_height, prominence=valley_prom) # minima (peaks limits)\n\n if len(peaks) > 3 :\n print(\"WARNING: More than 3 peaks detected.\\nPossible erroneous detection:\\n\\t-Restart setting the -ll parameter.\\n\\t-check histogram and modify peak height and prominence arguments accordingly.\\n\\t-Contaminant peaks may also break detection, remove them with tools such as blobtools or by hard-filtering low coverage contigs.\")\n print(\"NOTE: Assuming the last 2 peaks are diploid and haploid...\")\n\n if debug :\n debug_plot_peak_errors(data, smoothed, peaks, limits.values(), rpeaks, smooth_window_size, outfile, skip_smooth)\n\n if len(peaks) > 0 :\n print(\"Peaks found: \" + \"x, \".join(str(p) for p in peaks) + \"x\")\n else :\n raise Exception(\"No peaks found! Try changing the input parameters or setting thresholds manually!\")\n if len(rpeaks) > 0 :\n print(\"Valleys found: \" + \"x, \".join(str(p) for p in rpeaks) + \"x\")\n else :\n print(\"No valleys found!\")\n\n valleys = [0] + list(rpeaks) + [len(smoothed)]\n thresholds = get_threshold_between_peaks(smoothed, peaks, valleys)\n\n relevant_peaks = peaks[-3:]\n #valleys = rpeaks[-3:]\n print(\"Relevant peaks: \" + \"x, \".join(str(p) for p in relevant_peaks) + \"x\")\n print(\"Thresholds:\\n\\t- \" + \"\\t- \".join(\"{}: {}x\\n\".format(k,p) for k,p in thresholds.items()))\n\n return relevant_peaks, thresholds",
"def find_optimal_threshold(self, hist):\n\n # print(\"number of pixels using sum: \", sum(hist))\n probability = np.array((1/sum(hist))*hist)\n expected_value = probability*np.array(range(256))\n # print(\"probability: \\n\", probability)\n # print(\"expected_value: \\n\", expected_value)\n\n threshold = len(hist)/2\n temp_threshold = 0\n\n while abs(threshold - temp_threshold) > 0.001:\n temp1 = []\n temp2 = []\n print(\"New threshold: \", threshold)\n for i in range(len(hist)):\n if i < threshold:\n temp1.append(expected_value[i])\n else:\n temp2.append(expected_value[i])\n mean1 = sum(temp1)\n print(\"mean1: \\n\", mean1)\n mean2 = sum(temp2)\n print(\"mean2: \\n\", mean2)\n temp_threshold = threshold\n threshold = (mean1+mean2)/2\n print(\"threshold: \", threshold)\n print(\"temp_threshold: \", temp_threshold)\n\n return threshold",
"def find_fbs(self,q=99.9):\n fbs=np.zeros((self.dataset['pred_avg'].shape[0],1))\n for itrc in np.arange(0,self.dataset['pred_avg'].shape[0]):\n trc=self.dataset['pred_avg'][itrc]\n nonzero=np.where(trc!=0)[0]\n perc=np.nanpercentile(trc[list(nonzero)],q)\n potential_fbs=np.where(trc[:]>=perc)[0]\n if len(potential_fbs)!=0:\n fbs[itrc]=np.int(potential_fbs[0])\n else:\n print('FB was not found for trace id:\\t{}'.format(itrc))\n print('Completed')\n return fbs",
"def peakFinder(self, fit_peaks_image):\n # Calculate background variance.\n #\n # Note the assumption here that we are working in units of photo-electrons\n # so Poisson statistics applies, variance = mean.\n #\n bg_var = self.background + fit_peaks_image\n \n # Add camera variance if set.\n if self.camera_variance is not None:\n bg_var += self.camera_variance\n\n # Calculate weighted variance if the image is being smoothed.\n if self.fg_vfilter is not None:\n bg_var = self.fg_vfilter.convolve(bg_var)\n\n if self.check_mode:\n with tifffile.TiffWriter(\"variances.tif\") as tf:\n tf.save(bg_var.astype(numpy.float32))\n \n # Remove problematic values.\n #\n mask = (bg_var <= 0.1)\n if (numpy.sum(mask) > 0):\n if self.check_mode:\n print(\"Warning! small and/or negative values detected in background variance!\")\n bg_var[mask] = 0.1\n \n # Convert to standard deviation.\n bg_std = numpy.sqrt(bg_var)\n\n # Calculate foreground.\n foreground = self.image - self.background - fit_peaks_image\n\n # Calculate smoothed image if we have a foreground filter.\n if self.fg_mfilter is not None:\n foreground = self.fg_mfilter.convolve(foreground)\n\n if self.check_mode:\n with tifffile.TiffWriter(\"foreground.tif\") as tf:\n tf.save(foreground.astype(numpy.float32))\n \n # Calculate foreground in units of signal to noise.\n foreground = foreground/bg_std\n \n if self.check_mode:\n with tifffile.TiffWriter(\"fg_bg_ratio.tif\") as tf:\n tf.save(foreground.astype(numpy.float32))\n \n # Mask the image so that peaks are only found in the AOI.\n masked_image = foreground * self.peak_mask\n\n # Identify local maxima in the masked image.\n [x, y, z] = self.mfinder.findMaxima([masked_image])\n return {\"x\" : x, \"y\" : y, \"z\" : z, \"sigma\" : numpy.ones(x.size)*self.sigma}",
"def peak_finder(thresh=0):\n last = 0 # Track last input value\n ascent_dist = 0 # Distance from last trough.\n ascent_start = None # Last trough height\n\n def detect_peak(data):\n \"\"\" Returns initialized function to detect peaks on live streaming data.\n\n Args:\n data (numeric value): Input data point.\n\n Returns:\n If peak is detected return peak value, else return None\n \"\"\"\n nonlocal last, ascent_dist, ascent_start\n if data > last:\n if ascent_start is None:\n ascent_start = last\n ascent_dist += 1\n else:\n if ascent_dist:\n peak = last\n ascent_dist = 0\n if (peak - ascent_start) > thresh:\n last = data\n ascent_start = None\n return peak\n ascent_start = None\n last = data\n return None\n\n return detect_peak",
"def findpeakg(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = GaussianModel(prefix = 'g1_')\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n for j in range(1,fnum[i]):\n mod = mod + GaussianModel(prefix = 'g%i_'%(j + 1))\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][-1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Gaussian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][-1] - bottom)/width)\n for k in range(fnum[i]):\n amplitude = pars['g%i_height'%(k + 1)].value\n sigma = pars['g%i_sigma'%(k + 1)].value\n miu = pars['g%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude*math.exp( - (bottom + width*p - miu)*(bottom + width*p - miu)/(2*sigma*sigma)))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([sigma,miu,amplitude,sum1,tempbo,tempto])\n return peak",
"def peak_finder(filt_im, dist, threshold):\n from skimage.feature import peak_local_max\n coordinates = peak_local_max(filt_im, min_distance=dist, threshold_abs=threshold)\n return coordinates",
"def _find_peaks_heuristic(phnorm):\n median_scale = np.median(phnorm)\n\n # First make histogram with bins = 0.2% of median PH\n hist, bins = np.histogram(phnorm, 1000, [0, 2*median_scale])\n binctr = bins[1:] - 0.5 * (bins[1] - bins[0])\n\n # Scipy continuous wavelet transform\n pk1 = np.array(sp.signal.find_peaks_cwt(hist, np.array([2, 4, 8, 12])))\n\n # A peak must contain 0.5% of the data or 500 events, whichever is more,\n # but the requirement is not more than 5% of data (for meager data sets)\n Ntotal = len(phnorm)\n MinCountsInPeak = min(max(500, Ntotal//200), Ntotal//20)\n pk2 = pk1[hist[pk1] > MinCountsInPeak]\n\n # Now take peaks from highest to lowest, provided they are at least 40 bins from any neighbor\n ordering = hist[pk2].argsort()\n pk2 = pk2[ordering]\n peaks = [pk2[0]]\n\n for pk in pk2[1:]:\n if (np.abs(peaks-pk) > 10).all():\n peaks.append(pk)\n peaks.sort()\n return np.array(binctr[peaks])",
"def pull_peak_times(data):\n bin_centers = np.arange(0.,1.501,0.002)\n data = np.asarray(data)\n maxs = np.argmax(data, axis=1)\n return bin_centers[maxs]",
"def _peakdet(ts, threshold_ratio=.1):\n THRESH = threshold_ratio * (max(ts)-min(ts))\n maxima = []\n minima = []\n extrema = []\n looking_for_maximum = True\n last = 0\n for i in range(1, len(ts)):\n if looking_for_maximum:\n if ts[i] > ts[last]:\n last = i\n elif ts[i] + THRESH < ts[last]:\n maxima.append(last)\n extrema.append(last)\n looking_for_maximum = False\n else: #looking for minimum\n if ts[i] < ts[last]:\n last = i\n elif ts[i] - THRESH > ts[last]:\n minima.append(last)\n extrema.append(last)\n looking_for_maximum = True\n \n return extrema",
"def find_local_peak(flux, x, width, figname=None):\n width = int(round(width))\n if width%2 != 1:\n width += 1\n half = int((width-1)/2)\n\n i = int(round(x))\n\n # find the peak in a narrow range\n\n i1, i2 = max(0, i-half), min(flux.size, i+half+1)\n\n if i2 - i1 <= 4:\n # 4 is the number of free parameters in fitting function\n return None\n\n # find the peak position\n imax = flux[i1:i2].argmax() + i1\n xdata = np.arange(i1,i2)\n ydata = flux[i1:i2]\n # determine the initial parameters for gaussian fitting + background\n p0 = [ydata.max()-ydata.min(), imax, 3., ydata.min()]\n # least square fitting\n #p1,succ = opt.leastsq(errfunc2, p0[:], args=(xdata,ydata))\n p1, cov, info, mesg, ier = opt.leastsq(errfunc2, p0[:],\n args=(xdata, ydata), full_output=True)\n\n res_lst = errfunc2(p1, xdata, ydata)\n\n if res_lst.size-len(p0)-1 == 0:\n return None\n\n std = math.sqrt((res_lst**2).sum()/(res_lst.size-len(p0)-1))\n\n if figname is not None:\n fig = plt.figure()\n ax1 = fig.add_axes([0.1, 0.4, 0.8, 0.5])\n ax2 = fig.add_axes([0.1, 0.1, 0.8, 0.25])\n ax1.plot(xdata, ydata, 'o', ms=4)\n newx = np.arange(xdata[0], xdata[-1], 0.1)\n newy = gaussian_bkg(p1[0], p1[1], p1[2], p1[3], newx)\n ax1.plot(newx, newy, '-', lw=0.6)\n yerr = errfunc2(p1, xdata, ydata)\n ax2.plot(xdata, yerr, 'o', ms=4)\n ax1.set_xlim(xdata[0], xdata[-1])\n ax2.set_xlim(xdata[0], xdata[-1])\n fig.savefig(figname)\n plt.close(fig)\n\n return i1, i2, p1, std",
"def testUpperBound(self,time,accel):\n\t\tif (time - self.timestamp) > ParserSettings.TIME_DELTA:#tests lockout threshold of a flick event\n\t\t\tif accel < self.upper:#tests if flick maximum is found, relative to previous magnitude\n\t\t\t\tself.timestamp\t= time#once peak found, set appropriate data and return a flick\n\t\t\t\ttoReturn\t\t= self.upper\n\t\t\t\tself.upper\t\t= 0\n\t\t\t\treturn toReturn\n\t\t\telse:\n\t\t\t\tself.upper = accel#if no flick yet, update most recent flick to test\n\t\t\t\treturn 0\n\t\telse:\n\t\t\treturn 0",
"def extractPeak( image, nSizeX, nSizeY, nMaxSize, nMaxNbr = 5, nErrorValue = -1 ):\n blobs = []; # will contain the center of the blob and it's max value\n nSmallerMax = 0; # the max value of the smallest peak\n nSmallerIdx = -1;\n nMaxSizeSquared = nMaxSize*nMaxSize;\n for y in range( nSizeY ):\n for x in range( nSizeX ):\n# print( \"x,y: %d,%d\" % (x,y) );\n nVal = image[x+y*nSizeX];\n if( nVal != nErrorValue ):\n if( nVal > nSmallerMax ):\n # update blobs\n # find in blobs\n bFound = False; \n bUpdateSmallerMax = False;\n n = 0;\n while( n < len( blobs ) ):\n if( distSquared( blobs[n][0], blobs[n][1], x, y ) < nMaxSizeSquared ):\n # found it!\n if( nVal > blobs[n][2] ):\n # update this blobs\n blobs[n][0] = x;\n blobs[n][1] = y;\n blobs[n][2] = nVal;\n if( nSmallerMax == nVal ):\n # update smaller max\n bUpdateSmallerMax = True;\n bFound = True;\n break;\n n += 1;\n if( not bFound ):\n # create a new one\n if( len( blobs ) < nMaxNbr ):\n # create from scratch\n blobs.append( [x,y,nVal] );\n bUpdateSmallerMax = True;\n else:\n # reuse smaller\n blobs[nSmallerIdx][0] = x;\n blobs[nSmallerIdx][1] = y;\n blobs[nSmallerIdx][2] = nVal;\n bUpdateSmallerMax = True;\n \n if( bUpdateSmallerMax ):\n nSmallerMax = 0xFFFFFFF;\n for idx, blob in enumerate( blobs ):\n if( blob[2] < nSmallerMax ):\n nSmallerMax = blob[2];\n nSmallerIdx = idx;\n# print( \"blobs: %s\" % str( blobs ) );\n # if( nVal > nSmallerMax ) - end\n # if( nVal != nErrorValue ) - end\n \n # convert to fixed size\n for idx, blob in enumerate( blobs ):\n blobs[idx].append( 50-idx*10 );\n\n return blobs;",
"def find_closest_peaks(power, freqs, guess_freqs):\n # find the maxima in the power spectrum\n maxima = sig.argrelextrema(power, np.greater)\n\n maxima_freqs = np.zeros(freqs.shape)\n maxima_freqs[maxima] = freqs[maxima]\n\n # determine the peaks as the closest maxima to\n # each of the standing wave frequencies\n peak_indices = [find_nearest_idx(maxima_freqs, f) for f in guess_freqs]\n return peak_indices",
"def peakdetect_parabole(y_axis, x_axis, points = 9):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n # make the points argument odd\n points += 1 - points % 2\n #points += 1 - int(points) & 1 slower when int conversion needed\n \n # get raw peaks\n max_raw, min_raw = peakdetect_zero_crossing(y_axis)\n \n # define output variable\n max_peaks = []\n min_peaks = []\n \n max_ = _peakdetect_parabole_fitter(max_raw, x_axis, y_axis, points)\n min_ = _peakdetect_parabole_fitter(min_raw, x_axis, y_axis, points)\n \n max_peaks = map(lambda x: [x[0], x[1]], max_)\n max_fitted = map(lambda x: x[-1], max_)\n min_peaks = map(lambda x: [x[0], x[1]], min_)\n min_fitted = map(lambda x: x[-1], min_)\n \n \n #pylab.plot(x_axis, y_axis)\n #pylab.hold(True)\n #for max_p, max_f in zip(max_peaks, max_fitted):\n # pylab.plot(max_p[0], max_p[1], 'x')\n # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)\n #for min_p, min_f in zip(min_peaks, min_fitted):\n # pylab.plot(min_p[0], min_p[1], 'x')\n # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)\n #pylab.show()\n \n return [max_peaks, min_peaks]",
"def gaussianPeakHistogram(self,thresholds,norm=False,fig=None,ax=None,**kwargs):\n\n\n\t\tif not matplotlib:\n\t\t\traise ImportError(\"matplotlib is not installed, cannot plot the peak histogram!\")\n\n\t\t#Instantiate figure\n\t\tif (fig is None) or (ax is None):\n\t\t\t\n\t\t\tself.fig,self.ax = plt.subplots()\n\n\t\telse:\n\n\t\t\tself.fig = fig\n\t\t\tself.ax = ax\n\n\t\t#Compute the gradient and laplacian expectation values\n\t\tgx,gy = self.gradient()\n\t\thxx,hyy,hxy = self.hessian()\n\n\t\tsigma0 = self.data.std()\n\t\tsigma1 = np.sqrt((gx**2+gy**2).mean())\n\t\tsigma2 = np.sqrt(((hxx+hyy)**2).mean())\n\n\t\t#Compute special parameters\n\t\tg = sigma1**2 / (sigma0*sigma2)\n\t\tt = np.sqrt(2)*sigma1/sigma2\n\t\tv = 0.5*(thresholds[1:]+thresholds[:-1])\n\t\tdv = thresholds[1:] - thresholds[:-1]\n\t\tif not(norm):\n\t\t\tv /= sigma0\n\t\t\tdv /= sigma0\n\n\t\tx = v*g\n\n\t\t#Compute G,N\n\t\tG = (x**2-g**2)*(1-0.5*sp.erfc(x/np.sqrt(2*(1-g**2)))) + x*(1-g**2)*np.exp(-x**2/(2*(1-g**2)))/np.sqrt(2*np.pi*(1-g**2)) + np.exp(-x**2/(3-2*(g**2)))*(1-0.5*sp.erfc(x/np.sqrt(2*(1-g**2)*(3-2*(g**2)))))/np.sqrt(3-2*(g**2))\n\t\tN = np.exp(-0.5*(v**2))*G*dv*(self.data.shape[0]**2)/((t**2)*(2*np.pi)**1.5)\n\n\t\t#Plot the histogram\n\t\tself.ax.plot(0.5*(thresholds[1:]+thresholds[:-1]),N,**kwargs)",
"def peakFinder(self, fit_peaks_image):\n raise FittingException(\"Finder had no peakFinder() method.\")",
"def find_histogram(vol, hist, mini, maxi, mask, use_mask):\n validsize = 0\n hist = np.zeros(hist.size, dtype=int)\n if mini == maxi:\n return -1\n\n fA = float(hist.size)/(maxi-mini)\n fB = (float(hist.size)*float(-mini)) / (maxi-mini)\n\n if use_mask:\n a = vol[mask > 0.5].flatten()\n else:\n a = vol.flatten()\n\n a = (a*fA + fB).astype(int)\n h = hist.size - 1\n\n for i in np.arange(a.size):\n hist[max(0, min(a[i], h))] += 1\n validsize += 1\n\n return hist, validsize",
"def spec_to_peaks(data, value, fp = iterate_structure(generate_binary_structure(rank = 2, connectivity=2), 10)):\n\n max_arr = maximum_filter(data, footprint = fp)\n return (data == max_arr) & (data > value)",
"def findpeakl(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = LorentzianModel(prefix = 'l1_')\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n for j in range(1,fnum[i]):\n mod = mod + LorentzianModel(prefix = 'l%i_'%(j + 1))\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Lorentzian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][ - 1] - bottom)/width)\n for k in range(fnum[i]):\n gama2 = (pars['l%i_sigma'%(k + 1)].value)**2\n amplitude = pars['l%i_height'%(k + 1)].value*gama2\n miu = pars['l%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude/((bottom + width*p - miu)*(bottom + width*p - miu) + gama2))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([gama2,miu,amplitude,sum1,tempbo,tempto])\n return peak"
]
| [
"0.6847167",
"0.65728945",
"0.65421",
"0.6399944",
"0.6385045",
"0.6322003",
"0.6320446",
"0.61476105",
"0.6143559",
"0.6129745",
"0.6124224",
"0.6073451",
"0.60435104",
"0.59944385",
"0.5978126",
"0.59742147",
"0.59578466",
"0.59361404",
"0.59117126",
"0.5847243",
"0.58335",
"0.5812471",
"0.58036673",
"0.57964945",
"0.5786717",
"0.5773241",
"0.57581484",
"0.5741395",
"0.5736823",
"0.5722156"
]
| 0.7401598 | 0 |
Make a graph of response Vs pt for a given eta bin. pt_var allows the user to specify which pT to bin in & plot against; should be the name of a variable in the tree. pt_max is a cut on the maximum value of pt (applied to l1 pt to avoid including saturation effects) | def plot_rsp_pt(inputfile, outputfile, absetamin, absetamax, pt_bins, pt_var, pt_max, pu_min, pu_max):
# Input tree
tree_raw = inputfile.Get("valid")
# Output folders
output_f = outputfile.GetDirectory('eta_%g_%g' % (absetamin, absetamax))
output_f_hists = None
if not output_f:
output_f = outputfile.mkdir('eta_%g_%g' % (absetamin, absetamax))
output_f_hists = output_f.mkdir("Histograms")
else:
output_f_hists = output_f.GetDirectory("Histograms")
gr_rsp_pt = ROOT.TGraphErrors()
# Cut strings
eta_cutStr = "TMath::Abs(eta) < %f && TMath::Abs(eta) > %f" % (absetamax, absetamin)
    # keep the pt < pt_max to safeguard against saturated L1 jets
pt_cutStr = "%s < %g && pt < %g" % (pt_var, pt_bins[-1], pt_max)
pu_cutStr = "numPUVertices <= %f && numPUVertices >= %f" % (pu_max, pu_min)
avoidSaturation_cut = "pt < 1023.1"
cutStr = " && ".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])
n_rsp_bins = 100
rsp_min = 0
rsp_max = 5
pt_array = array('d', pt_bins)
# First make a 2D plot
h2d_rsp_pt = ROOT.TH2D("h2d_rsp_%s_%g_%g" % (pt_var, absetamin, absetamax),
"%g < |#eta| < %g;p_{T};response" % (absetamin, absetamax),
len(pt_bins) - 1, pt_array,
n_rsp_bins, rsp_min, rsp_max)
tree_raw.Draw("rsp:%s>>h2d_rsp_%s_%g_%g" % (pt_var, pt_var, absetamin, absetamax), cutStr)
output_f_hists.WriteTObject(h2d_rsp_pt)
# Now for each pt bin, do a projection on 1D hist of response and fit a Gaussian
print pt_bins
for i, (pt_min, pt_max) in enumerate(pairwise(pt_bins)):
h_rsp = h2d_rsp_pt.ProjectionY("rsp_%s_%g_%g" % (pt_var, pt_min, pt_max), i + 1, i + 1)
print i, pt_min, pt_max
if h_rsp.Integral() <= 0:
print "No entries - skipping"
continue
# Fit with Gaussian
mean = h_rsp.GetMean()
err = h_rsp.GetMeanError()
peak = h_rsp.GetBinCenter(h_rsp.GetMaximumBin())
# if h_rsp.GetRMS() < 0.2:
# fit_result = h_rsp.Fit("gaus", "QER", "", peak - h_rsp.GetRMS(), peak + h_rsp.GetRMS())
# else:
# fit_result = h_rsp.Fit("gaus", "QER", "", peak - 0.5*h_rsp.GetRMS(), peak + 0.5*h_rsp.GetRMS())
fit_result = h_rsp.Fit("gaus", "QER", "", peak - h_rsp.GetRMS(), peak + h_rsp.GetRMS())
# fit_result = h_rsp.Fit("gaus", "QER", "", mean - h_rsp.GetRMS(), mean + h_rsp.GetRMS())
output_f_hists.WriteTObject(h_rsp)
# TODO: better check against Gaussian fit - are peaks ~ similar?
# if int(fit_result) == 0 and check_gaus_fit(h_rsp):
if int(fit_result) == 0 and abs(h_rsp.GetFunction("gaus").GetParameter(1) - peak) / peak < 0.1:
mean = h_rsp.GetFunction("gaus").GetParameter(1)
err = h_rsp.GetFunction("gaus").GetParError(1)
# Add the Gaussian to the total graph
N = gr_rsp_pt.GetN()
gr_rsp_pt.SetPoint(N, 0.5 * (pt_min + pt_max), mean)
gr_rsp_pt.SetPointError(N, 0.5 * (pt_max - pt_min), err)
else:
print "Cannot fit Gaussian in plot_rsp_pt, using raw mean instead"
# Save the graph
gr_rsp_pt.SetTitle("%g < |#eta^{L1}| < %g;p_{T}; <response> = <p_{T}^{L1}/p_{T}^{Ref}>" % (absetamin, absetamax))
gr_rsp_pt.SetName("gr_rsp_%s_eta_%g_%g" % (pt_var, absetamin, absetamax))
output_f.WriteTObject(gr_rsp_pt) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_rsp_eta(inputfile, outputfile, eta_bins, pt_min, pt_max, pt_var, pu_min, pu_max):\n\n gr_rsp_eta = ROOT.TGraphErrors()\n\n # Input tree\n tree_raw = inputfile.Get(\"valid\")\n\n # Output folders\n output_f = outputfile.GetDirectory('eta_%g_%g' % (eta_bins[0], eta_bins[-1]))\n output_f_hists = None\n if not output_f:\n output_f = outputfile.mkdir('eta_%g_%g' % (eta_bins[0], eta_bins[-1]))\n output_f_hists = output_f.mkdir(\"Histograms\")\n else:\n output_f_hists = output_f.GetDirectory(\"Histograms\")\n\n # Go through eta bins, get response hist, fit with Gaussian and add to\n # the overall graph\n for i, eta in enumerate(eta_bins[:-1]):\n absetamin = eta\n absetamax = eta_bins[i + 1] # Eta cut string\n\n # Cut strings\n eta_cutStr = \"TMath::Abs(eta) < %f && TMath::Abs(eta) > %f\" % (absetamax, absetamin)\n pt_cutStr = \"%s < %g && %s > %g\" % (pt_var, pt_max, pt_var, pt_min)\n pu_cutStr = \"numPUVertices <= %f && numPUVertices >= %f\" % (pu_max, pu_min)\n avoidSaturation_cut = \"pt < 1023.1\"\n cutStr = \" && \".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])\n print cutStr\n\n nb_rsp = 100\n rsp_min, rsp_max = 0, 5\n rsp_name = 'hrsp_eta_%g_%g_%s_%g_%g' % (absetamin, absetamax, pt_var, pt_min, pt_max)\n tree_raw.Draw(\"rsp>>%s(%d,%g,%g)\" % (rsp_name, nb_rsp, rsp_min, rsp_max), cutStr)\n h_rsp = ROOT.gROOT.FindObject(rsp_name)\n h_rsp.SetTitle(\";response (p_{T}^{L1}/p_{T}^{Ref});\")\n\n print 'Integral', h_rsp.Integral()\n\n if h_rsp.Integral() <= 0:\n print \"No entries - skipping\"\n continue\n\n # Fit with Gaussian\n peak = h_rsp.GetBinCenter(h_rsp.GetMaximumBin())\n if absetamin < 2.9:\n fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\",\n h_rsp.GetMean() - h_rsp.GetRMS(),\n h_rsp.GetMean() + h_rsp.GetRMS())\n else:\n fit_result = h_rsp.Fit(\"gaus\", \"QER\", \"\",\n peak - (0.5 * h_rsp.GetRMS()),\n peak + (0.5 * h_rsp.GetRMS()))\n\n mean = h_rsp.GetMean()\n err = h_rsp.GetMeanError()\n\n check_fit = True\n if check_fit:\n if int(fit_result) == 0:\n mean = h_rsp.GetFunction(\"gaus\").GetParameter(1)\n err = h_rsp.GetFunction(\"gaus\").GetParError(1)\n else:\n print \"cannot fit with Gaussian - using raw mean instead\"\n\n output_f_hists.WriteTObject(h_rsp)\n\n # add to graph\n N = gr_rsp_eta.GetN()\n print absetamin, \"-\", absetamax, mean, err\n gr_rsp_eta.SetPoint(N, 0.5 * (absetamin + absetamax), mean)\n gr_rsp_eta.SetPointError(N, 0.5 * (absetamax - absetamin), err)\n\n gr_rsp_eta.SetTitle(\";|#eta^{L1}|; <response> = <p_{T}^{L1}/p_{T}^{Ref}>\")\n gr_rsp_eta.SetName(\"gr_rsp_eta_%g_%g_%s_%g_%g\" % (eta_bins[0], eta_bins[-1], pt_var, pt_min, pt_max))\n output_f.WriteTObject(gr_rsp_eta)",
"def create_pt_hist(hist, binning, binning_uflow, pt_bin_edges, pt_bin_edges_uflow, variable_bin_edges):\n # THIS IS A HORRIBLE HACK BECAUSE I DIDNT FILL MY HISTS\n all_pt_bins = list(np.append(pt_bin_edges_uflow[:-1], pt_bin_edges))\n all_pt_bins.append(8000)\n # print(all_pt_bins)\n nbins_pt = len(all_pt_bins)-1\n h_new = ROOT.TH1D(\"hpt\"+cu.get_unique_str(), \"\", nbins_pt, array('d', all_pt_bins))\n for pt_ind in range(1, h_new.GetNbinsX()+1):\n this_sum = 0\n this_err_sq = 0\n this_pt = all_pt_bins[pt_ind-1]\n # ARGH THIS IS SO FRUSTRATING\n this_binning = binning if this_pt >= pt_bin_edges[0] else binning_uflow\n for var_ind, var in enumerate(variable_bin_edges[:-1]):\n bin_num = this_binning.GetGlobalBinNumber(var, this_pt)\n this_sum += hist.GetBinContent(bin_num)\n this_err_sq += hist.GetBinError(bin_num)**2\n h_new.SetBinContent(pt_ind, this_sum)\n h_new.SetBinError(pt_ind, math.sqrt(this_err_sq))\n return h_new",
"def plot_checks(inputfile, outputfile, absetamin, absetamax, max_pt, pu_min, pu_max):\n\n print \"Doing eta bin: %g - %g, max L1 jet pt: %g\" % (absetamin, absetamax, max_pt)\n\n # Input tree\n tree_raw = inputfile.Get(\"valid\")\n\n # Output folders\n output_f = outputfile.mkdir('eta_%g_%g' % (absetamin, absetamax))\n output_f_hists = output_f.mkdir(\"Histograms\")\n\n # Eta cut string\n eta_cutStr = \" TMath::Abs(eta)<%g && TMath::Abs(eta) > %g \" % (absetamax, absetamin)\n # Pt cut string\n pt_cutStr = \"pt < %g\" % max_pt\n # PU cut string\n pu_cutStr = \"numPUVertices <= %f && numPUVertices >= %f\" % (pu_max, pu_min)\n # Avoid L1 saturated jets cut (from 2017 any l1 jet with a saturated tower is auto given pt=1024GeV)\n avoidSaturation_cut = \"pt < 1023.1\"\n cutStr = \" && \".join([eta_cutStr, pt_cutStr, pu_cutStr, avoidSaturation_cut])\n\n # Draw response (pT^L1/pT^Gen) for all pt bins\n tree_raw.Draw(\"rsp>>hrsp_eta_%g_%g(100,0,5)\" % (absetamin, absetamax), cutStr)\n hrsp_eta = ROOT.gROOT.FindObject(\"hrsp_eta_%g_%g\" % (absetamin, absetamax))\n hrsp_eta.SetTitle(\";response (p_{T}^{L1}/p_{T}^{Ref});\")\n if absetamin < 2.9:\n fit_result = hrsp_eta.Fit(\"gaus\", \"QER\", \"\",\n hrsp_eta.GetMean() - hrsp_eta.GetRMS(),\n hrsp_eta.GetMean() + hrsp_eta.GetRMS())\n else:\n peak = hrsp_eta.GetBinCenter(hrsp_eta.GetMaximumBin())\n fit_result = hrsp_eta.Fit(\"gaus\", \"QER\", \"\",\n peak - (0.5 * hrsp_eta.GetRMS()),\n peak + (0.5 * hrsp_eta.GetRMS()))\n\n # mean = hrsp_eta.GetFunction(\"gaus\").GetParameter(1)\n # err = hrsp_eta.GetFunction(\"gaus\").GetParError(1)\n output_f_hists.WriteTObject(hrsp_eta)\n\n # nb_pt, pt_min, pt_max = 63, 0, 252 # for GCT/Stage 1\n nb_pt, pt_min, pt_max = 512, 0, 1024 # for Stage 2\n nb_rsp, rsp_min, rsp_max = 100, 0, 5\n\n # Draw rsp (pT^L1/pT^Gen) Vs GenJet pT\n tree_raw.Draw(\"rsp:ptRef>>h2d_rsp_gen(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_rsp, rsp_min, rsp_max), cutStr)\n h2d_rsp_gen = ROOT.gROOT.FindObject(\"h2d_rsp_gen\")\n h2d_rsp_gen.SetTitle(\";p_{T}^{Ref} [GeV];response (p_{T}^{L1}/p_{T}^{Ref})\")\n output_f_hists.WriteTObject(h2d_rsp_gen)\n\n h2d_rsp_gen_norm = cu.norm_vertical_bins(h2d_rsp_gen)\n output_f_hists.WriteTObject(h2d_rsp_gen_norm)\n\n # Draw rsp (pT^L1/pT^Gen) Vs L1 pT\n tree_raw.Draw(\"rsp:pt>>h2d_rsp_l1(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_rsp, rsp_min, rsp_max), cutStr)\n h2d_rsp_l1 = ROOT.gROOT.FindObject(\"h2d_rsp_l1\")\n h2d_rsp_l1.SetTitle(\";p_{T}^{L1} [GeV];response (p_{T}^{L1}/p_{T}^{Ref})\")\n output_f_hists.WriteTObject(h2d_rsp_l1)\n\n h2d_rsp_l1_norm = cu.norm_vertical_bins(h2d_rsp_l1)\n output_f_hists.WriteTObject(h2d_rsp_l1_norm)\n\n # Draw pT^Gen Vs pT^L1\n tree_raw.Draw(\"pt:ptRef>>h2d_gen_l1(%d,%g,%g,%d,%g,%g)\" % (nb_pt, pt_min, pt_max, nb_pt, pt_min, pt_max), cutStr)\n h2d_gen_l1 = ROOT.gROOT.FindObject(\"h2d_gen_l1\")\n h2d_gen_l1.SetTitle(\";p_{T}^{Ref} [GeV];p_{T}^{L1} [GeV]\")\n output_f_hists.WriteTObject(h2d_gen_l1)",
"def ANN_efficiency_vs_PU_pT_PV(title, x_data, pT, CSV, model_noPT, model_withPT, model_withPV, ANN_noPT_Cuts, ANN_withPT_Cuts, ANN_withPV_Cuts, Ratio_Cuts, CSV_Cuts, bins, y_max, pT_Cut=200, BG=False, DrawTitle=False, LargeLegend=False):\n assert x_data.shape[1]==21, \"x_data does not contain PV. Make sure it is made from a PU sample and has shape (x, 21).\"\n\tassert x_data.shape[0] == len(pT) == len(CSV), \"data inputs need to have the same length\"\n\tassert len(ANN_noPT_Cuts) == len(ANN_withPT_Cuts) == len(ANN_withPV_Cuts) == len(Ratio_Cuts) == len(CSV_Cuts) == len(bins)-1, \"cuts need to have the same length and be compatible with amount of bins\"\n\n ran = (0,80)\n nbins = 80\n import array\n\tif BG:\n\t\tbins_ = array.array('d',[0.0, 11.0]+range(19,41,8)+[42.0, 52.0, 80])\n\telse:\n \tbins_ = array.array('d',[0.0, 11.0]+range(15,41,4)+[42.0, 52.0, 58.0, 65.0, 80])\n\n\tif pT_Cut >= 1200:\n\t\tbins_ = array.array('d',[0.0, 20.0, 40.0, 80.0])\n\n\n #make histograms of efficiency vs PU\n AllJets_Hist = rt.TH1D(\"AllJets\",\"AllJets\",nbins,ran[0],ran[1])\n ANN_noPT_Hist = rt.TH1D(\"ANN_noPT\",\"ANN_noPT\",nbins,ran[0],ran[1])\n\tANN_withPT_Hist = rt.TH1D(\"ANN_withPT\",\"ANN_withPT\",nbins,ran[0],ran[1])\n\tANN_withPV_Hist = rt.TH1D(\"ANN_withPV\",\"ANN_withPV\",nbins,ran[0],ran[1])\n Ratio_Hist = rt.TH1D(\"Ratio\",\"Ratio\",nbins,ran[0],ran[1])\n CSV_Hist = rt.TH1D(\"CSV\",\"CSV\",nbins,ran[0],ran[1])\n\n\tAllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_noPT_Hist = ANN_noPT_Hist.Rebin(len(bins_)-1,\"ANN_noPT\",bins_)\n\tANN_withPT_Hist = ANN_withPT_Hist.Rebin(len(bins_)-1,\"ANN_withPT\",bins_)\n\tANN_withPV_Hist = ANN_withPV_Hist.Rebin(len(bins_)-1,\"ANN_withPV\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n \n\tpred_y_noPT = model_noPT.predict(ANN_functional_shape(x_data))\n\tpred_y_withPT = model_withPT.predict(ANN_functional_shape(x_data)+[pT/200.])\n\tpred_y_withPV = model_withPV.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\t\n\tfor i,pT_value in enumerate(pT):\n\t\t\tif pT_value < pT_Cut: continue\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJets_Hist.Fill(x_data[i,-1])\n\t\t\tif CSV[i] >= CSV_Cuts[bin_numbers[i]]: CSV_Hist.Fill(x_data[i,-1])\n\t if pred_y_noPT[i] >= ANN_noPT_Cuts[bin_numbers[i]]: ANN_noPT_Hist.Fill(x_data[i,-1])\n\t\t\tif pred_y_withPT[i] >= ANN_withPT_Cuts[bin_numbers[i]]: ANN_withPT_Hist.Fill(x_data[i,-1])\n\t\t\tif pred_y_withPV[i] >= ANN_withPV_Cuts[bin_numbers[i]]: ANN_withPV_Hist.Fill(x_data[i,-1])\n\n\t\t\tif x_data[i,12] != 0:\n\t\t\t\tL_R = x_data[i,15]/float(x_data[i,12])\n\t\t\t\tif L_R >= Ratio_Cuts[bin_numbers[i]]: Ratio_Hist.Fill(x_data[i,-1])\n \n\t#Make Graphs and draw them\n canvas = rt.TCanvas('canvas','canvas',600,600)\n\tif DrawTitle == False: rt.gStyle.SetOptTitle(0)\n\tif LargeLegend:\n\t\tlegend = rt.TLegend(0.1,0.9,0.4,0.7)\n\telse:\n \tlegend = rt.TLegend(0.1,0.9,0.35,0.75)\n ANN_noPT_Graph = rt.TGraphAsymmErrors()\n\tANN_withPT_Graph = rt.TGraphAsymmErrors()\n\tANN_withPV_Graph = rt.TGraphAsymmErrors()\n Ratio_Graph = rt.TGraphAsymmErrors()\n CSV_Graph = rt.TGraphAsymmErrors()\n if DrawTitle: Ratio_Graph.SetTitle(title+\"_vs_PU_pT{}{}\".format('jet',pT_Cut))\n ANN_noPT_Graph.Divide(ANN_noPT_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n\tANN_withPT_Graph.Divide(ANN_withPT_Hist,AllJets_Hist,\"cl=0.683 b(1,1) 
mode\")\n\tANN_withPV_Graph.Divide(ANN_withPV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n Ratio_Graph.Divide(Ratio_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n CSV_Graph.Divide(CSV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n ANN_noPT_Graph.SetLineColor(3)\n ANN_withPT_Graph.SetLineColor(6)\n\tANN_withPV_Graph.SetLineColor(7)\n\tRatio_Graph.SetLineColor(2)\n CSV_Graph.SetLineColor(4)\n #legend.AddEntry(ANN_noPT_Graph, \"ANN without p_{T}/PV\", \"LEP\")\n\tlegend.AddEntry(ANN_noPT_Graph, \"ANN without p_{T}\", \"LEP\")\n legend.AddEntry(ANN_withPT_Graph, \"ANN with p_{T}\", \"LEP\")\n\t#legend.AddEntry(ANN_withPV_Graph, \"ANN with PV\", \"LEP\")\n\tlegend.AddEntry(Ratio_Graph, \"L4/L1\", \"LEP\")\n legend.AddEntry(CSV_Graph, \"CSV\", \"LEP\")\n Ratio_Graph.GetXaxis().SetTitle(\"#PV\")\n if BG:\n\t\tRatio_Graph.GetYaxis().SetTitle('mistag rate')\n\telse:\t\n\t\tRatio_Graph.GetYaxis().SetTitle('efficiency')\n Ratio_Graph.GetYaxis().SetTitleOffset(1.5)\n\tRatio_Graph.SetMinimum(0.)\n Ratio_Graph.SetMaximum(y_max)\n Ratio_Graph.Draw()\n ANN_noPT_Graph.Draw(\"SAME\")\n\tANN_withPT_Graph.Draw(\"SAME\")\n\t#ANN_withPV_Graph.Draw(\"SAME\")\n CSV_Graph.Draw(\"SAME\")\n legend.Draw()\n canvas.SaveAs('Thesis_Plots/'+title+\"_vs_PU_pT{}{}.png\".format('jet',pT_Cut))",
"def value(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][0]",
"def ANN_efficiency_vs_PU(title, x_data, pT, CSV, model, ANN_Cuts, Ratio_Cuts, CSV_Cuts, bins, y_max, pT_Cut=200, BG=False, DrawTitle=False):\n assert x_data.shape[1]==21, \"x_data does not contain PV. Make sure it is made from a PU sample and has shape (x, 21).\"\n\tassert x_data.shape[0] == len(pT) == len(CSV), \"data inputs need to have the same length\"\n\tassert len(ANN_Cuts) == len(Ratio_Cuts) == len(CSV_Cuts) == len(bins)-1, \"cuts need to have the same length and be compatible with amount of bins\"\n\n ran = (0,80)\n nbins = 80\n import array\n\tif BG:\n\t\tbins_ = array.array('d',[0.0, 11.0]+range(19,41,8)+[42.0, 52.0, 80])\n\telse:\n \tbins_ = array.array('d',[0.0, 11.0]+range(15,41,4)+[42.0, 52.0, 58.0, 65.0, 80])\n\n\tif pT_Cut >= 1200:\n\t\tbins_ = array.array('d',[0.0, 20.0, 40.0, 80.0])\n\n\n #make histograms of efficiency vs PU\n AllJets_Hist = rt.TH1D(\"AllJets\",\"AllJets\",nbins,ran[0],ran[1])\n ANN_Hist = rt.TH1D(\"ANN\",\"ANN\",nbins,ran[0],ran[1])\n Ratio_Hist = rt.TH1D(\"Ratio\",\"Ratio\",nbins,ran[0],ran[1])\n CSV_Hist = rt.TH1D(\"CSV\",\"CSV\",nbins,ran[0],ran[1])\n\n\tAllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_Hist = ANN_Hist.Rebin(len(bins_)-1,\"ANN\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n \n\tpred_y = model.predict(ANN_functional_shape(x_data))\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\t\n\tfor i,pT_value in enumerate(pT):\n\t\t\tif pT_value < pT_Cut: continue\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJets_Hist.Fill(x_data[i,-1])\n\t\t\tif CSV[i] >= CSV_Cuts[bin_numbers[i]]: CSV_Hist.Fill(x_data[i,-1])\n\t if pred_y[i] >= ANN_Cuts[bin_numbers[i]]: ANN_Hist.Fill(x_data[i,-1])\n\t\t\tif x_data[i,12] != 0:\n\t\t\t\tL_R = x_data[i,15]/float(x_data[i,12])\n\t\t\t\tif L_R >= Ratio_Cuts[bin_numbers[i]]: Ratio_Hist.Fill(x_data[i,-1])\n\t \n\t'''\t\t\n AllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_Hist = ANN_Hist.Rebin(len(bins_)-1,\"ANN\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n\t'''\n #Make Graphs and draw them\n canvas = rt.TCanvas('canvas','canvas',600,600)\n\tif DrawTitle == False: rt.gStyle.SetOptTitle(0)\n legend = rt.TLegend(0.1,0.9,0.35,0.75)\n ANN_Graph = rt.TGraphAsymmErrors()\n Ratio_Graph = rt.TGraphAsymmErrors()\n CSV_Graph = rt.TGraphAsymmErrors()\n if DrawTitle: Ratio_Graph.SetTitle(title+\"_vs_PU_pT{}{}\".format('jet',pT_Cut))\n ANN_Graph.Divide(ANN_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n Ratio_Graph.Divide(Ratio_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n CSV_Graph.Divide(CSV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n ANN_Graph.SetLineColor(3)\n Ratio_Graph.SetLineColor(2)\n CSV_Graph.SetLineColor(4)\n legend.AddEntry(ANN_Graph, \"ANN\", \"LEP\")\n legend.AddEntry(Ratio_Graph, \"L4/L1\", \"LEP\")\n legend.AddEntry(CSV_Graph, \"CSV\", \"LEP\")\n Ratio_Graph.GetXaxis().SetTitle(\"#PV\")\n if BG:\n\t\tRatio_Graph.GetYaxis().SetTitle('mistag rate')\n\telse:\t\n\t\tRatio_Graph.GetYaxis().SetTitle('efficiency')\n Ratio_Graph.GetYaxis().SetTitleOffset(1.5)\n\tRatio_Graph.SetMinimum(0.)\n Ratio_Graph.SetMaximum(y_max)\n Ratio_Graph.Draw()\n ANN_Graph.Draw(\"SAME\")\n CSV_Graph.Draw(\"SAME\")\n legend.Draw()\n canvas.SaveAs('Thesis_Plots/'+title+\"_vs_PU_pT{}{}.png\".format('jet',pT_Cut))",
"def __ptBin(self,pt):\n if len(self._ptbins)>0:\n return reduce(lambda x,y:x+y,map(lambda x:pt>x,self._ptbins))\n else:\n return 0",
"def do_jet_pt_plots(tree, output_dir, cut=''):\n for logz, normx in product([True, False], [True, False]):\n make_2d_plot(tree, 'httL1', HTT_L1_STR, NB_HTT, HTT_MIN, HTT_MAX, 'pt', PT_L1_STR, NB_PT, PT_MIN, PT_MAX,\n os.path.join(output_dir, 'pt_httL1.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, diagonal_line=True)\n make_2d_plot(tree, 'httL1', HTT_L1_STR, NB_HTT, HTT_MIN, HTT_MAX, 'ptRef', PT_REF_STR, NB_PT, PT_MIN, PT_MAX,\n os.path.join(output_dir, 'ptRef_httL1.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, diagonal_line=True)\n make_2d_plot(tree, 'httRef', HTT_REF_STR, NB_HTT, HTT_MIN, HTT_MAX, 'pt', PT_L1_STR, NB_PT, PT_MIN, PT_MAX,\n os.path.join(output_dir, 'pt_httRef.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, diagonal_line=True)\n make_2d_plot(tree, 'httRef', HTT_REF_STR, NB_HTT, HTT_MIN, HTT_MAX, 'ptRef', PT_REF_STR, NB_PT, PT_MIN, PT_MAX,\n os.path.join(output_dir, 'ptRef_httRef.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, diagonal_line=True)",
"def ppk_plot(data: (List[int], List[float], pd.Series, np.array),\n upper_control_limit: (int, float), lower_control_limit: (int, float),\n threshold_percent: float = 0.001,\n ax: Axis = None):\n\n data = coerce(data)\n mean = data.mean()\n std = data.std()\n\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.hist(data, density=True, label='data', alpha=0.3)\n x = np.linspace(mean - 4 * std, mean + 4 * std, 100)\n pdf = stats.norm.pdf(x, mean, std)\n ax.plot(x, pdf, label='normal fit', alpha=0.7)\n\n bottom, top = ax.get_ylim()\n\n ax.axvline(mean, linestyle='--')\n ax.text(mean, top * 1.01, s='$\\mu$', ha='center')\n\n ax.axvline(mean + std, alpha=0.6, linestyle='--')\n ax.text(mean + std, top * 1.01, s='$\\sigma$', ha='center')\n\n ax.axvline(mean - std, alpha=0.6, linestyle='--')\n ax.text(mean - std, top * 1.01, s='$-\\sigma$', ha='center')\n\n ax.axvline(mean + 2 * std, alpha=0.4, linestyle='--')\n ax.text(mean + 2 * std, top * 1.01, s='$2\\sigma$', ha='center')\n\n ax.axvline(mean - 2 * std, alpha=0.4, linestyle='--')\n ax.text(mean - 2 * std, top * 1.01, s='-$2\\sigma$', ha='center')\n\n ax.axvline(mean + 3 * std, alpha=0.2, linestyle='--')\n ax.text(mean + 3 * std, top * 1.01, s='$3\\sigma$', ha='center')\n\n ax.axvline(mean - 3 * std, alpha=0.2, linestyle='--')\n ax.text(mean - 3 * std, top * 1.01, s='-$3\\sigma$', ha='center')\n\n ax.fill_between(x, pdf, where=x < lower_control_limit, facecolor='red', alpha=0.5)\n ax.fill_between(x, pdf, where=x > upper_control_limit, facecolor='red', alpha=0.5)\n\n lower_percent = 100.0 * stats.norm.cdf(lower_control_limit, mean, std)\n lower_percent_text = f'{lower_percent:.02f}% < LCL' if lower_percent > threshold_percent else None\n\n higher_percent = 100.0 - 100.0 * stats.norm.cdf(upper_control_limit, mean, std)\n higher_percent_text = f'{higher_percent:.02f}% > UCL' if higher_percent > threshold_percent else None\n\n left, right = ax.get_xlim()\n bottom, top = ax.get_ylim()\n cpk = calc_ppk(data, upper_control_limit=upper_control_limit, lower_control_limit=lower_control_limit)\n\n lower_sigma_level = (mean - lower_control_limit) / std\n if lower_sigma_level < 6.0:\n ax.axvline(lower_control_limit, color='red', alpha=0.25, label='limits')\n ax.text(lower_control_limit, top * 0.95, s=f'$-{lower_sigma_level:.01f}\\sigma$', ha='center')\n else:\n ax.text(left, top * 0.95, s=f'limit > $-6\\sigma$', ha='left')\n\n upper_sigma_level = (upper_control_limit - mean) / std\n if upper_sigma_level < 6.0:\n ax.axvline(upper_control_limit, color='red', alpha=0.25)\n ax.text(upper_control_limit, top * 0.95, s=f'${upper_sigma_level:.01f}\\sigma$', ha='center')\n else:\n ax.text(right, top * 0.95, s=f'limit > $6\\sigma$', ha='right')\n\n strings = [f'Ppk = {cpk:.02f}']\n\n strings.append(f'$\\mu = {mean:.3g}$')\n strings.append(f'$\\sigma = {std:.3g}$')\n\n if lower_percent_text:\n strings.append(lower_percent_text)\n if higher_percent_text:\n strings.append(higher_percent_text)\n\n props = dict(boxstyle='round', facecolor='white', alpha=0.75, edgecolor='grey')\n ax.text(right - (right - left) * 0.05, 0.85 * top, '\\n'.join(strings), bbox=props, ha='right', va='top')\n\n ax.legend(loc='lower right')",
"def Efficiency_vs_pT(title,histlist, hist_all_jets,y_max,Save=False,legend_shift=False,BG=False, LargeLegend=False):\n canvas = rt.TCanvas('canvas','canvas',600,600)\n if legend_shift:\n\t\tif LargeLegend:\n\t\t\t#legend = rt.TLegend(0.1,0.1,0.4,0.3)\n\t\t\tlegend = rt.TLegend(0.6,0.9,0.7,0.9)\n\t\telse:\n \t#legend = rt.TLegend(0.1,0.1,0.35,0.25)\n\t\t\tlegend = rt.TLegend(0.65,0.75,0.9,0.9)\n else:\n\t\tif LargeLegend:\n\t\t\tlegend = rt.TLegend(0.1,0.9,0.4,0.7)\n\t\telse:\n \tlegend = rt.TLegend(0.1,0.9,0.35,0.75)\n graphlist = []\n for n,hist in enumerate(histlist):\n graphlist.append(rt.TGraphAsymmErrors())\n #if n==0: graphlist[n].SetTitle(title+\"_vs_jet-pT\")\n graphlist[n].Divide(hist[0],hist_all_jets,\"cl=0.683 b(1,1) mode\")\n legend.AddEntry(graphlist[n], histlist[n][1],\"LEP\")\n\t\tif len(hist) > 2:\n\t\t\tgraphlist[n].SetLineColor(hist[2])\n\t\telse:\n\t\t\tif n < 3:\n \t \tgraphlist[n].SetLineColor(n+2)\n\t\t\telse:\n\t\t\t\tgraphlist[n].SetLineColor(n+3)\n if n<1:\n graphlist[n].GetXaxis().SetTitle(\"jet p_{T} (GeV)\")\n if BG:\n\t\t\t\tgraphlist[n].GetYaxis().SetTitle('mistag rate')\n\t\t\telse:\n\t\t\t\tgraphlist[n].GetYaxis().SetTitle('efficiency')\n graphlist[n].GetYaxis().SetTitleOffset(1.5)\n graphlist[n].SetMinimum(0.)\n graphlist[n].SetMaximum(y_max)\n graphlist[n].Draw()\n else:\n graphlist[n].Draw(\"SAME\") \n legend.Draw()\n if Save: canvas.SaveAs(\"Thesis_Plots/\"+title+\"_vs_jet-pT.png\")",
"def ANN_Make_Binned_ROC_histograms(title,model, x_data, pT, CSV, bins, PU_range='full',addFeature=False):\n nbins = 60\n\n ANN_hist_list = []\n CSV_hist_list = []\n for bin_ in range(len(bins)-1):\n ANN_hist_list.append(rt.TH1D(\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n\n\tif addFeature == False:\n\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\telif addFeature == \"pT\":\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\telif addFeature == \"PV\":\n\t\tassert x_data.shape[1] == 21, \"wrong x_data shape: PV cannot be found\"\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\telse:\n\t\tprint \"invalid feature selection\"\n\t\treturn None\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\n for n,particle in enumerate(x_data):\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n if bin_numbers[n] == -100: continue\n ANN_hist_list[int(bin_numbers[n])].Fill(pred_y[n])\n CSV_hist_list[int(bin_numbers[n])].Fill(CSV[n])\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in ANN_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)",
"def create_paa_plot(res, paa, window):\n #TODO: plot x,y,z,total infeatures one figure\n\n original_data = res.valuefeatures\n paa_data = np.array(paa.vafeatureslue)\n \n original_len = len(original_data)\n a = 0\n if not original_len % window == 0:\n a = 1\n \n print(len(original_data))\n print(len(paa_data))\n plt.plot(figsize=(12,8))\n plt.plot(np.arange(original_len), original_data, 'o-', label='Original')\n plt.plot(np.arange(window // 2,\n (original_len + window // 2),\n window)-a, paa_data.T, 'o--', label='PAA')\n plt.vlines(np.arange(0, original_len, window),\n original_data.min(), original_data.max(), color='g', linestyles='--', linewidth=0.5)\n plt.legend(loc='best', fontsize=14)\n plt.show()",
"def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]",
"def plotdFvsLambda2(nb=10):\n x = numpy.arange(len(df_allk))\n if len(x) < nb:\n return\n xs = numpy.array_split(x, len(x)/nb+1)\n mnb = max([len(i) for i in xs])\n fig = pl.figure(figsize = (8,6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n ndx = 1\n for x in xs:\n lines = tuple()\n ax = pl.subplot(len(xs), 1, ndx)\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n for dir in ['left', 'right', 'top', 'bottom']:\n if dir == 'left':\n ax.yaxis.set_ticks_position(dir)\n else:\n ax.spines[dir].set_color('none')\n pl.yticks(fontsize=10)\n ax.xaxis.set_ticks([])\n for i in x+0.5*width*len(P.methods):\n ax.annotate('$\\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')\n pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))\n ndx += 1\n leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\\mathrm{\\Delta G\\/%s\\/}\\mathit{vs.}\\/\\mathrm{lambda\\/pair}$' % P.units, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')\n pl.close(fig)\n return",
"def paper_plot_final_tstats(self, ftstat, indexes, freq=None, params = None, er = None, domain = None, yrange = None, label= None, lande=None,\n xi_range=None, s_range=None, pred=None, compare=None, pos =None):\n\n if pos is None:\n pos = 'both'\n\n multiply_se_by = 1.96\n\n if label is None:\n label = ftstat\n\n no_undertext = False\n no_legend = False\n # if ftstat == '2a_frac_fixed_scaled_per_mut_input':\n # no_legend = True\n\n possible_compare_stats = ['2a_frac_fixed_scaled_per_mut_input']\n\n\n if compare is None:\n compare = False\n if pos != 'both':\n compare = False\n\n if er is None:\n er = True\n if freq is None:\n freq = False\n\n if freq:\n loglinear = False\n else:\n loglinear = True\n\n if s_range is None:\n sr = False\n else:\n sr = True\n if xi_range is None:\n xir = False\n else:\n xir = True\n if pred is None:\n pred = False\n legend_with_dclass_param = False\n if params is not None:\n legend_with_dclass_param = True\n\n data_classes = [self.data_classes[indi] for indi in indexes]\n\n if lande is None:\n if data_classes[0]._THERE_ARE_FINAL_TRAJ_NON_LANDE:\n lande = False\n else:\n lande = True\n\n only_lande = False\n for dc in data_classes:\n if 'U' not in dc.param_dict:\n only_lande = True\n lande = True\n #if there are no nonlande trajectories then we must use the Lande ones\n if not data_classes[0]._THERE_ARE_FINAL_TRAJ_NON_LANDE:\n only_lande = True\n lande = True\n\n if only_lande:\n undertext_params = [['N', 'Vs'], ['shift_s0', 'sigma_0_del']]\n else:\n undertext_params = [['N', 'U'], ['shift_s0', 'sigma_0_del'], ['E2Ns', 'V2Ns']]\n\n if ftstat not in data_classes[0]._ftstats:\n print(str(ftstat) + ' not in dataclass ')\n return\n\n if freq or ftstat not in possible_compare_stats:# or len(data_classes) >1:\n compare = False\n\n\n plot_dict = dict()\n plotspecs = dict()\n plotspecs['legend_anchor'] = 'upper left'\n plotspecs['legend_loc'] = (1.02, 1.03)\n plotspecs['fsize'] = (28, 16)\n plotspecs['dpi'] = 200\n plotspecs['linewidth'] = 1.5\n plotspecs['ticksize'] = 30\n plotspecs['legend_font'] = {'size': '54'}\n plotspecs['axis_font'] = {'fontname': 'Arial', 'size': '28'}\n plot_dict['linestyles'] = ['-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-']\n plotspecs['marker_size'] = 15\n plotspecs['cap_size'] = 20\n #plotspecs['nxticks'] = 2\n plotspecs['undertext_font'] = {'color': 'black', 'weight': 'roman', 'size': '16'}\n extra_text = ''\n\n #print groupings\n\n\n if not no_undertext and len(data_classes)==1:\n undertext = []\n final = True\n #str(int(data_class.number_population_simulations()))\n if not lande and not only_lande:\n number_runs_string = \"Obtained from \" + str(int(data_classes[0].number_allele_pairs(False,final=final))) + \\\n \" allele traj's using NonLande D(t) averaged over \" \\\n +str(int(data_classes[0].number_population_simulations()))\\\n + \" population sims with parameters:\"\n elif lande and only_lande:\n number_runs_string = \"Obtained from \" + str(int(data_classes[0].number_allele_pairs(True, final=final))) + \\\n \" allele traj's for each effect size, with Lande D(t) and parameters:\"\n elif lande and not only_lande:\n number_runs_string1st = \"NonLande obtained from \" + str(int(data_classes[0].number_allele_pairs(False,final=final))) + \\\n \" allele traj's with NonLande D(t) averaged over \" \\\n +str(int(data_classes[0].number_population_simulations()))+ \" population sims\"\n undertext.append(number_runs_string1st)\n number_runs_string = \"Lande obtained from \" + 
str(int(data_classes[0].number_allele_pairs(True, final=final))) + \\\n \" allele traj's for each effect size, with corresponding Lande D(t)\"\n undertext.append(number_runs_string)\n for listi in undertext_params:\n text_list = self._plot_text(index_list=[indexes[0]],params_list=listi)\n if text_list:\n text_string = ', '.join(text_list)\n undertext.append(text_string )\n plot_dict['undertext'] = undertext#data_class.plot_text()\n\n if not no_legend:\n if len(data_classes) ==1:\n if compare:\n if ftstat == '2a_frac_fixed_scaled_per_mut_input':\n if not only_lande:\n anal_name = r\"$ (1+A)\\cdot f(a)$\"\n else:\n anal_name = r\"$ f(a)$\"\n else:\n anal_name ='Analytic'\n plot_dict['ynames'] = ['Simulations', anal_name]\n if lande and not only_lande:\n plot_dict['ynames'] = ['NonLande', 'lande']\n if compare:\n plot_dict['ynames'] = ['NonLande', 'lande','Analytic']\n else:\n if lande and not only_lande:\n plot_dict['groupings_labels_within'] = ['NonLande', 'lande']\n if compare:\n plot_dict['groupings_labels_within'] = ['NonLande', 'lande','Analytic']\n if compare:\n plot_dict['groupings_labels_within'] = ['Simulations', 'Analytic']\n\n if freq:\n if pred:\n plot_dict['xlabel'] = r'$x_i $ percentile'\n else:\n plot_dict['xlabel'] = r'$x_i$'\n else:\n plot_dict['xlabel'] = \"Effect size squared (\" + r\"$S=a^2$\" + ')'\n if pred:\n if not legend_with_dclass_param:\n plot_dict['legend_title'] = 'Percentile of \\n initial frequency_pos'\n\n\n\n savedir = os.path.join(self.base_dir, 'ftstats')\n if pred:\n savedir = os.path.join(savedir, 'pred')\n if freq:\n savedir = os.path.join(savedir, 'S_sorted')\n else:\n savedir = os.path.join(savedir, 'XI_sorted')\n\n if pos == 'both':\n label = 'dpn_' + label\n savedir = os.path.join(savedir, 'combined')\n elif pos == 'pos':\n label ='p_' + label\n savedir = os.path.join(savedir,'positives')\n else:\n label = 'n_' + label\n savedir = os.path.join(savedir, 'negatives')\n\n\n if domain is None:\n if not freq:\n epsl = 0.05\n epsh =2\n domain = [0.1-epsl,100+epsh ]\n\n plot_dict['domain']= domain\n plot_dict['yrange'] = yrange\n\n plot_dict['ylabel'] = self.name_class.yname(ftstat)\n\n if ftstat == 'x_per_seg_var' or ftstat == 'x_per_seg_var':\n #plot_dict['ylabel'] = 'Average increased frequency of\\n aligned alleles'\n if pos == 'both':\n plot_dict['ylabel'] = 'Increased fixation prob of aligned\\n alleles per seg variant'\n else:\n plot_dict['ylabel'] = 'Average fixation probability'\n\n\n plot_dict['marker'] = True\n if compare and (not lande or only_lande):\n if len(data_classes) == 1:\n plot_dict['colors'] = ['deepskyblue','black']\n\n\n ts_string = '_' +ftstat\n _mylabel = ts_string\n\n\n less = False\n\n x = []\n y = []\n yer = []\n ynames = []\n\n #maybe\n x_other = []\n y_other = []\n yer_other = []\n ynames_other = []\n\n\n\n tstati = ftstat\n\n\n for data_class in data_classes:\n\n\n if 'var_0' in data_class.param_dict:\n var_0 = data_class.param_dict['var_0']\n else:\n var_0 = data_class.param_dict['sigma_0'] ** 2\n N = data_class.param_dict['N']\n Vs = data_class.param_dict['Vs']\n var_0_delta_square = var_0 * 2 * N / float(Vs)\n sig_0_del = np.sqrt(var_0_delta_square)\n\n if 'shift_s0' in data_class.param_dict:\n D_sig_0 = data_class.param_dict['shift_s0']\n D_del = sig_0_del * D_sig_0\n\n x_1 = defaultdict(list)\n y_1 = defaultdict(list)\n yer_1 = defaultdict(list)\n ynames_1 = dict()\n\n x_theory_1 = []\n y_theory_1 = []\n\n if lande and not only_lande:\n x_1_other = defaultdict(list)\n y_1_other = defaultdict(list)\n yer_1_other = 
defaultdict(list)\n ynames_1_other = dict()\n\n\n triples = deepcopy(data_class.tuples)\n if xir:\n remov = []\n xilow = xi_range[0]\n xihigh = xi_range[1]\n for trip in triples:\n if pred:\n XPI = data_class.xpercentile_dict[trip]\n else:\n XPI = trip[1]\n if XPI < xilow or XPI > xihigh:\n remov.append(trip)\n for trip in remov:\n triples.remove(trip)\n if sr:\n #print 's_range', s_range\n remov = []\n slow = s_range[0]\n shigh = s_range[1]\n for trip in triples:\n s = trip[0]\n if s < slow or s > shigh:\n remov.append(trip)\n for trip in remov:\n triples.remove(trip)\n\n name = ''\n lib = ''\n if params is None:\n # if len(data_classes) > 1:\n # name += str(data_class.index)\n lib += str(data_class.index)\n else:\n for param in params:\n try:\n name += param + ' = ' + '{0:.2f}'.format(data_class.param_dict[param]) + ' '\n lib += param + '{0:.0f}'.format(data_class.param_dict[param]) + '_'\n except KeyError:\n print('KeyError: ' + param)\n\n\n stat_dict = data_class.read_tfstats(tuples=triples, lande=only_lande)\n\n if lande and not only_lande:\n stat_dict_other = data_class.read_tfstats(tuples=triples, lande=True)\n print('lande stat dict')\n\n\n for triple in triples:\n\n name_1 = ''\n\n\n y_val_2 = stat_dict[triple][tstati][pos]['mean']\n y_val_er_2 = stat_dict[triple][tstati][pos]['se']\n if y_val_2 == 0.0:\n print('yval is zero for S ', triple[0])\n if lande and not only_lande:\n y_val_2_other = stat_dict_other[triple][tstati][pos]['mean']\n y_val_er_2_other = stat_dict_other[triple][tstati][pos]['se']\n XI = triple[1]\n if XI < 0:\n less = True\n XI = -XI\n else:\n less = False\n S = triple[0]\n if freq:\n lenformat = 1\n if pred and not less:\n val = data_class.xpercentile_dict[triple]\n else:\n val = XI\n # if root_s:\n # name_1 = r'$\\sqrt{S} =$ '\n # else:\n # name_1 = r'$S =$ '\n if legend_with_dclass_param:\n key = data_class.param_dict[params[0]]\n else:\n key = S\n else:\n #key =XI\n val = S\n lenformat = 3\n if xir:\n if xi_range[1] < 1:\n lenformat = 2\n if pred:\n key = data_class.xpercentile_dict[triple]\n if key < 0.91:\n key = round(key,1)\n elif key <0.991:\n key = round(key,2)\n else:\n key = round(key,3)\n if key < 0:\n if legend_with_dclass_param:\n key = data_class.param_dict[params[0]]\n else:\n key = -key\n # name_1 = ''\n else:\n if legend_with_dclass_param:\n key = data_class.param_dict[params[0]]\n else:\n key = XI\n # name_1 = ''\n if less:\n lenformat =0\n x_1[key].append(val)\n y_1[key].append(y_val_2)\n\n if lande and not only_lande:\n x_1_other[key].append(val)\n y_1_other[key].append(y_val_2_other)\n\n if er:\n yer_1[key].append(y_val_er_2*multiply_se_by)\n if lande and not only_lande:\n yer_1_other[key].append(y_val_er_2_other*multiply_se_by)\n if legend_with_dclass_param:\n name_1 += self.name_class.param_text(value=key, digits=2)\n else:\n name_1 += self.name_class.param_text(value=key, digits=lenformat) + ' ' + name\n\n\n name_2_lande = 'lande: '+ name_1\n\n if name_1 not in ynames_1:\n ynames_1[key] = name_1\n if lande and not only_lande:\n if name_2_lande not in ynames_1_other:\n ynames_1_other[key] = name_2_lande\n\n x.append(x_1)\n y.append(y_1)\n yer.append(yer_1)\n ynames.append(ynames_1)\n if lande and not only_lande:\n x_other.append(x_1_other)\n y_other.append(y_1_other)\n yer_other.append(yer_1_other)\n ynames_other.append(ynames_1_other)\n\n\n plot_dict['savedir'] = savedir\n\n if compare:\n kkey = list(x_1.keys())[0]\n smin = min(x_1[kkey])\n smax = max(x_1[kkey])\n xtheorylowers = [xx for xx in np.linspace(smin, smin+1, 50)]\n 
xtheoryhighers = [xx for xx in np.linspace(smin+1, smax, 100)]\n x_theory = xtheorylowers+xtheoryhighers[1:]\n s_theory = [ssi for ssi in x_theory]\n y_theory = [0 for _ in x_theory]\n if ftstat == '2a_frac_fixed_scaled_per_mut_input':\n if pos == 'both':\n myA = 0\n if not only_lande:\n myA = data_class.get_A()\n y_theory = [(1.0+myA)*myf(np.sqrt(ss)) for ss\n in s_theory]\n\n x_theory_1.append(x_theory)\n y_theory_1.append(y_theory)\n\n _mylabel = _mylabel + lib\n\n if len(data_classes)>1:\n if less:\n if not freq:\n plot_dict['legend_title'] = r'Shift (units $\\sigma_0$)'\n if legend_with_dclass_param:\n plot_dict['legend_title'] = self.name_class.param_text(name=params[0])\n\n x_2 =[]\n y_2 =[]\n yer_2 = []\n ynames_2 = []\n jj = 0\n groupings = []\n keys_list = []\n # for k in xrange(len(x)):\n # keys_list+=x[k].keys()\n # keys_list = list(set(keys_list))\n\n for key in sorted(x[0]):\n seti = set()\n for k in range(len(x)):\n if er:\n zipi = list(zip(*sorted(zip(x[k][key], y[k][key], yer[k][key]))))\n x_temp = [xi for xi in zipi[0]]\n y_temp = [yi for yi in zipi[1]]\n yer_temp = zipi[2]\n yer_2.append(yer_temp)\n else:\n zipi = list(zip(*sorted(zip(x[k][key], y[k][key]))))\n x_temp = [xi for xi in zipi[0]]\n y_temp = [yi for yi in zipi[1]]\n x_2.append(x_temp)\n y_2.append(y_temp)\n ynames_2.append(ynames[k][key])\n seti.add(jj)\n jj += 1\n\n if lande and not only_lande:\n if er:\n zipi = list(zip(*sorted(zip(x_other[k][key], y_other[k][key], yer_other[k][key]))))\n x_temp = [xi for xi in zipi[0]]\n y_temp = [yi for yi in zipi[1]]\n yer_temp = zipi[2]\n yer_2.append(yer_temp)\n else:\n zipi = list(zip(*sorted(zip(x_other[k][key], y_other[k][key]))))\n x_temp = [xi for xi in zipi[0]]\n y_temp = [yi for yi in zipi[1]]\n x_2.append(x_temp)\n y_2.append(y_temp)\n ynames_2.append(ynames_other[k][key])\n seti.add(jj)\n jj += 1\n\n if compare:\n if x_theory_1:\n x_2.append(x_theory_1[k])\n y_2.append(y_theory_1[k])\n seti.add(jj)\n jj += 1\n if er:\n yer_2.append([0 for _ in x_theory_1[k]])\n\n groupings.append(seti)\n\n\n plot_dict['x'] = x_2\n plot_dict['y'] = y_2\n if er:\n plot_dict['yer'] = yer_2\n\n #experimenting\n # if len(data_classes) >1:\n # plot_dict['groupings'] = groupings\n plot_dict['groupings'] = groupings\n\n\n linestyles =['','-']\n markerstyles = ['o','']\n if lande and not only_lande:\n markerstyles = ['o', '*','-']\n linestyles = ['','','']\n if compare:\n linestyles = ['','-','']\n markerstyles =['o','','*']\n if lande and not only_lande:\n linestyles = ['', '-','--']\n markerstyles = ['o', '','']\n\n plot_dict['linestyles'] = linestyles #+linestyles+linestyles+linestyles\n plot_dict['markerstyles'] = markerstyles #+markerstyles+markerstyles+markerstyles\n\n size_group = len(groupings[0])\n if label is None:\n if len(_mylabel) <30:\n plot_dict['label'] = _mylabel +'_many_cl'\n else:\n plot_dict['label'] = ftstat\n else:\n plot_dict['label'] = label #ftstat +'_' + label\n\n if freq:\n plot_dict['label']+= '_xi_x'\n\n\n if pred:\n plot_dict['label'] += '_pred'\n\n if compare:\n plot_dict['label'] += '_comp'\n\n\n\n #List with [text_top, text_bottom] containing relevant parameters\n # if len(data_classes) == 1:\n # if extra_text:\n # undertext.append(extra_text)\n #\n # if not no_undertext:\n # plot_dict['undertext'] = undertext\n\n if len(data_classes)>1:\n plot_dict['ynames'] = ynames_2\n\n if loglinear:\n plotspecs['xlog'] = True\n\n\n plot_dict['plotspecs'] = plotspecs\n\n plot_many_y(**plot_dict)",
"def pr_box(eta=1, name=False):\n outcomes = list(product([0, 1], repeat=4))\n pmf = [((1 + eta) / 16 if (x * y == a ^ b) else (1 - eta) / 16) for x, y, a, b in outcomes]\n pr = Distribution(outcomes, pmf)\n\n if name:\n pr.set_rv_names(\"XYAB\")\n\n return pr",
"def plot_tttdprc(data_frame):\n figtt, axtt = plot_var(\n data_frame=data_frame,\n x_var=\"distance\",\n y_var=\"totTT %\",\n label_var=\"mpr\",\n pivot=\"flow\",\n x_label=\"Distance [m]\",\n y_label=r\"Change in Total TT [\\%]\",\n t_label=\"Flow [veh/h]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n return figtt, axtt",
"def make_eta(eta_step_bucket, npart, hist_rule=\"square-root\"):\n\n lowbound = np.min(eta_step_bucket)\n upbound = np.max(eta_step_bucket) + 1e-10\n pts = len(eta_step_bucket)\n if hist_rule == \"square-root\":\n hist_num = int(np.sqrt(pts))\n elif hist_rule == \"sturges\":\n hist_num = int(np.log2(pts)) + 1\n elif hist_rule == \"rice-rule\":\n hist_num = int(2 * pts ** (1 / 3))\n eta_hist = np.zeros(hist_num)\n eta_hist, bins = np.histogram(\n np.array(eta_step_bucket), bins=np.linspace(lowbound, upbound, num=hist_num + 1)\n )\n # plt.figure()\n # _=plt.hist(np.array(eta_step_bucket),bins=np.linspace(lowbound, upbound, num=hist_num+1))\n # plt.title('Input eta histogram')\n eta_hist = eta_hist / np.sum(eta_hist)\n\n # make cdf\n eta_cdf = np.zeros(eta_hist.shape)\n eta_cdf[0] = eta_hist[0]\n for j in range(1, hist_num):\n eta_cdf[j] = eta_hist[j] + eta_cdf[j - 1]\n eta_cdf = np.concatenate((np.zeros(1), eta_cdf))\n\n # make eta\n x = np.random.rand(npart)\n eta_sampled = np.interp(x, eta_cdf, bins)\n # plt.figure()\n # _=plt.hist(eta_sampled,bins=np.linspace(lowbound, upbound, num=hist_num+1))\n # plt.title('Sampled eta histogram')\n return eta_sampled",
"def getHistPta(self, name, **kwargs):\n hists = []\n format_string = \"{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[jetpT]:02d}TrkPt{0[trkpT]:02d}\"\n\n for i in range(self._range[0], self._range[1]):\n if \"dir\" in kwargs:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": kwargs[\"dir\"],\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"jetpT\": i,\n \"trkpT\": j,\n }\n )\n ).Clone()\n for j in range(0, 11)\n ] # Get jT histograms from file an array\n else:\n hist = [\n self._f.Get(\n format_string.format(\n {\n \"dir\": self._directory,\n \"histname\": name,\n \"NFin\": self._NFIN,\n \"jetpT\": i,\n \"trkpT\": j,\n }\n )\n ).Clone()\n for j in range(0, 11)\n ] # Get jT histograms from file an array\n hists.append(hist)\n # print('{0[dir]}/{0[histname]}/{0[histname]}NFin{0[NFin]:02d}JetPt{0[pT]:02d}'.format({'dir':self._directory, 'histname':name,'NFin':self._NFIN,'pT':1}))\n\n # Get Jet Pt bins\n jetPt = parse_jet_pt_bins(hist)\n\n # Get Track pt Bins\n trkPt = parse_jet_pt_bins(search=\"constituent\")\n\n # print(len(hist))\n # print(hist)\n # print(jetPt)\n for hist, N, bgN in zip(hists, self._measN, self._measBgN):\n for h in hist:\n h.Sumw2()\n # print(\"Rebinning {} by {} in set {} that has {} bins\".format(h.GetTitle(), self._rebin, self._name, h.GetNbinsX()))\n h.Rebin(self._rebin)\n print(kwargs)\n if self.properties.get(\"isWeight\", False):\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n else:\n if kwargs.get(\"isBg\", False):\n h.SetLineColor(self.properties.get(\"color\", 1) + 1)\n h.SetMarkerColor(self.properties.get(\"color\", 1) + 1)\n h.Scale(1.0 / bgN, \"width\")\n print(\"{} is bg\".format(name))\n else:\n h.SetLineColor(self.properties.get(\"color\", 1))\n h.SetMarkerColor(self.properties.get(\"color\", 1))\n h.Scale(1.0 / N, \"width\")\n\n h.SetMarkerStyle(self.properties.get(\"style\", 24))\n h.SetMarkerSize(0.5)\n h.SetLineColor(1)\n\n if kwargs.get(\"jetpt\", False):\n return hist, jetPt, trkPt\n else:\n return hist",
"def pospdf(self, neurons='all', dim='y', edges=None, nbins=10, stats=False, labels=True,\n a=None, figsize=(7.5, 6.5)):\n if neurons == 'all':\n neurons = list(self.alln.values())\n elif neurons == 'quiet':\n neurons = list(self.qn.values())\n elif neurons == 'active':\n neurons = list(self.n.values())\n dimi = {'x':0, 'y':1}[dim]\n p = [ n.pos[dimi] for n in neurons ] # all position values\n if edges != None:\n nbins = len(edges) - 1\n bins = edges # assume it includes rightmost bin edge\n else:\n nbins = max(nbins, 2*intround(np.sqrt(self.nneurons)))\n bins = nbins\n n, p = np.histogram(p, bins=bins) # p includes rightmost bin edge\n binwidth = p[1] - p[0] # take width of first bin in p\n\n if stats:\n mean = np.mean(p)\n median = np.median(p)\n argmode = n.argmax()\n mode = p[argmode] + binwidth / 2 # middle of tallest bin\n stdev = np.std(p)\n\n if a == None:\n f = pl.figure(figsize=figsize)\n a = f.add_subplot(111)\n else: # add to existing axes\n a.hold(True)\n f = pl.gcf()\n\n # use CCWHITEDICT1 for familiarity with len 10 1-based id to colour mapping\n #color = CCWHITEDICT1[int(self.id)]\n color = 'k'\n\n # exclude rightmost bin edge in p\n a.bar(left=p[:-1], height=n, width=binwidth, bottom=0, color=color, ec=color)\n titlestr = lastcmd()\n gcfm().window.setWindowTitle(titlestr)\n if labels:\n a.set_title(titlestr)\n a.set_xlabel('neuron %s position (um)' % dim)\n a.set_ylabel('neuron count')\n\n if stats:\n # add stuff to top right of plot:\n uns = get_ipython().user_ns\n a.text(0.99, 0.99, 'mean = %.3f\\n'\n 'median = %.3f\\n'\n 'mode = %.3f\\n'\n 'stdev = %.3f\\n'\n 'minrate = %.2f Hz\\n'\n 'nneurons = %d\\n'\n 'dt = %d min'\n % (mean, median, mode, stdev,\n uns['MINRATE'], self.nneurons, intround(self.dtmin)),\n transform = a.transAxes,\n horizontalalignment='right',\n verticalalignment='top')\n f.tight_layout(pad=0.3) # crop figure to contents\n f.canvas.draw() # this is needed if a != None when passed as arg\n return a",
"def plot_historical_var(port_returns, var_p, num_plot_points):\n\n # create a numpy array of the bins to use for plotting the Historical VaR, based on the maximum and minimum values\n # of the portfolio returns, and the number of plot points to include\n bins = np.linspace(np.sort(port_returns)[0], np.sort(port_returns)[-1], num_plot_points)\n\n figure = plt.figure()\n axis = figure.add_subplot(111)\n\n axis.hist(np.sort(port_returns), bins, label='Distribution of Returns')\n axis.axvline(x=var_p, ymin=0, color='r', label='Historical VaR cutoff value')\n axis.legend(loc='upper left')\n axis.set_xlabel('Daily Returns')\n axis.set_ylabel('Frequency')\n axis.set_title('Frequency vs Daily Returns')\n\n return figure",
"def testTempsPfunc(nMax, p, nomMethode, fois = 10, **kwargs):\r\n n = np.linspace(nMax / 10, nMax, 10, dtype = int)\r\n res = np.zeros(n.shape)\r\n progressBar = ProgressBar(maxValue = n.size * fois)\r\n \r\n for ni in range(n.size):\r\n for f in range(fois):\r\n progressBar.update(ni * fois + f + 1)\r\n graphe = Graphe(nbSommets = n[ni], probaArete = p(n[ni]))\r\n start = t.process_time()\r\n getattr(graphe, nomMethode)(**kwargs)\r\n res[ni] += t.process_time() - start\r\n print(\"\")\r\n \r\n return res / fois, n",
"def p_plot(data,pv_index=0,alpha=0.05):\n ####if it's a pd.dataframe, rename to col header\n if isinstance(data, pd.DataFrame):\n if isinstance(pv_index, int):\n pv_index = data.columns.get_values()[pv_index]\n data =data.rename(columns ={pv_index: \"p_value\"})\n if not (np.issubdtype(data['p_value'].dtypes, np.number)):\n raise TypeError(\"Please ensure you have specified the column index of numeric p-values.\")\n ###or make a vector a pd.dataframe\n else:\n data = pd.DataFrame({\"p_value\": data})\n \n if (data[\"p_value\"].max()> 1) or (data[\"p_value\"].max()< 0):\n raise ProbabilityError(\"One or more p-values is not between 0 and 1!\")\n \n m = len(data['p_value'])\n\n data = data.sort_values('p_value',ascending=True)\n data['rank'] = np.arange(1,len(data['p_value'])+1)\n data['critical_value'] = data['rank']*alpha/m\n\n fig = plt.clf()\n plt.scatter(data['rank'],data['p_value'],color='black')\n plt.axhline(y=alpha,label='Bonferroni')\n plt.plot(data['rank'],data['critical_value'],label='BH',color='red')\n plt.legend()\n plt.title(\"Bonferroni vs BH\")\n plt.xlabel(\"Rank\")\n plt.ylabel(\"p(k)\")\n return fig",
"def comp_vel(p1=database['K+'], p2=database['pi+'], p3=database['p+'], pmin=0, pmax=80):\r\n p_range = np.linspace(pmin, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n m3 = p3.mass\r\n v1, v2, v3 = [], [], []\r\n for p in p_range:\r\n v1.append(c*beta(p, m1))\r\n v2.append(c*beta(p, m2))\r\n v3.append(c*beta(p, m3))\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n p1_name = r'K$^+$'\r\n p2_name = r'$\\pi^+$'\r\n p3_name = r'p$^+$'\r\n ax.plot(p_range, v1, 'r', label=p1_name)\r\n ax.plot(p_range, v2, 'b', label=p2_name)\r\n ax.plot(p_range, v3, 'g', label=p3_name)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n ax.set_ylabel(r'v / $ms^{-1}$', fontsize=20)\r\n ax.axvline(75, color='k', label='p = 75 GeV')\r\n ax.set_xticks(np.arange(pmin, pmax+1, 1))\r\n ax.set_xticklabels(np.arange(pmin, pmax+1, 1))\r\n ax.grid()\r\n ax.minorticks_on()\r\n ax.set_xlim(pmin, pmax)\r\n# ax.set_ylim(np.min(v1+v2))\r\n ax.legend(fontsize=20)\r\n plt.show\r\n return",
"def __init__(self,ptbins,etabins,data=None):\n self._ptbins = ptbins\n self._etabins = etabins\n if data is not None:\n self._data = data\n else:\n self._data = [ [ (0,0) for i in range(len(self._etabins)+1) ] for i in range(len(self._ptbins)+1) ]\n self.__check()",
"def ptpresionagua(self,prof_pt): #getter que halla la presion de poros en un punto\r\n p_agua=0.0\r\n if prof_pt<self.n_fret:\r\n p_agua=0.0\r\n pass\r\n else:\r\n p_agua=(prof_pt-self.n_fret)*self.gamma_h20\r\n return p_agua",
"def plot_VTx_variance(self, ax):\n V = self.V\n A = self.A\n b = self.b\n Ax = self.Ax\n x_ls_no = solve(A,b)\n x_ls = solve(A,Ax)\n\n ax.plot(dot(V.T, x_ls), 'r-', label='clean', lw=2.0)\n ax.plot(dot(V.T, x_ls_no), 'ko-', label='noisy')\n ax.set_xlabel(r'$i$')\n ax.set_title(r'$\\vec{v}_i^T \\vec{x}_{LS}$')\n ax.grid()\n leg = ax.legend(loc='upper center')\n leg.get_frame().set_alpha(0.5)",
"def plot_effective_beta(t, recalled_ctx, ctx, ctx_test_env, ax=None):\n if ax is None:\n ax = plt.gca()\n\n ax.set_prop_cycle('color', sns.color_palette(\"husl\", ctx_test_env.n))\n y = np.sum(recalled_ctx * ctx, axis=1)\n for i in range(1, ctx_test_env.n):\n sel = (t > i) & (t <= i + 1)\n ax.plot(t[sel], y[sel])\n\n ax.axhline(y=ctx_test_env.beta, c='k', ls='--')\n ax.set_xlabel(r\"Time $t/\\mathrm{s}$\")\n ax.set_ylabel(r\"$\\beta'$\")\n ax.set_yticks([0, ctx_test_env.beta, 1])",
"def plot_historical_var(port_returns, var_p, num_plot_points):\n\n # sort the array of the portfolio returns in ascending order\n sorted_returns = sorted(port_returns, reverse=False)\n\n # create a numpy array of the bins to use for plotting the Historical VaR, based on the maximum and minimum values\n # of the portfolio returns, and the number of plot points to include\n bins = np.linspace(sorted_returns[0], sorted_returns[-1], num_plot_points)\n\n plt.hist(port_returns, bins, label='Distribution of Returns')\n plt.axvline(x=var_p, ymin=0, color='r', label='Historical VaR cutoff point')\n plt.legend(loc='upper left')\n plt.xlabel('Daily Returns')\n plt.ylabel('Frequency')\n plt.title('Frequency vs Daily Returns')\n plt.show()",
"def plotvar(x, y, var, weights, NBINS = 70, title = '', targetdir = '.'):\n bins = np.linspace(np.percentile(x, 0.5), np.percentile(x, 99), NBINS)\n plot_reweight_result(x, y, bins, weights, title = title, xlabel = var)\n plt.savefig(f'{targetdir}/{var}.pdf', bbox_inches='tight')\n plt.close()"
]
| [
"0.73201025",
"0.62735224",
"0.60859984",
"0.5668642",
"0.5492072",
"0.513802",
"0.49961162",
"0.49354777",
"0.48562816",
"0.4804888",
"0.4779778",
"0.47651786",
"0.4745427",
"0.47441676",
"0.47035292",
"0.46883026",
"0.4685897",
"0.46459523",
"0.46423784",
"0.46298364",
"0.46218714",
"0.46069688",
"0.4603755",
"0.46015462",
"0.45963266",
"0.45896795",
"0.45835245",
"0.4542574",
"0.45411295",
"0.45398447"
]
| 0.7351307 | 0 |
A loading pattern that just prints '.' to the terminal | def default_loading_pattern(stream_writer: Optional[StreamWriter] = None, loading_pattern_rate: float = 0.5) -> None:
stream_writer = stream_writer or StreamWriter(sys.stderr)
stream_writer.write_str(".")
stream_writer.flush()
sleep(loading_pattern_rate) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loading(delay):\r\n\r\n for i in range(3):\r\n\r\n print \".\",\r\n sys.stdout.flush()\r\n time.sleep(delay)\r\n\r\n print(\"\")",
"def dot():\n print_message(\".\")",
"def dot():\n\n sys.stdout.write('.')\n sys.stdout.flush()",
"def animate():\n for c in itertools.cycle(['|', '/', '-', '\\\\']):\n if done:\n break\n sys.stdout.write('\\rLoading ' + c)\n sys.stdout.flush()\n time.sleep(0.1)\n #prefer sys.stdout instead of print for continuously updating\n #the Loading animation",
"def print_dot(interval):\n global _dot_counter\n b = False\n with _dot_counter.get_lock():\n _dot_counter.value += 1\n b = _dot_counter.value % interval == 0\n if b:\n print(\".\", end='')\n sys.stdout.flush()",
"def intro_dots():\n i = 0\n time.sleep(0.6)\n while i != 3:\n color.write(\".\")\n time.sleep(0.3)\n i += 1\n print(\"\")",
"def init_dot_format():\n ifaint.add_format(_load_dot, None, \"GraphViz dot\", \"dot\")",
"def printDot(system,out):\n print_header(out)\n #printVariablesDot(system,out)\n printRulesDot(system,out)\n print_footer(out)",
"def dotted_format(cls):\n\n data_to_write = \".\" + \"\\n.\".join(Settings.domains)\n\n print(\"Generation of %s\" % Settings.dotted_file, end=\" \")\n Helpers.File(Settings.dotted_file).write(data_to_write, overwrite=True)\n print(Settings.done)",
"def animate():\n for c in itertools.cycle(['|', '/', '-', '\\\\']):\n if done:\n break\n sys.stdout.write('\\rloading ' + c)\n sys.stdout.flush()\n time.sleep(0.1)\n sys.stdout.write('\\rDone! ')",
"def fix_missing_period(line):\n if \"@highlight\" in line: return line\n if line==\"\": return line\n if line[-1] in END_TOKENS: return line\n # print line[-1]\n return line + \" .\"",
"def escape_dot(s):\n\treturn s. \\\n\t\treplace(\"{\", \"\\\\{\").\\\n\t\treplace(\"}\", \"\\\\}\").\\\n\t\treplace(\"\\n\", \"\").\\\n\t\treplace(\"\\r\", \"\")",
"def dot_parser(self, start_index, line1, text, index):\n if text[index].startswith('*'):\n # if start_index > 5: start_index = 3\n print '-'.rjust(start_index) + line1.lstrip('.')\n return start_index\n line1_index_count = len(line1) - len(line1.lstrip('.'))\n j = index\n while not text[j].startswith('.'):\n if j == len(text) - 1:\n print '-'.rjust(start_index), line1.lstrip('.')\n print '-'.rjust(start_index - 1), text[j]\n exit(0)\n j += 1\n line2_index_count = len(text[j]) - len(text[j].lstrip('.'))\n\n if line1_index_count >= line2_index_count:\n print '-'.rjust(start_index) + line1.lstrip('.')\n else:\n print '+'.rjust(start_index) + line1.lstrip('.')\n start_index = start_index + 1\n\n return start_index",
"def is_dot(f):\n return f.startswith('.')",
"def part_1():\n print(\"You finally get out of the forest\")\n time.sleep(1)\n print(\"You see a giant frost spider in the distance\")\n print(r\"\"\"\n (\n )\n (\n /\\ .-\" \"-. /\\\n //\\\\/ ,,, \\//\\\\\n |/\\| ,;;;;;, |/\\|\n //\\\\\\;-\" \"-;///\\\\\n // \\/ . \\/ \\\\\n (| ,-_| \\ | / |_-, |)\n //`__\\.-.-./__`\\\\\n // /.-(() ())-.\\ \\\\\n (\\ |) '---' (| /)\n ` (| |) `\n \\) (/)\"\"\")",
"def load(fname, objname=None):\r\n if not objname:\r\n objname = fname.split(\".\")[0]\r\n return f'\\ncmd.load(\"{fname}\", \"{objname}\")'",
"def load_dotted(name):\n components = name.split('.')\n path = [components.pop(0)]\n obj = __import__(path[0])\n while components:\n comp = components.pop(0)\n path.append(comp)\n try:\n obj = getattr(obj, comp)\n except AttributeError:\n __import__('.'.join(path))\n try:\n obj = getattr(obj, comp)\n except AttributeError:\n raise ImportError('.'.join(path))\n\n return obj",
"def _read_dot(self):\n if self._next_char() != '.':\n raise SyntaxError(self.build_error_string(\"Unknown token '.'\"))\n return TOKEN_INTERVAL",
"def print_dot(obj,out,system,parentname):\n for class_ in type(obj).mro():\n if class_ in _registered_handler.keys():\n handler = _registered_handler[class_]\n return handler(obj,out,system,parentname)\n return \"\"",
"def print_dotted_line(width=72):\n print('-' * width)",
"def dots(result):\n sys.stdout.write(CHARACTER[result])\n sys.stdout.flush()",
"def fix_missing_period(line):\n if \"@highlight\" in line:\n return line\n if line == \"\":\n return line\n if line[-1] in END_TOKENS:\n return line\n return line + \" .\"",
"def dot_printname(self):\n return self.printname.split('/')[0].replace('-', '_')",
"def __initialize(name: str, period: bool=False) -> str:\n if period:\n return f\"{'.'.join([n[0] for n in name.split(' ')])}.\"\n return ''.join([n[0] for n in name.split(' ')])",
"def fill_dots(message):\r\n length = len(message)\r\n power = int(np.ceil(np.log2(length)))\r\n return message + (\".\" * (2**power - length))",
"def __init__(self, filename):\n self.message = \"\"\"\\\n-- EMPTY MODULE --------------------------------------------- {0}\n\nI ran into something unexpected when parsing your code!\n\n\nI am looking for one of the following things:\n\n a definition or type annotation\n a port declaration\n a type declaration\n an import\n an infix declaration\n whitespace\"\"\".format(filename)\n super().__init__(self.message)",
"def replace_dot(data):\n data = re.sub(\"[.]\", \" . \", data)\n return data",
"def __str__(self):\n c = self\n ans = \"[\"\n while c:\n ans += \".\"\n c = c.next\n return ans + \"]\"",
"def show(self):\n f = open('/tmp/dotty', 'w')\n f.write(self.dot())\n f.close()\n os.system('cat /tmp/dotty | dot -Tgif > /tmp/dotty.gif')\n os.system('eog /tmp/dotty.gif')",
"def sanitize_dot(func):\n return str(func).replace(\"::\", \"\\\\\")"
]
| [
"0.63691044",
"0.6325998",
"0.63179594",
"0.5760462",
"0.57490486",
"0.57462436",
"0.55643755",
"0.5487015",
"0.5447725",
"0.53625304",
"0.5229726",
"0.52093977",
"0.51824206",
"0.5179551",
"0.5173894",
"0.5169528",
"0.5149935",
"0.5124151",
"0.5117392",
"0.5044035",
"0.49932796",
"0.497996",
"0.49574506",
"0.49278054",
"0.48911536",
"0.48802602",
"0.48769015",
"0.4843443",
"0.48165554",
"0.48118412"
]
| 0.639237 | 0 |
Wrapper for Popen to asynchronously invoke a subprocess while printing a given pattern until the subprocess is complete. If the log level is lower than INFO, stream the process stdout instead. | def invoke_subprocess_with_loading_pattern(
command_args: Dict[str, Any],
loading_pattern: Callable[[StreamWriter], None] = default_loading_pattern,
stream_writer: Optional[StreamWriter] = None,
) -> Optional[Union[str, bytes]]:
stream_writer = stream_writer or StreamWriter(sys.stderr)
process_output = ""
process_stderr = ""
# Default stdout to PIPE if not specified so
# that output isn't printed along with dots
if not command_args.get("stdout"):
command_args["stdout"] = PIPE
if not command_args.get("stderr"):
command_args["stderr"] = PIPE
try:
keep_printing = LOG.getEffectiveLevel() >= logging.INFO
def _print_loading_pattern():
while keep_printing:
loading_pattern(stream_writer)
# Popen is async as opposed to run so we can print while we wait
with Popen(**command_args) as process:
with ThreadPoolExecutor() as executor:
executor.submit(_print_loading_pattern)
if process.stdout:
                    # If the logging level is DEBUG, stream the logs instead of only collecting them.
                    # We read from the subprocess stdout to avoid the deadlock in the process.wait function;
                    # for more detail, see this Python bug: https://bugs.python.org/issue1256
for line in process.stdout:
decoded_line = _check_and_process_bytes(line)
if LOG.getEffectiveLevel() < logging.INFO:
LOG.debug(decoded_line)
process_output += decoded_line
if process.stderr:
for line in process.stderr:
# Since we typically log standard error back, we preserve
# the whitespace so that it is formatted correctly
decoded_line = _check_and_process_bytes(line, preserve_whitespace=True)
process_stderr += decoded_line
return_code = process.wait()
keep_printing = False
stream_writer.write_str(os.linesep)
stream_writer.flush()
if return_code:
raise LoadingPatternError(
f"The process {command_args.get('args', [])} returned a "
f"non-zero exit code {process.returncode}.\n{process_stderr}"
)
except (OSError, ValueError) as e:
raise LoadingPatternError(f"Subprocess execution failed {command_args.get('args', [])}. {e}") from e
return process_output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def non_blocking_streamlit(process: psutil.Popen) -> None:\n while process.is_running():\n process.communicate()",
"def _dumpStdout(self, p, outputCallback):\n while p.poll() is None:\n try:\n # May raise IOError if in non-blocking mode\n l = p.stdout.read()\n outputCallback(l)\n except IOError:\n pass\n time.sleep(0.1)\n outputCallback(p.stdout.read())",
"def run(process, line_handler):\n\n io_q = queue.Queue(5)\n threads = {\n \"stdout\": threading.Thread(\n target=read_stream, args=(\"stdout\", process.stdout, io_q)\n ),\n \"stderr\": threading.Thread(\n target=read_stream, args=(\"stderr\", process.stderr, io_q)\n ),\n }\n # Unfortunately, stdout and stderr are not synchronised with each other.\n # This makes capturing both for real-time processing useless. So it is\n # currently all captured under stdout. Even more unfortunately, stderr\n # comes through first before stdout. This means writes that are made first\n # to stdout will not be first through the pipe if there is stderr output.\n #\n # This lack of sychronisation between stdout and stderr output makes\n # real-time display useless because they aren't captured and passed\n # through to the handler as they are encountered.\n #\n # Worse still, there appear to be issues with subprocess output capture on\n # Windows.\n #\n # A proper resolution would be to provide a custom subprocess module but\n # since the common usage does not require real-time capture of\n # stdout/stderr, this is not worth the effort. Manually running whatever\n # was intended for the subprocess outside ttt is the only recourse.\n #\n for thread in threads.values():\n thread.start()\n\n stdout = []\n stderr = []\n while threads:\n try:\n item = io_q.get(True, 1)\n except queue.Empty:\n if process.poll() is not None:\n break\n else:\n outstream, message = item\n if message == \"EXIT\":\n threads[outstream].join()\n del threads[outstream]\n else:\n message = message.rstrip(os.linesep)\n channel = sys.stdout if outstream == \"stdout\" else sys.stderr\n (stdout if outstream == \"stdout\" else stderr).append(message)\n if line_handler is not None:\n line_handler(channel, message)\n else:\n channel.write(message)\n channel.flush()\n\n for t in threads.values():\n t.join()\n process.wait()\n return (process.returncode, stdout, stderr)",
"def call_output(*popenargs, **kwargs):\n\n def create_process(*popenargs, **kwargs):\n return subprocess.Popen(*popenargs, **kwargs)\n\n if \"stdout\" in kwargs:\n raise ValueError(\"stdout argument not allowed, it will be overridden.\")\n if \"stdin\" in kwargs:\n raise ValueError(\"stdin argument not allowed, it will be overridden.\")\n\n kwargs[\"stdin\"] = subprocess.PIPE\n line_handler = kwargs.pop(\"listener\", None)\n\n with create_process(\n *popenargs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs\n ) as process:\n return run(process, line_handler)",
"def execute_command(cmd):\n popen = Popen(cmd, stdout=PIPE, stderr=PIPE)\n stdout = b''\n while True: # Save output to youtube_stdout while this being echoed\n tmp = popen.stdout.read(1)\n stdout += tmp\n _print(tmp, end=\"\")\n sys.stdout.flush()\n # do it until the process finish and there isn't output\n if tmp == b\"\" and popen.poll() is not None:\n break",
"async def checked_run(*cmd):\n\n # Start the subprocess.\n logging.info('Running: %s', expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n chunks = []\n while True:\n chunk = await p.stdout.read(16 * 1024)\n if not chunk:\n break\n chunks.append(chunk)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n stdout = b''.join(chunks).decode()[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, expand_cmd_str(cmd), stdout))\n\n return stdout",
"def cmd(*args, **kwargs):\n cmd_s = ' '.join(args)\n print('+ {}'.format(cmd_s))\n proc = subprocess.Popen(cmd_s, shell=True, stdout=subprocess.PIPE, **kwargs)\n for line in iter(proc.stdout.readline, ''):\n sys.stdout.write('> {}'.format(line))\n while proc.poll() is None:\n time.sleep(0.5)\n if proc.returncode != 0:\n raise CmdError(cmd_s, proc.returncode)",
"def _run_subprocess(cmd: List[str], args: List[str], env: Optional[Dict[str, str]] = None):\n async def _read_output(stream, logger_instance):\n \"\"\"Read output from command and print it into the right logger.\"\"\"\n while True:\n line = await stream.readline()\n if line == b'':\n break\n logger_instance(line.decode('utf-8').rstrip())\n\n async def _stream_subprocess(cmd, args, env):\n \"\"\"Run subprocess.\"\"\"\n cmd_ = ' '.join(cmd)\n args_ = ' '.join(args)\n process = await asyncio.create_subprocess_shell(f'{cmd_} {args_}',\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=env)\n\n await asyncio.wait([\n _read_output(process.stdout, logger.info),\n _read_output(process.stderr, logger.error)\n ])\n await process.wait()\n if process.returncode is None or process.returncode != 0:\n raise ValueError('Task failed!')\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(_stream_subprocess(cmd, args, env))",
"def _stream(cmd):\n # color_print(getuser() + '$ ' + cmd, COLOR.BLUE)\n output = [] # used to collect o/p from both stdout and stderr\n\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)\n except subprocess.CalledProcessError as ex:\n print(\"Status : FAIL\", ex.returncode, ex.output)\n else:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n # print(line)\n output.append(line)\n\n # Note: output is streamed to the user as and when it occurs.\n with proc.stderr:\n for line in iter(proc.stderr.readline, b''):\n # print(line)\n output.append(line)\n\n return output",
"def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers",
"def execute(command, stdout, stderr=sys.stdout):\n # Does tail work to watch stdout to logging service?\n proc = subprocess.Popen(\n command, shell=True, stdout=stdout, stderr=stderr)\n proc.wait()",
"def subproc(self,line):\n self.set_stdout()\n proc = subprocess.Popen(line.split(),stdout=self.stdout)\n proc.wait() #ensures that the subprocess executes and terminates before returning to the shell",
"def _get_pattern_from_output(proc, pattern, retries=5, wait=1):\n m = None\n err_str = ''\n while m is None and retries:\n sleep(wait)\n try:\n out, err = proc.communicate(timeout=5)\n err_str += err\n m = re.search(pattern, err)\n except Exception:\n print(\"Communicate timed out...\")\n continue\n finally:\n retries -= 1\n return m, err_str",
"def bash(cmd, prnt=True, wait=True):\n p = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)\n if wait:\n p.wait()\n while True and prnt:\n line = p.stdout.readline()\n if line:\n print(line)\n else:\n break\n\n return (p)",
"def run(command, params={}):\n pass_params = {'stdout': PIPE, 'stderr': PIPE}\n pass_params.update(params)\n process = Popen(command, **pass_params)\n System.log_subprocess_output(process)",
"def _progressive_log(msg):\n # first clear the line\n sys.stdout.write(80 * ' ' + '\\r')\n sys.stdout.flush()\n sys.stdout.write(msg+'\\r')\n sys.stdout.flush()",
"def check_output(*args, **kwargs):\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.STDOUT\n\n p = subprocess.Popen(*args, **kwargs)\n\n try:\n while p.poll() is None:\n time.sleep(0.002)\n return p.poll(), p.stdout.read().decode('utf-8', 'ignore')\n finally:\n if p.poll() is None: # pragma: no cover\n p.kill()",
"def run_command(cmd, print_output=True):\n def enqueue_output(out, queue):\n for line in iter(out.readline, b''):\n queue.put(line.decode(\"utf-8\"))\n out.close()\n\n print(\" -> {}\".format(cmd))\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n q_stdout = Queue()\n q_stderr = Queue()\n t_stdout = Thread(target=enqueue_output, args=(proc.stdout, q_stdout))\n t_stderr = Thread(target=enqueue_output, args=(proc.stderr, q_stderr))\n t_stderr.daemon = True # thread dies with the program\n t_stdout.daemon = True\n t_stdout.start()\n t_stderr.start()\n stdout = \"\"\n stderr = \"\"\n\n # read stdout and stderr without blocking\n finished = False\n while True:\n done = proc.poll()\n try:\n line_stdout = \"\"\n while True:\n line_stdout += q_stdout.get(timeout=0.01)\n except Empty:\n pass\n # accumilate stdout and print if we should\n stdout += line_stdout\n if print_output and line_stdout != \"\":\n sys.stdout.write(bcolors.COLOR_CYAN)\n for line in line_stdout.splitlines():\n sys.stdout.write(\"\\t{}\\n\".format(line))\n sys.stdout.write(bcolors.COLOR_NC)\n sys.stdout.flush()\n\n try:\n line_stderr = \"\"\n while True:\n line_stderr += q_stderr.get(timeout=0.01)\n except Empty:\n pass\n # accumilate stderr and print if we should\n stderr += line_stderr\n if print_output and line_stderr != \"\":\n sys.stderr.write(bcolors.COLOR_RED)\n for line in line_stderr.splitlines():\n sys.stderr.write(\"\\t{}\\n\".format(line))\n sys.stderr.write(bcolors.COLOR_NC)\n sys.stderr.flush()\n\n # check if we're done and the finished flag is set\n if finished:\n if done != 0 and print_output is False:\n sys.stderr.write(bcolors.COLOR_RED)\n for line in stderr.splitlines():\n sys.stderr.write(\"\\t{}\\n\".format(line))\n sys.stderr.write(bcolors.COLOR_NC)\n sys.stderr.flush()\n\n return stdout, stderr, done\n\n # check if the process is done...\n if done is not None:\n finished = True\n # give the process's stdout and stderr time to flush\n time.sleep(0.25)",
"async def read_console(self):\n while self.proc is not None and self.proc.poll() is None:\n line = await self.loop.run_in_executor(None, self.proc.stdout.readline) # Async readline\n # Parse the command output and get the time in epoch format\n match = re.match(r'\\[([0-9]{2}):([0-9]{2}):([0-9]{2})\\] \\[([^][]*)\\]: (.*)$', line.decode())\n if match is None:\n return\n h, m, s, log, text = match.groups()\n local = time.localtime()\n if h == 23 and local.tm_hour == 0: # In case a line from 23:59 gets parsed at 00:00\n local = time.localtime(time.time()-3600)\n log_t = list(local)\n log_t[3:6] = map(int, (h, m, s))\n log_time = time.mktime(tuple(log_t))\n self.loop.create_task(self.on_line(log_time, log, text))",
"async def run_cmd_async(\n cmd: Union[List[str], str],\n cwd: Optional[Union[str, Path]] = None,\n env: Optional[Dict[str, str]] = None,\n timeout: Optional[int] = None,\n redirect: Optional[Union[str, Path, TextIOWrapper]] = None,\n) -> str:\n if isinstance(cmd, str):\n args = shlex.split(cmd)\n else:\n args = cmd\n cwd = cwd or Path.cwd()\n env = env or os.environ.copy()\n log.info(\"Calling asynchronously: %s\", \" \".join(args))\n try:\n proc = await create_subprocess_exec(\n *args,\n stdout=PIPE,\n stderr=STDOUT, # combine stdout,stderr streams\n cwd=cwd,\n env=env,\n )\n stdout, _ = await wait_for(proc.communicate(), timeout=timeout)\n await proc.wait()\n except subprocess.CalledProcessError as err:\n handle_output(err.stdout.decode(\"utf-8\"), redirect)\n raise\n return handle_output(stdout.decode(\"utf-8\"), redirect)",
"def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()",
"def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors",
"def _launch_command(args, out_cb, err_cb, done=None, **kwargs):\n\n def pump_stream(callback, stream):\n \"\"\"Pump the stream\"\"\"\n for line in stream:\n callback(line)\n callback(None)\n\n def joiner():\n \"\"\"Wait for streams to finish, then call done callback\"\"\"\n for th in threads:\n th.join()\n done(process)\n\n kwargs = kwargs.copy()\n in_data = kwargs.get(\"input\")\n if \"input\" in kwargs:\n del kwargs[\"input\"]\n assert kwargs.get(\"stdin\") is None, kwargs[\"stdin\"]\n kwargs[\"stdin\"] = PIPE\n elif \"stdin\" not in kwargs:\n kwargs[\"stdin\"] = DEVNULL\n kwargs.setdefault(\"stdout\", PIPE)\n kwargs.setdefault(\"stderr\", PIPE)\n kwargs[\"universal_newlines\"] = True # Text streams, not byte streams\n process = Popen(args, **kwargs)\n threads = []\n if process.stdout:\n thread = Thread(\n target=pump_stream, args=(out_cb, process.stdout), daemon=True\n )\n thread.start()\n threads.append(thread)\n if process.stderr:\n thread = Thread(\n target=pump_stream, args=(err_cb, process.stderr), daemon=True\n )\n thread.start()\n threads.append(thread)\n if done and threads:\n Thread(target=joiner, daemon=True).start()\n if in_data:\n process.stdin.write(str(in_data, \"utf-8\"))\n process.stdin.close()\n return process",
"async def checked_run(cmd, env=None):\n\n # Start the subprocess.\n logging.info('Running: %s', await expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, env=env,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n lines = []\n while True:\n line = await p.stdout.readline()\n if not line:\n break\n line = line.decode()[:-1]\n lines.append(line)\n logging.info(line)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n output = '\\n'.join(lines)[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, await expand_cmd_str(cmd), output))\n\n return output",
"def spawn(stdout, command, **options):\n # grab arguments that we care about\n stderr = options.pop('stderr', None)\n daemon = options.pop('daemon', True)\n\n # empty out the first generator result if a coroutine is passed\n if hasattr(stdout, 'send'):\n res = six.next(stdout)\n res and P.write(res)\n if hasattr(stderr, 'send'):\n res = six.next(stderr)\n res and P.write(res)\n\n # spawn the sub-process\n return process(command, stdout=stdout, stderr=stderr, **options)",
"def _logging_subprocess(self):\n\n # Setup logging for logging subprocess\n setproctitle('flowbber - logging manager')\n\n # # Level\n level = self.LEVELS.get(self._verbosity, logging.DEBUG)\n\n # # Format\n if level != logging.DEBUG:\n format_tpl = self.FORMAT\n else:\n format_tpl = self.FORMAT_DEBUG\n formatter = ColoredFormatter(fmt=format_tpl, style='{')\n\n # # Handler\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n # # Configure baisc logging\n logging.basicConfig(handlers=[handler], level=level)\n\n # Start listening for logs and prints\n listener = QueueListener(self._log_queue, handler)\n listener.start()",
"def Popen(self, args, **kwargs):\n # Invoke subprocess.check_output\n if self.command.verbosity >= 2:\n print(\">>> {cmdline}\".format(\n cmdline=' '.join(shlex.quote(arg) for arg in args)\n ))\n\n return self._subprocess.Popen(\n [\n str(arg) for arg in args\n ],\n **self.final_kwargs(**kwargs)\n )",
"def runCommand(command):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1)\n for line in p.stdout:\n print (line.decode(\"utf-8\"),end=\"\") # the end=\"\" argument to print prevents unwanted newlines after each line\n p.wait()",
"def _log_output(config: Configuration, stderr: IO, signal: RunningSignal):\n if config.verbose > 0:\n logger = logging.getLogger(\"ffmpeg\")\n while signal.running():\n try:\n line = _readline(stderr)\n if line != '':\n logger.info(line)\n except:\n pass\n logger.debug(\"Logging thread ended\")",
"def _read_thread(proc, ready_event):\n ready = False\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n\n if output_lines is not None:\n output_lines.append(line)\n\n if not ready and indicator in line:\n ready = True\n ready_event.set()"
]
| [
"0.5985993",
"0.58647025",
"0.57697845",
"0.5740654",
"0.5580448",
"0.5571762",
"0.5541012",
"0.551508",
"0.54873276",
"0.54817224",
"0.5456601",
"0.5406513",
"0.53827673",
"0.53788185",
"0.53719383",
"0.5362436",
"0.53304255",
"0.5301103",
"0.52810025",
"0.52699345",
"0.526385",
"0.52560246",
"0.5239223",
"0.52359205",
"0.522493",
"0.5214521",
"0.521084",
"0.5198735",
"0.5128271",
"0.50858545"
]
| 0.6803606 | 0 |
Converts the naslib representation to a Genotype | def convert_naslib_to_genotype(naslib_object):
ops_to_genotype = {
"Identity": "skip_connect",
"FactorizedReduce": "skip_connect",
"SepConv3x3": "sep_conv_3x3",
"DilConv3x3": "dil_conv_3x3",
"SepConv5x5": "sep_conv_5x5",
"DilConv5x5": "dil_conv_5x5",
"AvgPool": "avg_pool_3x3",
"MaxPool": "max_pool_3x3",
"Zero": "zero",
}
cells = [
get_cell_of_type(naslib_object, "normal_cell"),
get_cell_of_type(naslib_object, "reduction_cell"),
]
converted_cells = []
for cell in cells:
edge_op_dict = {
(i, j): ops_to_genotype[cell.edges[i, j]["op"].get_op_name]
for i, j in cell.edges
}
op_edge_list = [
(edge_op_dict[(i, j)], i - 1)
for i, j in sorted(edge_op_dict, key=lambda x: x[1])
if j < 7
]
converted_cells.append(op_edge_list)
return Genotype(
normal=converted_cells[0],
normal_concat=[2, 3, 4, 5],
reduce=converted_cells[1],
reduce_concat=[2, 3, 4, 5],
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fromgenotype(self):\n\t\tpass",
"def convert_genotype_to_naslib(genotype, naslib_object):\n genotype_to_ops = {\n \"skip_connect\": (\"Identity\", \"FactorizedReduce\"),\n \"sep_conv_3x3\": \"SepConv3x3\",\n \"dil_conv_3x3\": \"DilConv3x3\",\n \"sep_conv_5x5\": \"SepConv5x5\",\n \"dil_conv_5x5\": \"DilConv5x5\",\n \"avg_pool_3x3\": \"AvgPool\",\n \"max_pool_3x3\": \"MaxPool\",\n # \"zero\": (\"Zero\"),\n }\n cell_names = [\"normal_cell\", \"reduction_cell\"]\n\n # create a dictionary of edges to ops in the genotype\n edge_op_dict = {\"normal_cell\": {}, \"reduction_cell\": {}}\n for c, cell_type in enumerate([\"normal\", \"reduce\"]):\n cell = eval(\"genotype.\" + cell_type)\n tail = 2\n for i, edge in enumerate(cell):\n if i % 2 == 0:\n tail += 1\n head = edge[1] + 1\n edge_op_dict[cell_names[c]][(head, tail)] = genotype_to_ops[edge[0]]\n\n def add_genotype_op_index(edge):\n # function that adds the op index from genotype to each edge, and deletes the rest\n if (edge.head, edge.tail) in edge_op_dict[edge.data.cell_name]:\n for i, op in enumerate(edge.data.op):\n if (\n op.get_op_name\n in edge_op_dict[edge.data.cell_name][(edge.head, edge.tail)]\n ):\n index = i\n break\n edge.data.set(\"op_index\", index, shared=True)\n else:\n edge.data.delete()\n\n def update_ops(edge):\n # function that replaces the primitive ops at the edges with the ones from genotype\n if isinstance(edge.data.op, list):\n primitives = edge.data.op\n else:\n primitives = edge.data.primitives\n\n edge.data.set(\"op\", primitives[edge.data.op_index])\n edge.data.set(\"primitives\", primitives) # store for later use\n\n naslib_object.update_edges(\n add_genotype_op_index,\n scope=naslib_object.OPTIMIZER_SCOPE,\n private_edge_data=False,\n )\n\n naslib_object.update_edges(\n update_ops, scope=naslib_object.OPTIMIZER_SCOPE, private_edge_data=True\n )",
"def test_genbank_to_genome_taxonomy(self):\n result = self.gfu.genbank_to_genome(self.ctx, {\n 'workspace_name': self.ws_name,\n 'generate_ids_if_needed': 'true', # why is this a string\n 'taxon_id': '3702',\n 'file': {\n 'path': f\"{_DATA_PATH}/wigglesworthia/genome.gb\"\n },\n 'genome_name': str(uuid4()),\n })\n ('result', result)\n ref = result[0]['genome_ref']\n self.assertTrue(ref, 'Genome ref exists')\n info = result[0]['genome_info']\n typ = info[2]\n self.assertTrue(typ.startswith('KBaseGenomes.Genome'))\n info_details = info[-1]\n self.assertEqual(info_details['Taxonomy'], (\n \"cellular organisms;Eukaryota;Viridiplantae;\"\n \"Streptophyta;Streptophytina;Embryophyta;Tracheophyta;\"\n \"Euphyllophyta;Spermatophyta;Magnoliopsida;Mesangiospermae;\"\n \"eudicotyledons;Gunneridae;Pentapetalae;rosids;malvids;\"\n \"Brassicales;Brassicaceae;Camelineae;Arabidopsis\"\n ))\n self.assertEqual(info_details['Size'], '697724')\n self.assertEqual(info_details['Source'], 'Genbank')\n self.assertEqual(info_details['Name'], 'Wigglesworthia glossinidia endosymbiont of Glossina brevipalpis')\n self.assertEqual(info_details['GC content'], '0.22479')\n self.assertEqual(info_details['Genetic code'], '11')\n self.assertEqual(info_details['Number of Genome Level Warnings'], '1')\n self.assertEqual(info_details['Source ID'], 'BA000021')\n self.assertEqual(info_details['Number of Protein Encoding Genes'], '20')\n self.assertEqual(info_details['Domain'], 'Eukaryota')\n self.assertTrue(info_details['Assembly Object'])\n self.assertEqual(info_details['Number contigs'], '1')\n self.assertEqual(info_details['Number of CDS'], '20')\n self.assertTrue(info_details['MD5'])",
"def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)",
"def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes",
"def genotype(self, arch):\n backbone_r, neck_r = arch\n return (backbone_r.genotype, neck_r.genotype if neck_r is not None else None)",
"def genotypes(self):\n return self.data.genotypes.values",
"def toGenomeRepresentation(self):\n s = \"\"\n s += str(self.axiom)\n s += \"||\"+str(self.niterations) # The iterations must be shown as well\n for prod in self.productions:\n s += \"||\"\n s += prod.toGenomeRepresentation()\n return s",
"def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene",
"def test_to_rna(self):\n r = self.DNA(\"UCA\")\n self.assertEqual(str(r), \"TCA\")\n self.assertEqual(str(r.to_rna()), \"UCA\")",
"def convertToSpectroGram(self):",
"def genotype(self):\n\t\tgenotype = \"\"\n\t\tfields = vars(self)\n\t\tfor name, field in fields.items():\n\t\t\tif isinstance(field, Pattern):\n\t\t\t\tgenotype += field.genotype()\n\t\t\telse:\n\t\t\t\tgenotype += str(field)\n\t\t\tgenotype += \"\\0\"\n\n\t\treturn genotype",
"def _get_genotypes(self, samples, records, switch):\n\n variant = np.zeros(len(samples))\n for idx, sample in enumerate(samples):\n try:\n gt = records.genotype(sample)['GT']\n except IndexError:\n print(\"something went wrong with:\")\n print('sample:', sample, 'variant:', records, '-- set value to missing')\n gt = '.'\n if gt == '.':\n gt = 0\n else:\n gt = re.split('\\||/', gt)\n gt = list(map(int, gt))\n variant[idx] = np.sum(gt)\n if switch:\n variant = np.abs(variant - 2)\n return variant",
"def _raw_to_string(self, dtype, units='Angstrom', atom_format=None, ghost_format=None, width=17, prec=12):\n\n #molrec = self.to_dict(force_units=units, np_out=True)\n molrec = self.to_dict(np_out=True)\n smol = molparse.to_string(molrec,\n dtype=dtype,\n units=units,\n atom_format=atom_format,\n ghost_format=ghost_format,\n width=width,\n prec=prec)\n return smol",
"def _extract_genotype(geno_field):\n # Assume the genotype is the first format field and raise if it's not\n geno = geno_field.split(':')[0]\n if not GENO_REGEX.search(geno):\n raise ValueError('\"{}\" does not look like a genotype'.format(geno))\n return geno",
"def test_to_phylip(self):\n s = self.SequenceClass(\"ACG\", name=\"xyz\")\n self.assertEqual(s.to_phylip(), \"xyz\" + \" \" * 27 + \"ACG\")",
"def convert_config_to_genotype(config):\n base_string = \"NetworkSelectorDatasetInfo:darts:\"\n genotype = []\n for i, cell_type in enumerate([\"normal\", \"reduce\"]):\n genotype.append([])\n\n start = 0\n n = 2\n for node_idx in range(4):\n end = start + n\n # print(start, end)\n for j in range(start, end):\n key = \"NetworkSelectorDatasetInfo:darts:edge_{}_{}\".format(cell_type, j)\n if key in config:\n genotype[i].append((config[key], j - start))\n\n if len(genotype[i]) != 2 * (node_idx + 1):\n print(\"this is not a valid darts arch\")\n return config\n\n start = end\n n += 1\n\n return Genotype(\n normal=genotype[0],\n normal_concat=[2, 3, 4, 5],\n reduce=genotype[1],\n reduce_concat=[2, 3, 4, 5],\n )",
"def convert_compact_to_genotype(compact):\n OPS = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\",\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n ]\n genotype = []\n\n for i in range(2):\n cell = compact[i]\n genotype.append([])\n\n for j in range(8):\n genotype[i].append((OPS[cell[j][1]], cell[j][0]))\n\n return Genotype(\n normal=genotype[0],\n normal_concat=[2, 3, 4, 5],\n reduce=genotype[1],\n reduce_concat=[2, 3, 4, 5],\n )",
"def to_orb(self):\n node_id = int(self.idd)\n node_type = GLOB.gmplsTypes.NODETYPE_UNKNOWN\n if type(self.typee) == str:\n node_type = GLOB.gmplsTypes.NODETYPE_NETWORK\n\n node_orb = GLOB.gmplsTypes.nodeIdent(node_id, node_type)\n return node_orb",
"def all_genotype(ploidy):\n return [\"\".join(comb) for comb in cwr(\"ACGT-\", ploidy)]",
"def genotype(rsid):\n if rsid[0] == 'I' or rsid[0] == 'i':\n return { 'error': 'Cannot find indicators, must use rs #s'}\n soup = BeautifulSoup(urllib.urlopen('http://snpedia.com/index.php/Special:Browse/' + rsid).read())\n trows = soup('table')[1].find_all('tr')\n if len(trows) < 2:\n return { 'error': 'That rsid does not have any data/does not exist.' }\n locations = getLocations(soup)\n genotypeData = getData(locations, soup)\n genotypeData['rsid'] = rsid\n return genotypeData",
"def to_motevo(self):\n m = \"//\\n\"\n m += \"NA {}\\n\".format(self.id)\n m += \"P0\\tA\\tC\\tG\\tT\\n\"\n for i, row in enumerate(self.pfm):\n m += \"{}\\t{}\\n\".format(i, \"\\t\".join([str(int(x)) for x in row]))\n m += \"//\"\n return m",
"def to_rna(self):\n return self._nucleotids.replace(\"T\", \"U\")",
"def _process_genotypes(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'genotype'))\n logger.info(\"building labels for genotypes\")\n geno = Genotype(g)\n fly_tax = 'NCBITaxon:7227'\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (genotype_num, uniquename, description, name) = line\n\n # if self.testMode is True:\n # if int(object_key) not in self.test_keys.get('genotype'):\n # continue\n\n # add the internal genotype to pub mapping\n genotype_id = 'MONARCH:FBgeno'+str(genotype_num)\n self.idhash['genotype'][genotype_num] = genotype_id\n\n if description == '':\n description = None\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if self.testMode and \\\n int(genotype_num) not in \\\n self.test_keys['genotype']:\n continue\n\n model.addIndividualToGraph(\n genotype_id, uniquename,\n Genotype.genoparts['intrinsic_genotype'],\n description)\n # we know all genotypes are in flies\n # FIXME we assume here they are in melanogaster,\n # but that isn't necessarily true!!!\n # TODO should the taxon be == genomic background?\n geno.addTaxon(fly_tax, genotype_id)\n genotype_iid = self._makeInternalIdentifier(\n 'genotype', genotype_num)\n model.addComment(\n genotype_id, genotype_iid)\n if name.strip() != '':\n model.addSynonym(genotype_id, name)\n\n return",
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def get_label(genotype_type):\n if genotype_type == \"Hom\":\n return 0\n elif genotype_type == \"Het\":\n return 1\n elif genotype_type == \"Hom_alt\":\n return 2",
"def test_convert(self):\n gd: GraphDocument = json_loader.load(str(ONT), target_class=GraphDocument)\n g = self.converter.convert(gd)\n g.serialize(OUT)\n oi = SparqlImplementation(OntologyResource(OUT))\n # for r in oi.relationships([\"GO:0005773\"]):\n # print(r)\n self.compliance_tester.test_synonyms(oi)\n self.compliance_tester.test_definitions(oi)\n self.compliance_tester.test_sssom_mappings(oi)\n self.compliance_tester.test_relationships(oi)",
"def _genotype_updated(self):\n if self.data.get(\"GT\", None) is None:\n self.gt_alleles = None\n self.called = None\n self.ploidy = None\n else:\n self.gt_alleles = []\n for allele in ALLELE_DELIM.split(str(self.data[\"GT\"])):\n if allele == \".\":\n self.gt_alleles.append(None)\n else:\n self.gt_alleles.append(int(allele))\n self.called = all([al is not None for al in self.gt_alleles])\n self.ploidy = len(self.gt_alleles)",
"def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")",
"def get_gene_biotypes(db_path, table=Annotation):\n session = start_session(db_path)\n query = session.query(table.GeneBiotype).distinct()\n return {x[0] for x in query.all()}"
]
| [
"0.70927507",
"0.6866326",
"0.6093907",
"0.60476726",
"0.59363604",
"0.5803049",
"0.56443554",
"0.56211126",
"0.5610469",
"0.560769",
"0.556792",
"0.55300146",
"0.5503267",
"0.5429665",
"0.53932387",
"0.5378378",
"0.5329879",
"0.53253955",
"0.5294454",
"0.5262527",
"0.52408975",
"0.52370334",
"0.52365345",
"0.521313",
"0.51843745",
"0.5165504",
"0.515656",
"0.5147646",
"0.5141268",
"0.51271176"
]
| 0.7562564 | 0 |
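A minimal sketch of the Genotype container that convert_naslib_to_genotype above returns, assuming the standard DARTS namedtuple definition; the op/edge pairs are illustrative and not taken from any particular naslib object.

from collections import namedtuple

# Assumed standard DARTS genotype container, matching the fields used above.
Genotype = namedtuple("Genotype", "normal normal_concat reduce reduce_concat")

# Illustrative output: each cell is a list of (op_name, input_node) pairs, two per intermediate node.
example = Genotype(
    normal=[
        ("sep_conv_3x3", 0), ("sep_conv_3x3", 1),
        ("skip_connect", 0), ("sep_conv_3x3", 1),
        ("skip_connect", 0), ("sep_conv_3x3", 1),
        ("sep_conv_3x3", 0), ("skip_connect", 2),
    ],
    normal_concat=[2, 3, 4, 5],
    reduce=[
        ("max_pool_3x3", 0), ("max_pool_3x3", 1),
        ("max_pool_3x3", 0), ("skip_connect", 2),
        ("max_pool_3x3", 0), ("skip_connect", 2),
        ("max_pool_3x3", 0), ("skip_connect", 2),
    ],
    reduce_concat=[2, 3, 4, 5],
)
print(len(example.normal), example.normal_concat)  # 8 [2, 3, 4, 5]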
Converts the genotype representation to a naslib object | def convert_genotype_to_naslib(genotype, naslib_object):
genotype_to_ops = {
"skip_connect": ("Identity", "FactorizedReduce"),
"sep_conv_3x3": "SepConv3x3",
"dil_conv_3x3": "DilConv3x3",
"sep_conv_5x5": "SepConv5x5",
"dil_conv_5x5": "DilConv5x5",
"avg_pool_3x3": "AvgPool",
"max_pool_3x3": "MaxPool",
# "zero": ("Zero"),
}
cell_names = ["normal_cell", "reduction_cell"]
# create a dictionary of edges to ops in the genotype
edge_op_dict = {"normal_cell": {}, "reduction_cell": {}}
for c, cell_type in enumerate(["normal", "reduce"]):
cell = eval("genotype." + cell_type)
tail = 2
for i, edge in enumerate(cell):
if i % 2 == 0:
tail += 1
head = edge[1] + 1
edge_op_dict[cell_names[c]][(head, tail)] = genotype_to_ops[edge[0]]
def add_genotype_op_index(edge):
# function that adds the op index from genotype to each edge, and deletes the rest
if (edge.head, edge.tail) in edge_op_dict[edge.data.cell_name]:
for i, op in enumerate(edge.data.op):
if (
op.get_op_name
in edge_op_dict[edge.data.cell_name][(edge.head, edge.tail)]
):
index = i
break
edge.data.set("op_index", index, shared=True)
else:
edge.data.delete()
def update_ops(edge):
# function that replaces the primitive ops at the edges with the ones from genotype
if isinstance(edge.data.op, list):
primitives = edge.data.op
else:
primitives = edge.data.primitives
edge.data.set("op", primitives[edge.data.op_index])
edge.data.set("primitives", primitives) # store for later use
naslib_object.update_edges(
add_genotype_op_index,
scope=naslib_object.OPTIMIZER_SCOPE,
private_edge_data=False,
)
naslib_object.update_edges(
update_ops, scope=naslib_object.OPTIMIZER_SCOPE, private_edge_data=True
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_naslib_to_genotype(naslib_object):\n ops_to_genotype = {\n \"Identity\": \"skip_connect\",\n \"FactorizedReduce\": \"skip_connect\",\n \"SepConv3x3\": \"sep_conv_3x3\",\n \"DilConv3x3\": \"dil_conv_3x3\",\n \"SepConv5x5\": \"sep_conv_5x5\",\n \"DilConv5x5\": \"dil_conv_5x5\",\n \"AvgPool\": \"avg_pool_3x3\",\n \"MaxPool\": \"max_pool_3x3\",\n \"Zero\": \"zero\",\n }\n cells = [\n get_cell_of_type(naslib_object, \"normal_cell\"),\n get_cell_of_type(naslib_object, \"reduction_cell\"),\n ]\n converted_cells = []\n for cell in cells:\n edge_op_dict = {\n (i, j): ops_to_genotype[cell.edges[i, j][\"op\"].get_op_name]\n for i, j in cell.edges\n }\n op_edge_list = [\n (edge_op_dict[(i, j)], i - 1)\n for i, j in sorted(edge_op_dict, key=lambda x: x[1])\n if j < 7\n ]\n converted_cells.append(op_edge_list)\n\n return Genotype(\n normal=converted_cells[0],\n normal_concat=[2, 3, 4, 5],\n reduce=converted_cells[1],\n reduce_concat=[2, 3, 4, 5],\n )",
"def fromgenotype(self):\n\t\tpass",
"def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes",
"def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)",
"def to_orb(self):\n node_id = int(self.idd)\n node_type = GLOB.gmplsTypes.NODETYPE_UNKNOWN\n if type(self.typee) == str:\n node_type = GLOB.gmplsTypes.NODETYPE_NETWORK\n\n node_orb = GLOB.gmplsTypes.nodeIdent(node_id, node_type)\n return node_orb",
"def _raw_to_string(self, dtype, units='Angstrom', atom_format=None, ghost_format=None, width=17, prec=12):\n\n #molrec = self.to_dict(force_units=units, np_out=True)\n molrec = self.to_dict(np_out=True)\n smol = molparse.to_string(molrec,\n dtype=dtype,\n units=units,\n atom_format=atom_format,\n ghost_format=ghost_format,\n width=width,\n prec=prec)\n return smol",
"def genotype(rsid):\n if rsid[0] == 'I' or rsid[0] == 'i':\n return { 'error': 'Cannot find indicators, must use rs #s'}\n soup = BeautifulSoup(urllib.urlopen('http://snpedia.com/index.php/Special:Browse/' + rsid).read())\n trows = soup('table')[1].find_all('tr')\n if len(trows) < 2:\n return { 'error': 'That rsid does not have any data/does not exist.' }\n locations = getLocations(soup)\n genotypeData = getData(locations, soup)\n genotypeData['rsid'] = rsid\n return genotypeData",
"def test_genbank_to_genome_taxonomy(self):\n result = self.gfu.genbank_to_genome(self.ctx, {\n 'workspace_name': self.ws_name,\n 'generate_ids_if_needed': 'true', # why is this a string\n 'taxon_id': '3702',\n 'file': {\n 'path': f\"{_DATA_PATH}/wigglesworthia/genome.gb\"\n },\n 'genome_name': str(uuid4()),\n })\n ('result', result)\n ref = result[0]['genome_ref']\n self.assertTrue(ref, 'Genome ref exists')\n info = result[0]['genome_info']\n typ = info[2]\n self.assertTrue(typ.startswith('KBaseGenomes.Genome'))\n info_details = info[-1]\n self.assertEqual(info_details['Taxonomy'], (\n \"cellular organisms;Eukaryota;Viridiplantae;\"\n \"Streptophyta;Streptophytina;Embryophyta;Tracheophyta;\"\n \"Euphyllophyta;Spermatophyta;Magnoliopsida;Mesangiospermae;\"\n \"eudicotyledons;Gunneridae;Pentapetalae;rosids;malvids;\"\n \"Brassicales;Brassicaceae;Camelineae;Arabidopsis\"\n ))\n self.assertEqual(info_details['Size'], '697724')\n self.assertEqual(info_details['Source'], 'Genbank')\n self.assertEqual(info_details['Name'], 'Wigglesworthia glossinidia endosymbiont of Glossina brevipalpis')\n self.assertEqual(info_details['GC content'], '0.22479')\n self.assertEqual(info_details['Genetic code'], '11')\n self.assertEqual(info_details['Number of Genome Level Warnings'], '1')\n self.assertEqual(info_details['Source ID'], 'BA000021')\n self.assertEqual(info_details['Number of Protein Encoding Genes'], '20')\n self.assertEqual(info_details['Domain'], 'Eukaryota')\n self.assertTrue(info_details['Assembly Object'])\n self.assertEqual(info_details['Number contigs'], '1')\n self.assertEqual(info_details['Number of CDS'], '20')\n self.assertTrue(info_details['MD5'])",
"def genotype(self, arch):\n backbone_r, neck_r = arch\n return (backbone_r.genotype, neck_r.genotype if neck_r is not None else None)",
"def make(self, atype, **kwargs):\n obj = self.api.get_type(f\"VSO:{atype}\")\n return obj(**kwargs)",
"def _extract_genotype(geno_field):\n # Assume the genotype is the first format field and raise if it's not\n geno = geno_field.split(':')[0]\n if not GENO_REGEX.search(geno):\n raise ValueError('\"{}\" does not look like a genotype'.format(geno))\n return geno",
"def genotypes(self):\n return self.data.genotypes.values",
"def _process_genotypes(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'genotype'))\n logger.info(\"building labels for genotypes\")\n geno = Genotype(g)\n fly_tax = 'NCBITaxon:7227'\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (genotype_num, uniquename, description, name) = line\n\n # if self.testMode is True:\n # if int(object_key) not in self.test_keys.get('genotype'):\n # continue\n\n # add the internal genotype to pub mapping\n genotype_id = 'MONARCH:FBgeno'+str(genotype_num)\n self.idhash['genotype'][genotype_num] = genotype_id\n\n if description == '':\n description = None\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if self.testMode and \\\n int(genotype_num) not in \\\n self.test_keys['genotype']:\n continue\n\n model.addIndividualToGraph(\n genotype_id, uniquename,\n Genotype.genoparts['intrinsic_genotype'],\n description)\n # we know all genotypes are in flies\n # FIXME we assume here they are in melanogaster,\n # but that isn't necessarily true!!!\n # TODO should the taxon be == genomic background?\n geno.addTaxon(fly_tax, genotype_id)\n genotype_iid = self._makeInternalIdentifier(\n 'genotype', genotype_num)\n model.addComment(\n genotype_id, genotype_iid)\n if name.strip() != '':\n model.addSynonym(genotype_id, name)\n\n return",
"def test_convert(self):\n gd: GraphDocument = json_loader.load(str(ONT), target_class=GraphDocument)\n g = self.converter.convert(gd)\n g.serialize(OUT)\n oi = SparqlImplementation(OntologyResource(OUT))\n # for r in oi.relationships([\"GO:0005773\"]):\n # print(r)\n self.compliance_tester.test_synonyms(oi)\n self.compliance_tester.test_definitions(oi)\n self.compliance_tester.test_sssom_mappings(oi)\n self.compliance_tester.test_relationships(oi)",
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def nodata(self):\n # first check attrs, then encoding\n nodata = self._obj.rio.nodata\n if nodata is None:\n nodata = self._obj.rio.encoded_nodata\n if nodata is not None:\n self.set_nodata(nodata)\n return nodata",
"def _dta_obj_to_file(self, address):\n global get_missing\n \n type_dict = {\n 65530: ['b',1],\n 65529: ['h',2],\n 65528: ['l',4], \n 65527: ['f',4],\n 65526: ['d',8]\n }\n first_missing = {\n 65530: 101,\n 65529: 32741,\n 65528: 2147483620,\n 65527: float.fromhex('0x1.0p+127'),\n 65526: float.fromhex('0x1.0p+1023')\n }\n typlist = self._typlist\n byteorder = self._byteorder\n nvar = self._nvar\n \n def write_value_label_table(labname, table):\n # Stata limits are a bit confusing.\n # Total length of text (incl. null terminators) must be <= 32000 ?\n # Total number of vals must be <= 65536 ?\n # But the limit on text length forces no. of vals <= 16000 since\n # each label must occupy at least two bytes \n # (including null terminator).\n labname = labname[:32]\n \n val = sorted(table.keys())\n # each value may be up to 81 chars including null\n txt = [table[v][:80] for v in val] \n \n nval = len(val)\n if nval > 65536: # max number of values allowed\n val = val[:65536]\n txt = txt[:65536]\n nval = 65536\n \n off = [0]\n for i in range(nval - 1):\n # in next line, \"+ 1\" to leave room for \\0\n offset = off[i] + len(txt[i]) + 1\n if offset > 32000: # if too much text\n off = off[:i] # cut off at before the ith one\n val = val[:i]\n txt = txt[:i]\n nval = i\n break\n off.append(offset)\n txt_len = off[-1] + len(txt[-1]) + 1\n \n table_len = 4 + 4 + 4*nval + 4*nval + txt_len\n \n dta.write(bytearray('<lbl>', 'iso-8859-1'))\n dta.write(pack(byteorder + \"l\", table_len))\n dta.write(bytearray(labname, 'iso-8859-1') + \n b'\\0'*(33-len(labname)))\n dta.write(b'\\x00\\x00\\x00')\n \n dta.write(pack(byteorder + \"l\", nval))\n dta.write(pack(byteorder + \"l\", txt_len))\n for o in off: dta.write(pack(byteorder + \"l\", o))\n for v in val: dta.write(pack(byteorder + \"l\", v))\n for t in txt: dta.write(bytearray(t, 'iso-8859-1') + b'\\0')\n dta.write(bytearray('</lbl>', 'iso-8859-1'))\n \n with open(address, 'wb') as dta:\n dta.write(bytearray('<stata_dta>', 'iso-8859-1'))\n \n # header\n dta.write(bytearray('<header>', 'iso-8859-1'))\n dta.write(bytearray('<release>', 'iso-8859-1'))\n dta.write(bytearray('117', 'iso-8859-1'))\n dta.write(bytearray('</release>', 'iso-8859-1'))\n dta.write(bytearray('<byteorder>', 'iso-8859-1'))\n dta.write(\n bytearray('MSF' if byteorder == '>' else 'LSF', 'iso-8859-1'))\n dta.write(bytearray('</byteorder>', 'iso-8859-1'))\n dta.write(bytearray('<K>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'H', self._nvar))\n dta.write(bytearray('</K>', 'iso-8859-1'))\n dta.write(bytearray('<N>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'I', self._nobs))\n dta.write(bytearray('</N>', 'iso-8859-1'))\n dta.write(bytearray('<label>', 'iso-8859-1'))\n label = self._data_label\n label_length = len(label)\n dta.write(pack(byteorder + 'B', label_length))\n dta.write(bytearray(label, 'iso-8859-1'))\n dta.write(bytearray('</label>', 'iso-8859-1'))\n dta.write(bytearray('<timestamp>', 'iso-8859-1'))\n stamp = self._time_stamp\n m = re.match(\n '^([ 0-3][0-9]) ' + \n '(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ' + \n '[0-9]{4} ([ 0-2][0-9]):([0-9]{2})$', \n stamp)\n if (m and \n 1 <= int(m.group(1)) <= 31 and \n 0 <= int(m.group(3)) <= 24 and\n 0 <= int(m.group(4)) < 60):\n dta.write(pack(byteorder + 'B', 17))\n # next line includes optional binary zero\n dta.write(bytearray(stamp, 'iso-8859-1'))\n else: # there's something wrong with the time stamp, just skip it\n dta.write(pack(byteorder + 'B', 0))\n dta.write(bytearray('</timestamp>', 'iso-8859-1'))\n 
dta.write(bytearray('</header>', 'iso-8859-1'))\n \n # map\n offset_map = [0, dta.tell()]\n dta.write(bytearray(\"<map>\", 'iso-8859-1'))\n for i in range(14):\n dta.write(pack(byteorder + 'Q', 0))\n dta.write(bytearray(\"</map>\", \"iso-8859-1\"))\n \n # variable types\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<variable_types>\", 'iso-8859-1'))\n dta.write(pack(byteorder + 'H'*nvar, *typlist))\n dta.write(bytearray(\"</variable_types>\", 'iso-8859-1'))\n \n # variable names\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<varnames>\", 'iso-8859-1'))\n for name in self._varlist:\n name = name[:32]\n dta.write(bytearray(name, 'iso-8859-1') + b'\\0'*(33-len(name)))\n dta.write(bytearray(\"</varnames>\", 'iso-8859-1'))\n \n # sort order\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<sortlist>\", 'iso-8859-1'))\n srtlist = self._srtlist + [None]\n srtlist = [srt + 1 if srt is not None else 0 for srt in srtlist]\n dta.write(pack(byteorder + 'H'*(nvar + 1), *srtlist))\n dta.write(bytearray(\"</sortlist>\", 'iso-8859-1'))\n \n # formats\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<formats>\", 'iso-8859-1'))\n for fmt in self._fmtlist:\n fmt = fmt[:48]\n dta.write(bytearray(fmt, 'iso-8859-1') + b'\\0'*(49-len(fmt)))\n dta.write(bytearray(\"</formats>\", 'iso-8859-1'))\n \n # value-label names\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<value_label_names>\", 'iso-8859-1'))\n for lab in self._lbllist:\n lab = lab[:32]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(33-len(lab)))\n dta.write(bytearray(\"</value_label_names>\", 'iso-8859-1'))\n \n # variable labels\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<variable_labels>\", 'iso-8859-1'))\n for lab in self._vlblist:\n lab = lab[:80]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(81-len(lab)))\n dta.write(bytearray(\"</variable_labels>\", 'iso-8859-1'))\n \n # characteristics\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<characteristics>\", 'iso-8859-1'))\n chrdict = self._chrdict\n for varname in chrdict:\n varname = varname[:32]\n var_dict = chrdict[varname]\n for charname in var_dict:\n charname = charname[:32]\n char = var_dict[charname][:67784] # or 8681 for Small Stata\n full_length = 66 + len(char) + 1 # +1 for null termination\n \n dta.write(bytearray('<ch>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'I', full_length))\n dta.write(bytearray(varname, 'iso-8859-1') + \n b'\\0'*(33-len(varname)))\n dta.write(bytearray(charname, 'iso-8859-1') + \n b'\\0'*(33-len(charname)))\n dta.write(bytearray(char, 'iso-8859-1') + b'\\0')\n dta.write(bytearray('</ch>', 'iso-8859-1'))\n dta.write(bytearray(\"</characteristics>\", 'iso-8859-1'))\n \n # data\n offset_map.append(dta.tell())\n strls = {}\n dta.write(bytearray(\"<data>\", 'iso-8859-1'))\n varvals = self._varvals\n nvar, nobs = self._nvar, self._nobs\n missing_save_val = self._missing_save_val\n for i in range(nobs):\n row = varvals[i]\n for j in range(nvar):\n value, st_type = row[j], typlist[j]\n if st_type <= 2045:\n value = value[:st_type]\n dta.write(bytearray(value, 'iso-8859-1') + \n b'\\0'*(st_type - len(value)))\n elif st_type == 32768:\n if value == \"\":\n o,v = 0,0\n elif value in strls:\n o,v = strls[value]\n else:\n strls[value] = o,v = (i+1,j+1)\n dta.write(pack(byteorder + 'II', v, o))\n else:\n fmt = 'bhlfd'[65530 - st_type]\n if value is None:\n value = first_missing[st_type]\n elif isinstance(value, MissingValue):\n value = missing_save_val(value, st_type)\n elif (value > 
8.988465674311579e+307 or \n value < -1.7976931348623157e+308):\n # is this the right way to handle this ?\n value = missing_save_val(\n get_missing(value), st_type)\n dta.write(pack(byteorder + fmt, value))\n dta.write(bytearray(\"</data>\", 'iso-8859-1'))\n \n # strls\n offset_map.append(dta.tell())\n strls = [(val, key) for key,val in strls.items()]\n strls.sort()\n dta.write(bytearray(\"<strls>\", 'iso-8859-1'))\n for (o,v), value in strls:\n dta.write(bytearray('GSO', 'iso-8859-1'))\n dta.write(pack(byteorder + 'II', v, o))\n if isinstance(value, str):\n try:\n # expect error in next line if anywhere\n value = bytes(value, 'iso-8859-1') + b'\\x00'\n dta.write(pack('B', 130))\n except UnicodeEncodeError:\n value = bytes(value, 'utf-8')\n dta.write(pack('B', 129))\n elif (not isinstance(value, bytes) and \n not isinstance(value, bytearray)):\n msg = \"only bytes or str object allowed in Stata strl\"\n raise TypeError(msg)\n else:\n dta.write(pack('B', 129))\n val_len = len(value)\n dta.write(pack(byteorder + 'I', val_len))\n num_vals = unpack(str(val_len) + 'b', value)\n dta.write(value)\n dta.write(bytearray(\"</strls>\", 'iso-8859-1'))\n \n # value labels\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<value_labels>\", 'iso-8859-1'))\n for name, table in self._vallabs.items():\n write_value_label_table(name, table)\n dta.write(bytearray(\"</value_labels>\", 'iso-8859-1'))\n \n # end file\n offset_map.append(dta.tell())\n dta.write(bytearray(\"</stata_dta>\", 'iso-8859-1'))\n \n offset_map.append(dta.tell())\n \n # write map\n dta.seek(offset_map[1] + 5)\n for offset in offset_map:\n dta.write(pack(byteorder + 'Q', offset))",
"def map_to_homo_nid(self, ids, ntype):\n ...",
"def map_to_homo_nid(self, ids, ntype):\n ...",
"def convert():\n parser = _parser()\n args = parser.parse_args()\n\n logger.setLevel(args.log_level)\n\n with contextlib.ExitStack() as stack:\n data = [stack.enter_context(ctra.formats.oxstats_genotypes(*a))\n for a in ctra.algorithms.kwise(args.load_oxstats, 2)]\n samples = list(itertools.chain.from_iterable(s for _, _, s, _ in data))\n merged = ctra.formats.merge_oxstats([d for _, _, _, d in data])\n if args.num_samples > len(samples):\n logger.error('{} individuals present in OXSTATS data, but {} were specified'.format(len(samples), args.num_samples))\n sys.exit(1)\n elif args.num_samples < len(samples):\n logger.warn('{} individuals present in OXSTATS data, but {} were specified'.format(len(samples), args.num_samples))\n if os.path.exists(args.out) and not args.force:\n logger.error('Output file {} already exists. Not overwriting')\n sys.exit(1)\n outfile = stack.enter_context(h5py.File(args.out, 'w'))\n outfile.create_dataset('dosage', shape=(args.num_samples, args.num_variants), dtype='float32', chunks=args.chunk_size)\n outfile.create_dataset('info', shape=(1, args.num_variants), dtype='float32')\n for j, row in enumerate(merged):\n if j >= args.num_variants:\n logger.warn('{} variants processed, but additional variants are present'.format(j))\n break\n probs = numpy.array([float(x) for x in row[5:]])\n x, y = info(probs)\n outfile['dosage'][:, j] = x\n if not j % 1000:\n logger.debug('{} variants processed'.format(j))\n if j + 1 < args.num_variants:\n logger.error('{} variants present in OXSTATS data, but {} were specified'.format(j, args.num_variants))\n sys.exit(1)",
"def test_to_rna(self):\n r = self.DNA(\"UCA\")\n self.assertEqual(str(r), \"TCA\")\n self.assertEqual(str(r.to_rna()), \"UCA\")",
"def test_model_to_model(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")",
"def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene",
"def set_genotype(self, genotype):\n self.data[\"GT\"] = genotype\n self._genotype_updated()",
"def to_observation_type(self) -> str:\n obstype = self._header[\"OBSTYPE\"].strip().lower()\n self._used_these_cards(\"OBSTYPE\")\n if obstype == \"object\":\n return \"science\"\n return obstype",
"def to_nifti(self,folder_path: str):\n data_path = settings.STORAGE_DIR\n path = folder_path \n nifti=series.get_series_object(path) \n nifti_str=str(nifti)\n nifti_str=nifti_str[1:44]\n if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT': \n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')\n if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':\n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n nifti.set_ExportType('suv')\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')",
"def _dta_obj_to_file(self, address):\n global get_missing\n \n type_dict = {\n 251: ['b',1],\n 252: ['h',2], \n 253: ['l',4],\n 254: ['f',4],\n 255: ['d',8]\n }\n first_missing = {\n 251: 101,\n 252: 32741,\n 253: 2147483620, \n 254: float.fromhex('0x1.0p+127'),\n 255: float.fromhex('0x1.0p+1023')\n }\n typlist = self._typlist\n nvar = self._nvar\n \n missing_save_val = self._missing_save_val\n \n def write_value_label_table(labname, table):\n # Stata limits are a bit confusing. Total length of text \n # (including null terminators) must be <= 32000? Total \n # number of vals must be <= 65536? But the limit on text \n # length forces no. of vals <= 16000 since each label must \n # occupy at least two bytes (including null terminator).\n \n labname = labname[:32]\n \n val = sorted(table.keys())\n # each value may be up to 81 chars including null\n txt = [table[v][:80] for v in val] \n \n nval = len(val)\n if nval > 65536: # max number of values allowed\n val = val[:65536]\n txt = txt[:65536]\n nval = 65536\n \n off = [0]\n for i in range(nval - 1):\n # in next line, \"+ 1\" to leave room for \\0\n offset = off[i] + len(txt[i]) + 1\n if offset > 32000: # if too much text\n off = off[:i] # cut off at before the ith one\n val = val[:i]\n txt = txt[:i]\n nval = i\n break\n off.append(offset)\n txt_len = off[-1] + len(txt[-1]) + 1\n \n table_len = 4 + 4 + 4*nval + 4*nval + txt_len\n \n dta.write(pack(byteorder + \"l\", table_len))\n dta.write(bytearray(labname, 'iso-8859-1') +\n b'\\0'*(33-len(labname)))\n dta.write(b'\\x00\\x00\\x00')\n \n dta.write(pack(byteorder + \"l\", nval))\n dta.write(pack(byteorder + \"l\", txt_len))\n for o in off: dta.write(pack(byteorder + \"l\", o))\n for v in val: dta.write(pack(byteorder + \"l\", v))\n #for t in txt: write_byte_str((t,), len(t) + 1)\n for t in txt: dta.write(bytearray(t, 'iso-8859-1') + b'\\0')\n \n with open(address, 'wb') as dta:\n # header\n dta.write(pack('b', 115)) # ds_format\n byteorder = self._byteorder\n dta.write(pack('b', 1 if byteorder == '>' else 2)) # byteorder\n dta.write(pack('b', 1)) # filetype\n dta.write(pack('b', 0)) # padding\n dta.write(pack(byteorder + 'h', self._nvar))\n dta.write(pack(byteorder + 'i', self._nobs))\n data_label = self._data_label[:80]\n dta.write(bytearray(data_label, 'iso-8859-1') +\n b'\\0'*(81-len(data_label)))\n self._set_timestamp() # new time_stamp\n time_stamp = self._time_stamp[:17]\n dta.write(bytearray(time_stamp, 'iso-8859-1') +\n b'\\0'*(18-len(time_stamp)))\n \n # descriptors\n dta.write(bytes(self._typlist))\n for name in self._varlist:\n name = name[:32]\n dta.write(bytearray(name, 'iso-8859-1') + b'\\0'*(33-len(name)))\n # In srtlist, Nones are replaced with zeroes and \n # a terminating zero is appended (the file needs \n # nvar + 1 ints including terminating zero).\n srtlist = self._srtlist + [None]\n srtlist = [srt + 1 if srt is not None else 0 for srt in srtlist]\n dta.write(pack(byteorder + 'h'*(nvar + 1), *srtlist))\n for fmt in self._fmtlist:\n fmt = fmt[:48]\n dta.write(bytearray(fmt, 'iso-8859-1') + b'\\0'*(49-len(fmt)))\n for lab in self._lbllist:\n lab = lab[:32]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(33-len(lab)))\n \n # variable labels\n for lab in self._vlblist:\n lab = lab[:80]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(81-len(lab)))\n \n # characteristics\n chrdict = self._chrdict\n for varname in chrdict:\n varname = varname[:32]\n vardict = chrdict[varname]\n for charname in vardict:\n charname = charname[:32]\n char = vardict[charname][:67784] # or 
8681 for Small Stata\n data_len = 66 + len(char) + 1 # +1 for null termination\n dta.write(b'\\x01') # data_type\n dta.write(pack(byteorder + 'i', data_len))\n dta.write(bytearray(varname, 'iso-8859-1') + \n b'\\0'*(33 - len(varname)))\n dta.write(bytearray(charname, 'iso-8859-1') + \n b'\\0'*(33 - len(charname)))\n dta.write(bytearray(char, 'iso-8859-1') + b'\\0')\n dta.write(b'\\x00\\x00\\x00\\x00\\x00')\n \n # data\n for row in self._varvals:\n for value, st_type in zip(row, typlist):\n if st_type <= 244:\n dta.write(bytearray(value, 'iso-8859-1') + \n b'\\0'*(st_type - len(value)))\n else:\n fmt, nbytes = type_dict[st_type]\n # Get correct dta value if missing. As a safety, check\n # for non-standard missing (None and large values).\n if value is None:\n value = first_missing[st_type]\n elif isinstance(value, MissingValue):\n value = missing_save_val(value, st_type)\n elif (value > 8.988465674311579e+307 or \n value < -1.7976931348623157e+308):\n # is this the right way to handle this ?\n value = missing_save_val(\n get_missing(value), st_type) \n dta.write(pack(byteorder + fmt, value))\n \n # value labels\n value_labels = self._vallabs\n for labname in value_labels.keys():\n write_value_label_table(labname, value_labels[labname])",
"def _converttonamespace(o):\n ret = {}\n\n # These things are written directy into the dictionary.\n direct = (numbers.Number, np.number, tuple,\n list, np.ndarray, str)\n\n for key, val in o.__dict__.items():\n\n # Ignore hidden variables\n if key.startswith(\"_\"):\n continue\n # Skip fields that should not be stored\n if isinstance(val, Field) and val.save == False:\n continue\n\n if val is not None and isinstance(val, direct):\n ret[key] = copy.copy(val)\n else:\n ret[key] = _converttonamespace(val)\n\n return SimpleNamespace(**ret)",
"def create_merged_genotype_file(snps_file_path):\n print(\"creating merged genotype file\")\n plink_runner = Plink2DockerRunner()\n shutil.copyfile(snps_file_path, f\"{GENOTYPE_DATA_PATH}/{SNP_LIST_FILE_NAME}\")\n plink_runner(f\"./plink2 --pfile {IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{MERGED_GENOTYPE_FILE} vzs \"\n f\"--extract {IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{SNP_LIST_FILE_NAME} --export vcf \"\n f\"--out {IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{MERGED_GENOTYPE_FILE}\")"
]
| [
"0.76223373",
"0.6980172",
"0.54658943",
"0.5373541",
"0.5373159",
"0.52955455",
"0.5285364",
"0.5245461",
"0.5245001",
"0.5129519",
"0.50936586",
"0.50587034",
"0.5043617",
"0.5033558",
"0.49881056",
"0.49753153",
"0.49664342",
"0.4957352",
"0.4957352",
"0.4950104",
"0.49459967",
"0.49370095",
"0.4916867",
"0.48611596",
"0.48448557",
"0.48323512",
"0.48312557",
"0.4820368",
"0.48121762",
"0.48057243"
]
| 0.74750704 | 1 |
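A standalone sketch of the edge-indexing step inside convert_genotype_to_naslib above; this is a hypothetical helper that reproduces only the edge_op_dict construction, so it runs without naslib installed.

# Hypothetical mapping and helper mirroring the logic above; not part of naslib itself.
GENOTYPE_TO_OPS = {
    "skip_connect": ("Identity", "FactorizedReduce"),
    "sep_conv_3x3": "SepConv3x3",
    "sep_conv_5x5": "SepConv5x5",
    "dil_conv_3x3": "DilConv3x3",
    "dil_conv_5x5": "DilConv5x5",
    "avg_pool_3x3": "AvgPool",
    "max_pool_3x3": "MaxPool",
}

def genotype_cell_to_edge_ops(cell):
    # cell is a list of (op_name, input_node) pairs; every two entries feed one intermediate node.
    edge_op_dict = {}
    tail = 2
    for i, (op, input_node) in enumerate(cell):
        if i % 2 == 0:
            tail += 1                 # advance to the next intermediate node
        head = input_node + 1         # naslib node ids are shifted by one relative to the genotype
        edge_op_dict[(head, tail)] = GENOTYPE_TO_OPS[op]
    return edge_op_dict

normal = [("sep_conv_3x3", 0), ("sep_conv_3x3", 1), ("skip_connect", 0), ("sep_conv_3x3", 1),
          ("skip_connect", 0), ("sep_conv_3x3", 1), ("sep_conv_3x3", 0), ("skip_connect", 2)]
print(genotype_cell_to_edge_ops(normal)[(1, 3)])  # SepConv3x3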
Converts a DARTS genotype to a configspace instance dictionary | def convert_genotype_to_config(genotype):
base_string = "NetworkSelectorDatasetInfo:darts:"
config = {}
for cell_type in ["normal", "reduce"]:
cell = eval("genotype." + cell_type)
start = 0
n = 2
for node_idx in range(4):
end = start + n
ops = cell[2 * node_idx: 2 * node_idx + 2]
# get edge idx
edges = {
base_string + "edge_" + cell_type + "_" + str(start + i): op
for op, i in ops
}
config.update(edges)
if node_idx != 0:
# get node idx
input_nodes = sorted(list(map(lambda x: x[1], ops)))
input_nodes_idx = "_".join([str(i) for i in input_nodes])
config.update(
{
base_string
+ "inputs_node_"
+ cell_type
+ "_"
+ str(node_idx + 2): input_nodes_idx
}
)
start = end
n += 1
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_genotype_to_config(arch):\n base_string = 'NetworkSelectorDatasetInfo:darts:'\n config = {}\n\n for cell_type in ['normal', 'reduce']:\n cell = eval('arch.' + cell_type)\n\n start = 0\n n = 2\n for node_idx in range(4):\n end = start + n\n ops = cell[2 * node_idx: 2 * node_idx + 2]\n\n # get edge idx\n edges = {base_string + 'edge_' + cell_type + '_' + str(start + i): op for\n op, i in ops}\n config.update(edges)\n\n if node_idx != 0:\n # get node idx\n input_nodes = sorted(list(map(lambda x: x[1], ops)))\n input_nodes_idx = '_'.join([str(i) for i in input_nodes])\n config.update({base_string + 'inputs_node_' + cell_type + '_' + str(node_idx + 2):\n input_nodes_idx})\n\n start = end\n n += 1\n return config",
"def get_configspace(self):\n cd = self.cd\n sp_dict = {}\n sp_dict['epochs'] = int(cd['epochs'])\n sp_dict['gamma'] = self._get_range_uniform('gamma', cd)\n sp_dict['multilabel'] = self._get_atomic('multilabel', cd)\n sp_dict['lr'] = self._get_range_uniform('lr', cd)\n sp_dict['optimizer'] = self._get_categorical('optimizer', cd)\n sp_dict['n_latent'] = self._get_range_integer('n_latent',cd)\n sp_dict['enc_hidden_dim'] = self._get_range_integer('enc_hidden_dim', cd)\n sp_dict['batch_size'] = self._get_range_integer('batch_size', cd)\n sp_dict['coherence_loss_wt'] = self._get_range_uniform('coherence_loss_wt', cd) or 0.0\n sp_dict['redundancy_loss_wt'] = self._get_range_uniform('redundancy_loss_wt', cd) or 0.0\n sp_dict['num_enc_layers'] = self._get_range_integer('num_enc_layers', cd) or 1\n sp_dict['enc_dr'] = self._get_range_uniform('enc_dr', cd) or 0.0\n sp_dict['covar_net_layers'] = self._get_range_integer('covar_net_layers', cd) or 1\n sp_dict['classifier_dropout'] = self._get_range_uniform('classifier_dropout', cd) or 0.1\n\n embedding_types = cd['embedding']\n embedding_space = [] \n for et in embedding_types:\n if et['source'] == 'random':\n embedding_space.append(ag.space.Dict(**{'source': 'random', 'size': self._get_range_integer('size', et)}))\n else:\n fixed_assigned = et.get('fixed')\n if fixed_assigned is None:\n embedding_space.append(ag.space.Dict(**{'source': et['source'], 'fixed': ag.space.Bool()}))\n else:\n embedding_space.append(ag.space.Dict(**{'source': et['source'], 'fixed': fixed_assigned.lower()}))\n sp_dict['embedding'] = ag.space.Categorical(*embedding_space)\n\n latent_types = cd['latent_distribution']\n latent_space = []\n for lt in latent_types:\n dist_type = lt['dist_type']\n if dist_type == 'vmf':\n latent_space.append(ag.space.Dict(**{'dist_type': 'vmf', 'kappa': self._get_range_uniform('kappa', lt)}))\n elif dist_type == 'logistic_gaussian':\n latent_space.append(ag.space.Dict(**{'dist_type': 'logistic_gaussian', 'alpha': self._get_range_uniform('alpha', lt)}))\n else:\n latent_space.append(ag.space.Dict(**{'dist_type': 'gaussian'}))\n sp_dict['latent_distribution'] = ag.space.Categorical(*latent_space)\n return sp_dict",
"def GetConfig(self, genname):\n if genname in self.config:\n return self.config[genname]\n else:\n return dbus.Dictionary(signature='sv')",
"def test_configuration(self):\n space = Space()\n space.register(Integer(\"yolo1\", \"uniform\", -3, 6, shape=(2,)))\n space.register(Integer(\"yolo2\", \"uniform\", -3, 6, shape=(2,)))\n space.register(Real(\"yolo3\", \"norm\", 0.9))\n space.register(Categorical(\"yolo4\", (\"asdfa\", 2)))\n\n assert space.configuration == {\n \"yolo1\": \"uniform(-3, 3, shape=(2,), discrete=True)\",\n \"yolo2\": \"uniform(-3, 3, shape=(2,), discrete=True)\",\n \"yolo3\": \"normal(0.9)\",\n \"yolo4\": \"choices(['asdfa', 2])\",\n }",
"def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }",
"def fromgenotype(self):\n\t\tpass",
"def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}",
"def convert_config_to_genotype(config):\n base_string = \"NetworkSelectorDatasetInfo:darts:\"\n genotype = []\n for i, cell_type in enumerate([\"normal\", \"reduce\"]):\n genotype.append([])\n\n start = 0\n n = 2\n for node_idx in range(4):\n end = start + n\n # print(start, end)\n for j in range(start, end):\n key = \"NetworkSelectorDatasetInfo:darts:edge_{}_{}\".format(cell_type, j)\n if key in config:\n genotype[i].append((config[key], j - start))\n\n if len(genotype[i]) != 2 * (node_idx + 1):\n print(\"this is not a valid darts arch\")\n return config\n\n start = end\n n += 1\n\n return Genotype(\n normal=genotype[0],\n normal_concat=[2, 3, 4, 5],\n reduce=genotype[1],\n reduce_concat=[2, 3, 4, 5],\n )",
"def toDict(self):\n \n d = {}\n d['sp'] = self.species\n d['gns'] = self.genera\n d['fam'] = self.families\n d['ord'] = self.orders\n d['cls'] = self.classes\n d['phy'] = self.phyla\n d['kng'] = self.kingdoms\n \n return d",
"def genorates_to_dict(store: GenoRates) -> GenoDistribSerialisable:\n to_return = dict()\n num_alleles = store.shape[0]\n\n for gene in range(num_alleles):\n for allele in range(3):\n to_return[(geno_to_str(gene, allele))] = store[gene][allele]\n\n return to_return",
"def get_spacenet_dataset():\n ds = AttrDict()\n classes = [\n '__background__', 'building']\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds",
"def factory_type_dict():\n return {'filter' : filters.generate_filter,\n 'global_options' : global_options.generate_global_options,\n 'input_device' : input_devices.generate_input_device,\n 'input_stream' : input_streams.generate_input_stream,\n 'output_device' : output_devices.generate_output_device,\n 'output_stream' : output_streams.generate_output_stream}",
"def parse_genotypes(variant, sample_id):\n genotype_fields = variant['format'].split(':')\n genotype_data = variant[sample_id].split(':')\n return dict(zip(genotype_fields, genotype_data))",
"def to_dict(self, sed_type=\"likelihood\"):\n if sed_type == \"likelihood\":\n data = self.data\n else:\n data = {}\n all_maps = REQUIRED_MAPS[sed_type] + OPTIONAL_QUANTITIES[sed_type] + OPTIONAL_QUANTITIES_COMMON\n\n for quantity in all_maps:\n try:\n data[quantity] = getattr(self, quantity)\n except KeyError:\n pass\n\n return data",
"def cg_config():\n return {}",
"def serialize(self):\n cfg = SeqLib.serialize(self)\n\n cfg[\"variants\"] = dict()\n cfg[\"variants\"][\"wild type\"] = self.wt.serialize()\n cfg[\"variants\"][\"use aligner\"] = self.aligner is not None\n if self.max_mutations != DEFAULT_MAX_MUTATIONS:\n cfg[\"variants\"][\"max mutations\"] = self.max_mutations\n if self.variant_min_count > 0:\n cfg[\"variants\"][\"min count\"] = self.variant_min_count\n\n return cfg",
"def config(self) -> Dict[str, Any]:",
"def to_dict(self):\n mm, mp, pm, pp = self.xs\n return to_dict({\n 'type': type(self).__name__,\n 'name': self.name,\n 'pp': pp,\n 'pm': pm,\n 'mp': mp,\n 'mm': mm,\n 'a_guide': self.Aguide,\n 'h': self.H,\n })",
"def __init__(self, cfg, tops_type=[3, 5, 10]):\n\n attr_cloth_file = open(cfg.attr_cloth_file).readlines()\n self.attr_idx2name = {}\n for i, line in enumerate(attr_cloth_file[2:]):\n self.attr_idx2name[i] = line.strip('\\n').split()[0]",
"def get_config(self) -> Dict[str, Any]:\n return {\n 'num_classes': self.num_classes,\n 'name': self.name,\n 'dtype': self.dtype,\n 'sparse_y_true': self.sparse_y_true,\n 'sparse_y_pred': self.sparse_y_pred,\n 'axis': self.axis,\n }",
"def createDict( self ):\n d = {}\n devTup = ( 'endcap', 'comp', 'shutter','397intensity' )\n for dev in devTup:\n d[dev] = {'devChannels':{}}\n endcap = ( ( 1, 1 ), ( 2, 0 ) )\n comp = ( ( 1, 4 ), ( 2, 2 ), ( 'common', 3 ) )\n shutter = ( ( 1, 5 ), ( 2, 6 ), ( 3, 7 ) )\n intensity397 = (('397intensity',8),)\n chanTup = ( endcap, comp, shutter ,intensity397 )\n for dev, value in zip( devTup, chanTup ):\n for chanPair in value:\n d[dev]['devChannels'][chanPair[0]] = {'value':None, 'channel':chanPair[1]}\n ecRange = ( 0.0, 40.0 )\n compRange = ( -40.0, 40.0 )\n shutterRange = ( 0.0, 5.0 )\n intensity397Range = (0.0,2500.0)\n rangeTup = ( ecRange, compRange, shutterRange, intensity397Range )\n for dev, value in zip( devTup, rangeTup ): d[dev]['range'] = value\n self.dcDict = d",
"def smp_dict():\n out = base_dict()\n out['mro']['current'] = ['Sample']\n out['name']['current'] = 'Sample'\n ao(out, 'idx', 'Integer', attr=['Hidden'])\n ao(out, 'ii', 'Integer', attr=['Hidden'])\n ao(out, 'initialDimension', 'Float', 0., name='Initial Dimension')\n return out",
"def to_dict(self):\n config = {'min_length': self.min_length, 'max_length': self.max_length}\n return {'node_type': 'String', 'config': config}",
"def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict",
"def _gen_config(self, address, direction, attr_dict):\n global DefaultConfig\n global DeviceId\n for key in DefaultConfig.keys():\n self.DMAinstance.__setattr__(key,DefaultConfig[key])\n if direction == DMA_TO_DEV:\n self.DMAinstance.HasS2Mm = 0\n self.DMAinstance.HasMm2S = 1\n elif direction == DMA_BIDIRECTIONAL:\n self.DMAinstance.HasS2Mm = 1\n self.DMAinstance.HasMm2S = 1 \n self._bufPtr = None\n self._TransferInitiated = 0\n if attr_dict is not None:\n if type(attr_dict) == dict:\n for key in attr_dict.keys():\n self.DMAinstance.__setattr__(key,attr_dict[key])\n else:\n print(\"Warning: Expecting 3rd Arg to be a dict.\")\n\n virt = libxlnk.cma_mmap(address,0x10000)\n if virt == -1:\n raise RuntimeError(\"Memory map of driver failed.\")\n self.DMAinstance.BaseAddr = ffi.cast(\"uint32_t *\",virt)\n self.DMAinstance.DeviceId = DeviceId\n DeviceId += 1\n \n for key in DefaultConfig.keys():\n self.Configuration[key] = self.DMAinstance.__getattribute__(key)",
"def convert_genotype_to_naslib(genotype, naslib_object):\n genotype_to_ops = {\n \"skip_connect\": (\"Identity\", \"FactorizedReduce\"),\n \"sep_conv_3x3\": \"SepConv3x3\",\n \"dil_conv_3x3\": \"DilConv3x3\",\n \"sep_conv_5x5\": \"SepConv5x5\",\n \"dil_conv_5x5\": \"DilConv5x5\",\n \"avg_pool_3x3\": \"AvgPool\",\n \"max_pool_3x3\": \"MaxPool\",\n # \"zero\": (\"Zero\"),\n }\n cell_names = [\"normal_cell\", \"reduction_cell\"]\n\n # create a dictionary of edges to ops in the genotype\n edge_op_dict = {\"normal_cell\": {}, \"reduction_cell\": {}}\n for c, cell_type in enumerate([\"normal\", \"reduce\"]):\n cell = eval(\"genotype.\" + cell_type)\n tail = 2\n for i, edge in enumerate(cell):\n if i % 2 == 0:\n tail += 1\n head = edge[1] + 1\n edge_op_dict[cell_names[c]][(head, tail)] = genotype_to_ops[edge[0]]\n\n def add_genotype_op_index(edge):\n # function that adds the op index from genotype to each edge, and deletes the rest\n if (edge.head, edge.tail) in edge_op_dict[edge.data.cell_name]:\n for i, op in enumerate(edge.data.op):\n if (\n op.get_op_name\n in edge_op_dict[edge.data.cell_name][(edge.head, edge.tail)]\n ):\n index = i\n break\n edge.data.set(\"op_index\", index, shared=True)\n else:\n edge.data.delete()\n\n def update_ops(edge):\n # function that replaces the primitive ops at the edges with the ones from genotype\n if isinstance(edge.data.op, list):\n primitives = edge.data.op\n else:\n primitives = edge.data.primitives\n\n edge.data.set(\"op\", primitives[edge.data.op_index])\n edge.data.set(\"primitives\", primitives) # store for later use\n\n naslib_object.update_edges(\n add_genotype_op_index,\n scope=naslib_object.OPTIMIZER_SCOPE,\n private_edge_data=False,\n )\n\n naslib_object.update_edges(\n update_ops, scope=naslib_object.OPTIMIZER_SCOPE, private_edge_data=True\n )",
"def get_configspace():\n configspace = cs.ConfigurationSpace()\n\n memory = cs.hyperparameters.UniformIntegerHyperparameter(name='memory', lower=2, upper=25)\n configspace.add_hyperparameter(hyperparameter=memory)\n\n batch_size = cs.hyperparameters.UniformIntegerHyperparameter(\n name='batch_size', lower=32, upper=8192, log=True\n )\n configspace.add_hyperparameter(hyperparameter=batch_size)\n\n frequency = cs.hyperparameters.UniformFloatHyperparameter(\n name='frequency', lower=3e-2, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=frequency)\n\n learning_rate = cs.hyperparameters.UniformFloatHyperparameter(\n name='learning_rate', lower=1e-5, upper=3e-2, log=True\n )\n configspace.add_hyperparameter(hyperparameter=learning_rate)\n\n horizon = cs.hyperparameters.UniformIntegerHyperparameter(\n name='horizon', lower=1, upper=50\n )\n configspace.add_hyperparameter(hyperparameter=horizon)\n\n discount = cs.hyperparameters.UniformFloatHyperparameter(\n name='discount', lower=0.8, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=discount)\n\n ratio_based = cs.hyperparameters.CategoricalHyperparameter(\n name='ratio_based', choices=('no', 'yes')\n )\n configspace.add_hyperparameter(hyperparameter=ratio_based)\n\n clipping_value = cs.hyperparameters.UniformFloatHyperparameter(\n name='clipping_value', lower=0.05, upper=0.5\n )\n configspace.add_hyperparameter(hyperparameter=clipping_value)\n\n baseline = cs.hyperparameters.CategoricalHyperparameter(\n name='baseline',\n choices=('no', 'auto', 'same-network', 'same-policy', 'same-policy-noopt')\n )\n configspace.add_hyperparameter(hyperparameter=baseline)\n\n baseline_learning_rate = cs.hyperparameters.UniformFloatHyperparameter(\n name='baseline_learning_rate', lower=1e-5, upper=3e-2, log=True\n )\n configspace.add_hyperparameter(hyperparameter=baseline_learning_rate)\n\n estimate_advantage = cs.hyperparameters.CategoricalHyperparameter(\n name='estimate_advantage', choices=('no', 'yes')\n )\n configspace.add_hyperparameter(hyperparameter=estimate_advantage)\n\n entropy_regularization = cs.hyperparameters.UniformFloatHyperparameter(\n name='entropy_regularization', lower=1e-5, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=entropy_regularization)\n\n configspace.add_condition(\n condition=cs.EqualsCondition(child=clipping_value, parent=ratio_based, value='yes')\n )\n\n configspace.add_condition(\n condition=cs.NotEqualsCondition(\n child=baseline_learning_rate, parent=baseline, value='no'\n )\n )\n\n configspace.add_condition(\n condition=cs.NotEqualsCondition(\n child=estimate_advantage, parent=baseline, value='no'\n )\n )\n\n return configspace",
"def config_mapping(self) -> typing.Dict[str, type]:\n return self._subclasses",
"def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata",
"def get_config(self):\n\n # these are all that is needed to rebuild this class\n config = dict(hidden_size=self.hidden_size,\n word_embedding=self.word_embedding,\n detection_embedding=self.detection_embedding,\n mode=self.mode,\n decoder_pos_emb=self.decoder_pos_emb,\n ** self.kwargs)\n\n base_config = super(RegionFeature, self).get_config()\n return dict(list(base_config.items()) +\n list(config.items()))"
]
| [
"0.6081296",
"0.594875",
"0.5933283",
"0.59265006",
"0.5880599",
"0.5727958",
"0.56083965",
"0.5581696",
"0.5487566",
"0.54197836",
"0.54093397",
"0.5381316",
"0.5303956",
"0.5295256",
"0.5283128",
"0.52108383",
"0.5208851",
"0.51883763",
"0.5169774",
"0.5140319",
"0.51316565",
"0.5116865",
"0.5093044",
"0.5067684",
"0.50543183",
"0.5028537",
"0.50209117",
"0.5019377",
"0.50021094",
"0.49961215"
]
| 0.6325953 | 0 |
Converts a configspace instance dictionary to a DARTS genotype | def convert_config_to_genotype(config):
base_string = "NetworkSelectorDatasetInfo:darts:"
genotype = []
for i, cell_type in enumerate(["normal", "reduce"]):
genotype.append([])
start = 0
n = 2
for node_idx in range(4):
end = start + n
            for j in range(start, end):
                key = base_string + "edge_{}_{}".format(cell_type, j)
if key in config:
genotype[i].append((config[key], j - start))
if len(genotype[i]) != 2 * (node_idx + 1):
print("this is not a valid darts arch")
return config
start = end
n += 1
return Genotype(
normal=genotype[0],
normal_concat=[2, 3, 4, 5],
reduce=genotype[1],
reduce_concat=[2, 3, 4, 5],
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_configuration(self):\n space = Space()\n space.register(Integer(\"yolo1\", \"uniform\", -3, 6, shape=(2,)))\n space.register(Integer(\"yolo2\", \"uniform\", -3, 6, shape=(2,)))\n space.register(Real(\"yolo3\", \"norm\", 0.9))\n space.register(Categorical(\"yolo4\", (\"asdfa\", 2)))\n\n assert space.configuration == {\n \"yolo1\": \"uniform(-3, 3, shape=(2,), discrete=True)\",\n \"yolo2\": \"uniform(-3, 3, shape=(2,), discrete=True)\",\n \"yolo3\": \"normal(0.9)\",\n \"yolo4\": \"choices(['asdfa', 2])\",\n }",
"def fromgenotype(self):\n\t\tpass",
"def convert_genotype_to_config(arch):\n base_string = 'NetworkSelectorDatasetInfo:darts:'\n config = {}\n\n for cell_type in ['normal', 'reduce']:\n cell = eval('arch.' + cell_type)\n\n start = 0\n n = 2\n for node_idx in range(4):\n end = start + n\n ops = cell[2 * node_idx: 2 * node_idx + 2]\n\n # get edge idx\n edges = {base_string + 'edge_' + cell_type + '_' + str(start + i): op for\n op, i in ops}\n config.update(edges)\n\n if node_idx != 0:\n # get node idx\n input_nodes = sorted(list(map(lambda x: x[1], ops)))\n input_nodes_idx = '_'.join([str(i) for i in input_nodes])\n config.update({base_string + 'inputs_node_' + cell_type + '_' + str(node_idx + 2):\n input_nodes_idx})\n\n start = end\n n += 1\n return config",
"def dump(self):\n cfg = {\n \"Detector\" : \n { \n \"8680\": \n {\n \"CONVENTIONAL\": \n {\n \"1.0\": \n {\n \"1x\": (3.87, 8.51),\n \"2.4x\": (1.6, 6.74),\n \"4.9x\": (0.72, 6.23)\n },\n \n \"3.0\": \n {\n \"1x\": (10.12, 14.07),\n \"2.4x\": (4.2, 11.17),\n \"4.9x\": (1.89, 10)\n }\n \n },\n \n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"1x\": (18.97, 32.25),\n \"2.4x\": (7.61, 19.41),\n \"4.9x\": (3.47, 16.31)\n },\n \n \"3.0\" : \n {\n \"1x\": (46.56, 54.01),\n \"2.4x\": (19.82, 33.3),\n \"4.9x\": (8.84, 26.25)\n },\n \n \"5.0\" : \n {\n \"1x\": (46.49, 70.66),\n \"2.4x\": (19.53, 45.11),\n \"4.9x\": (8.9, 35.87)\n },\n \n \"10.0\" : \n {\n \"2.4x\": (22.45, 52.98),\n \"4.9x\": (10.43, 45.37),\n } \n } \n }, \n \n \"8325\": \n {\n \"CONVENTIONAL\": \n {\n \"1.0\": \n {\n \"1x\": (3.98, 8.64),\n \"2.5x\": (1.6, 6.75),\n \"5.1x\": (0.72, 6.23)\n },\n \n \"3.0\": \n {\n \"1x\": (10.45, 14.42),\n \"2.5x\": (4.14, 10.97),\n \"5.1x\": (1.89, 10.24)\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"1\": (19.73, 34.13),\n \"2.5x\": (7.88, 20.49),\n \"5.1x\": (3.54, 16.99)\n }, \n \n \"3.0\" : \n {\n \"1\": (48.23, 54.5),\n \"2.5x\": (19.77, 33.41),\n \"5.1x\": (9.04, 27.84)\n },\n \n \"5.0\" : \n {\n \"1\": (50.66, 77.0),\n \"2.5x\": (20.46, 48.08),\n \"5.1x\": (8.7, 35.5)\n },\n \n \"10.0\" : \n {\n \"2.5x\": (22.44, 53.63),\n \"5.1x\": (11.3, 52.55),\n } \n }\n },\n \n \"10522\": \n {\n \"CONVENTIONAL\": \n {\n \"0.1\": \n {\n \"Gain 1\": (3.37, 8.54),\n \"Gain 2\": (0.8, 3.15),\n },\n \n \"1.0\": \n {\n \"Gain 1\": (3.36, 6.51),\n \"Gain 2\": (0.79, 4.59),\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"Gain 1\": (16.4, 25.1),\n \"Gain 2\": (3.96, 12.3),\n }, \n \n \"10.0\" : \n {\n \"Gain 1\": (16.5, 76.0),\n \"Gain 2\": (4.05, 45.1),\n },\n \n \"20.0\" : \n {\n \"Gain 1\": (17.2, 193.0),\n \"Gain 2\": (4.64, 76.6),\n },\n \n \"30.0\" : \n {\n \"Gain 1\": (18.2, 272.0),\n \"Gain 2\": (5.46, 145.0),\n } \n }\n },\n \n \"10570\": \n {\n \"CONVENTIONAL\": \n {\n \"0.1\": \n {\n \"Gain 1\": (3.31, 8.82),\n \"Gain 2\": (0.8, 3.39),\n },\n \n \"1.0\": \n {\n \"Gain 1\": (3.30, 6.52),\n \"Gain 2\": (0.79, 4.83),\n }\n },\n \n \"EMGAIN\": \n {\n \"1.0\" : \n {\n \"Gain 1\": (16.4, 25.1),\n \"Gain 2\": (4.01, 12.6),\n }, \n \n \"10.0\" : \n {\n \"Gain 1\": (16.5, 85.6),\n \"Gain 2\": (4.06, 42.7),\n },\n \n \"20.0\" : \n {\n \"Gain 1\": (17.5, 142.0),\n \"Gain 2\": (4.81, 76.0),\n },\n \n \"30.0\" : \n {\n \"Gain 1\": (19.3, 256.0),\n \"Gain 2\": (5.88, 166.0),\n } \n }\n }\n },\n \n \"Phot\" :\n {\n \"fwhmpsf\": 6.0,\n \"sigma\": 10.0,\n \"exposure\": \"EXPTIME\",\n \"calgorithm\": \"centroid\",\n \"cbox\" : 8,\n \"maxshift\": 5, \n \"salgorithm\": \"median\",\n \"annulus\": 14,\n \"dannulus\": 16,\n \"apertures\": 12,\n \"zmag\": 27.11\n }\n }\n \n \n # Dump the configuration to json output file\n with open(self.conf_fname, \"w\") as fd:\n json.dump(cfg, fd) \n \n return",
"def GetConfig(self, genname):\n if genname in self.config:\n return self.config[genname]\n else:\n return dbus.Dictionary(signature='sv')",
"def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)",
"def convert_naslib_to_genotype(naslib_object):\n ops_to_genotype = {\n \"Identity\": \"skip_connect\",\n \"FactorizedReduce\": \"skip_connect\",\n \"SepConv3x3\": \"sep_conv_3x3\",\n \"DilConv3x3\": \"dil_conv_3x3\",\n \"SepConv5x5\": \"sep_conv_5x5\",\n \"DilConv5x5\": \"dil_conv_5x5\",\n \"AvgPool\": \"avg_pool_3x3\",\n \"MaxPool\": \"max_pool_3x3\",\n \"Zero\": \"zero\",\n }\n cells = [\n get_cell_of_type(naslib_object, \"normal_cell\"),\n get_cell_of_type(naslib_object, \"reduction_cell\"),\n ]\n converted_cells = []\n for cell in cells:\n edge_op_dict = {\n (i, j): ops_to_genotype[cell.edges[i, j][\"op\"].get_op_name]\n for i, j in cell.edges\n }\n op_edge_list = [\n (edge_op_dict[(i, j)], i - 1)\n for i, j in sorted(edge_op_dict, key=lambda x: x[1])\n if j < 7\n ]\n converted_cells.append(op_edge_list)\n\n return Genotype(\n normal=converted_cells[0],\n normal_concat=[2, 3, 4, 5],\n reduce=converted_cells[1],\n reduce_concat=[2, 3, 4, 5],\n )",
"def get_configspace(self):\n cd = self.cd\n sp_dict = {}\n sp_dict['epochs'] = int(cd['epochs'])\n sp_dict['gamma'] = self._get_range_uniform('gamma', cd)\n sp_dict['multilabel'] = self._get_atomic('multilabel', cd)\n sp_dict['lr'] = self._get_range_uniform('lr', cd)\n sp_dict['optimizer'] = self._get_categorical('optimizer', cd)\n sp_dict['n_latent'] = self._get_range_integer('n_latent',cd)\n sp_dict['enc_hidden_dim'] = self._get_range_integer('enc_hidden_dim', cd)\n sp_dict['batch_size'] = self._get_range_integer('batch_size', cd)\n sp_dict['coherence_loss_wt'] = self._get_range_uniform('coherence_loss_wt', cd) or 0.0\n sp_dict['redundancy_loss_wt'] = self._get_range_uniform('redundancy_loss_wt', cd) or 0.0\n sp_dict['num_enc_layers'] = self._get_range_integer('num_enc_layers', cd) or 1\n sp_dict['enc_dr'] = self._get_range_uniform('enc_dr', cd) or 0.0\n sp_dict['covar_net_layers'] = self._get_range_integer('covar_net_layers', cd) or 1\n sp_dict['classifier_dropout'] = self._get_range_uniform('classifier_dropout', cd) or 0.1\n\n embedding_types = cd['embedding']\n embedding_space = [] \n for et in embedding_types:\n if et['source'] == 'random':\n embedding_space.append(ag.space.Dict(**{'source': 'random', 'size': self._get_range_integer('size', et)}))\n else:\n fixed_assigned = et.get('fixed')\n if fixed_assigned is None:\n embedding_space.append(ag.space.Dict(**{'source': et['source'], 'fixed': ag.space.Bool()}))\n else:\n embedding_space.append(ag.space.Dict(**{'source': et['source'], 'fixed': fixed_assigned.lower()}))\n sp_dict['embedding'] = ag.space.Categorical(*embedding_space)\n\n latent_types = cd['latent_distribution']\n latent_space = []\n for lt in latent_types:\n dist_type = lt['dist_type']\n if dist_type == 'vmf':\n latent_space.append(ag.space.Dict(**{'dist_type': 'vmf', 'kappa': self._get_range_uniform('kappa', lt)}))\n elif dist_type == 'logistic_gaussian':\n latent_space.append(ag.space.Dict(**{'dist_type': 'logistic_gaussian', 'alpha': self._get_range_uniform('alpha', lt)}))\n else:\n latent_space.append(ag.space.Dict(**{'dist_type': 'gaussian'}))\n sp_dict['latent_distribution'] = ag.space.Categorical(*latent_space)\n return sp_dict",
"def serialize(self):\n cfg = SeqLib.serialize(self)\n\n cfg[\"variants\"] = dict()\n cfg[\"variants\"][\"wild type\"] = self.wt.serialize()\n cfg[\"variants\"][\"use aligner\"] = self.aligner is not None\n if self.max_mutations != DEFAULT_MAX_MUTATIONS:\n cfg[\"variants\"][\"max mutations\"] = self.max_mutations\n if self.variant_min_count > 0:\n cfg[\"variants\"][\"min count\"] = self.variant_min_count\n\n return cfg",
"def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }",
"def convert_genotype_to_config(genotype):\n base_string = \"NetworkSelectorDatasetInfo:darts:\"\n config = {}\n\n for cell_type in [\"normal\", \"reduce\"]:\n cell = eval(\"genotype.\" + cell_type)\n\n start = 0\n n = 2\n for node_idx in range(4):\n end = start + n\n ops = cell[2 * node_idx: 2 * node_idx + 2]\n\n # get edge idx\n edges = {\n base_string + \"edge_\" + cell_type + \"_\" + str(start + i): op\n for op, i in ops\n }\n config.update(edges)\n\n if node_idx != 0:\n # get node idx\n input_nodes = sorted(list(map(lambda x: x[1], ops)))\n input_nodes_idx = \"_\".join([str(i) for i in input_nodes])\n config.update(\n {\n base_string\n + \"inputs_node_\"\n + cell_type\n + \"_\"\n + str(node_idx + 2): input_nodes_idx\n }\n )\n\n start = end\n n += 1\n return config",
"def from_dict(dict):\n return SIO_SDC(**dict)",
"def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}",
"def convertToDistr(self,qtype,pts):\n return self.convertToDistrDict[qtype](pts)",
"def gen_species(self, model, options, misc_options):\n Avogadro = model.parameters.get_one(id='Avogadro')\n c = model.compartments.get_one(id='c')\n\n # species types\n\n init_concs = {}\n\n # other basic metabolites\n for species_type in options['basic']:\n species_type_structure = wc_lang.ChemicalStructure(value=species_type['structure_string'], format=eval(species_type['structure_format']))\n species_type_structure.empirical_formula = OpenBabelUtils.get_formula(species_type_structure.get_structure())\n species_type_structure.molecular_weight = species_type_structure.empirical_formula.get_molecular_weight()\n species_type_structure.charge = species_type_structure.get_structure().GetTotalCharge()\n model.species_types.create(id=species_type['id'], name=species_type['name'], type=wc_ontology[species_type['type']], structure=species_type_structure)\n init_concs[species_type['id']] = species_type['init_conc'] * Avogadro.value * c.init_volume.mean\n\n # RNA\n mean_gc_frac = options['rna']['mean_gc_frac']\n RNA_BASES = ['A', 'C', 'G', 'U']\n PROB_BASES = [(1 - mean_gc_frac) / 2, mean_gc_frac /2, mean_gc_frac/2, (1-mean_gc_frac)/2]\n\n\n rna_lens = 3 * self.rand(options['rna']['mean_rna_len'], count=options['rna']['num_rna'], min=2)\n for i in range(options['rna']['num_rna']):\n rna_str = 'AUG'\n for j in range(0, rna_lens[i], 3):\n codon = \"\".join(random.choices(RNA_BASES, weights=PROB_BASES, k=3))\n while codon in ['UAA', 'UAG', 'UGA']:\n codon = \"\".join(random.choices(RNA_BASES, weights=PROB_BASES, k=3))\n rna_str += codon\n rna_str += random.choice(['UAA', 'UAG', 'UGA'])\n rna_str_structure = wc_lang.ChemicalStructure(\n value=rna_str,\n format=wc_lang.ChemicalStructureFormat.BpForms,\n alphabet=wc_lang.ChemicalStructureAlphabet.rna)\n rna_str_structure.empirical_formula = rna_str_structure.get_structure().get_formula()\n rna_str_structure.molecular_weight = rna_str_structure.empirical_formula.get_molecular_weight()\n rna_str_structure.charge = rna_str_structure.get_structure().get_charge()\n rna_id = 'rna_'+str(i+1)\n rna = model.species_types.create(id=rna_id,\n name='RNA '+str(i+1),\n type=wc_ontology['WC:RNA'],\n structure=rna_str_structure)\n half_life_rna = model.parameters.create(id='half_life_'+rna_id,\n type=None,\n value=options['rna']['halflife'],\n units=unit_registry.parse_units('s'))\n init_concs[rna_id] = 1\n\n # protein\n codon_translation = misc_options['codon_translation']\n rna_species_types = [species_types for species_types in model.species_types if species_types.type == wc_ontology['WC:RNA']]\n for rna_species_type in rna_species_types:\n rna_str = rna_species_type.structure.value\n prot_str = ''\n for i in range(0, len(rna_str), 3):\n codon = rna_str[i:i+3]\n aa = codon_translation[codon]\n if aa == 'STOP':\n break\n else:\n prot_str += codon_translation[codon]\n prot_str_structure = wc_lang.ChemicalStructure(\n value=prot_str,\n format=wc_lang.ChemicalStructureFormat.BpForms,\n alphabet=wc_lang.ChemicalStructureAlphabet.protein)\n prot_str_structure.empirical_formula = prot_str_structure.get_structure().get_formula()\n prot_str_structure.molecular_weight = prot_str_structure.empirical_formula.get_molecular_weight()\n prot_str_structure.charge = prot_str_structure.get_structure().get_charge()\n prot_id = 'prot_'+rna_species_type.id[4:]\n prot = model.species_types.create(id=prot_id,\n name='Protein '+rna_species_type.id[4:],\n type=wc_ontology['WC:protein'],\n structure=prot_str_structure)\n half_life_prot = model.parameters.create(id='half_life_'+prot_id,\n 
type=None,\n value=options['protein']['halflife'],\n units=unit_registry.parse_units('s'))\n init_concs[prot_id] = 5\n\n\n\n # enzymes\n for species_type in options['enzymes']:\n enzyme = model.species_types.create(id=species_type['id'],\n name=species_type['name'],\n type=wc_ontology['WC:protein'])\n init_concs[species_type['id']] = species_type['init_conc']\n\n\n # species and initial concentrations\n for model_species_type in model.species_types:\n model_species = model.species.get_or_create(species_type=model_species_type, compartment=c)\n model_species.id = model_species.gen_id()\n conc = model.distribution_init_concentrations.create(species=model_species, mean=init_concs[model_species_type.id], units=unit_registry.parse_units('molecule'))\n conc.id = conc.gen_id()",
"def __init__(self, greentConf=\"greent.conf\",\n config_file=os.path.join (os.path.dirname (__file__), \"rosetta.yml\"),\n override={},\n delete_type_graph=False,\n init_db=False):\n \"\"\" Load the config file and set up a DiGraph representing the types we know \n about and how to transition between them. \"\"\"\n from greent.core import GreenT\n self.debug = False\n self.cache_path = 'rosetta_cache'\n\n logger.debug (\"-- Initialize GreenT service core.\")\n self.core = GreenT (config=greentConf, override=override)\n\n logger.debug (\"-- Loading Rosetta graph schematic config: {0}\".format (config_file))\n with open (config_file, 'r') as stream:\n self.config = yaml.load (stream)\n \n logger.debug (\"-- Initializing vocabulary and curies.\")\n self.curie = {}\n self.to_curie_map = {}\n self.vocab = self.config[\"@vocab\"]\n for k in self.vocab:\n self.to_curie_map[self.vocab[k]] = k\n\n logger.debug (\"-- Initializing Rosetta type graph\")\n self.concepts = self.config[\"@concepts\"]\n self.type_graph = TypeGraph (self.core.service_context)\n\n logger.debug (\"-- Extending curie map with uber_context.\")\n uber = Resource.get_resource_obj (os.path.join (\"jsonld\", \"uber_context.jsonld\"))\n context = uber['@context']\n self.terminate (context)\n for key, value in context.items ():\n self.curie[k] = value\n if isinstance (value, str):\n self.vocab[k] = value\n\n logger.debug (\"-- Merge Identifiers.org vocabulary into Rosetta vocab.\")\n identifiers_org = Resource.get_resource_obj ('identifiers.org.json')\n for module in identifiers_org:\n curie = module['prefix'].upper ()\n url = module['url']\n self.curie[curie] = url\n self.to_curie_map[url] = curie\n self.vocab[curie] = url\n\n if delete_type_graph:\n logger.debug (\"--Deleting type graph\")\n self.type_graph.delete_all ()\n \n if not init_db:\n return\n\n logger.debug (\"--Initialize concept graph metadata and create type nodes.\")\n self.type_graph.set_concept_metadata (self.concepts)\n for k, v in self.vocab.items ():\n if isinstance (v, str):\n self.type_graph.find_or_create (k, v)\n \n logger.debug (\"-- Initializing Rosetta transition graph.\")\n transitions = self.config[\"@transitions\"]\n errors = 0\n for L in transitions:\n for R in transitions[L]:\n if not L in self.vocab:\n errors += 1\n self.log_debug(\"{0} not in vocab.\".format (L))\n continue\n if not R in self.vocab:\n errors += 1\n self.log_debug (\"{0} not in vocab.\".format (R))\n continue\n assert L in self.vocab and R in self.vocab\n transition_dict = transitions[L][R]\n transition_obj = DataStructure.to_named_tuple ('TransitionTuple', transitions[L][R])\n if 'link' in transition_dict and 'op' in transition_dict:\n self.type_graph.add_edge (L, R,\n rel_name=transition_obj.link.upper (),\n predicate=transition_obj.link.upper (),\n op=transition_obj.op)\n if errors > 0:\n logger.error (\"** Encountered {0} errors. 
exiting.\".format (errors))\n sys.exit (errors)\n \n logger.debug (\"-- Connecting to translator registry to the type graph.\")\n '''\n subscriptions = self.core.translator_registry.get_subscriptions ()\n for s in subscriptions:\n t_a_iri = s[0]\n t_b_iri = s[1]\n method = s[2]\n op = \"translator_registry.{0}\".format (method[\"op\"])\n if not 'link' in method or method['link'] == None:\n link='unknown' #continue\n link = link.upper ()\n t_a = self.make_up_curie (self.unterminate (t_a_iri))\n t_b = self.make_up_curie (self.unterminate (t_b_iri))\n if not t_a:\n logger.debug (\"Unable to find curie for {}\".format (t_b))\n elif not t_b:\n logger.debug (\"Unable to find curie for {}\".format (t_b))\n else:\n self.type_graph.find_or_create (t_a, iri=t_a_iri)\n self.type_graph.find_or_create (t_b, iri=t_b_iri)\n if link and op:\n self.type_graph.add_edge (t_a, t_b, rel_name=link, predicate=link, op=op)\n '''\n from transreg import MethodMetadata\n self.core.translator_registry.set_rosetta (self)\n subscriptions = self.core.translator_registry.get_subscriptions ()\n for sub in subscriptions:\n in_curie = self.to_curie (self.unterminate (sub.in_type))\n out_curie = self.to_curie (self.unterminate (sub.out_type))\n op = \"translator_registry.{0}\".format (sub.op)\n link = sub.predicate if sub.predicate else \"unknown\"\n link = link.upper ()\n if not in_curie:\n logger.debug (\"Unable to find curie for {}\".format (sub.in_type))\n elif not out_curie:\n logger.debug (\"Unable to find curie for {}\".format (sub.out_type))\n else:\n if link and op:\n print (\"--------------> {} {}\".format (in_curie, out_curie))\n try:\n self.type_graph.add_edge (in_curie, out_curie, rel_name=link, predicate=link, op=op)\n except StatusException:\n logger.error(f\"Failed to create edge from {in_curie} to {out_curie}. One of these has an unspecified mapping to a concept\")",
"def test_genbank_to_genome_taxonomy(self):\n result = self.gfu.genbank_to_genome(self.ctx, {\n 'workspace_name': self.ws_name,\n 'generate_ids_if_needed': 'true', # why is this a string\n 'taxon_id': '3702',\n 'file': {\n 'path': f\"{_DATA_PATH}/wigglesworthia/genome.gb\"\n },\n 'genome_name': str(uuid4()),\n })\n ('result', result)\n ref = result[0]['genome_ref']\n self.assertTrue(ref, 'Genome ref exists')\n info = result[0]['genome_info']\n typ = info[2]\n self.assertTrue(typ.startswith('KBaseGenomes.Genome'))\n info_details = info[-1]\n self.assertEqual(info_details['Taxonomy'], (\n \"cellular organisms;Eukaryota;Viridiplantae;\"\n \"Streptophyta;Streptophytina;Embryophyta;Tracheophyta;\"\n \"Euphyllophyta;Spermatophyta;Magnoliopsida;Mesangiospermae;\"\n \"eudicotyledons;Gunneridae;Pentapetalae;rosids;malvids;\"\n \"Brassicales;Brassicaceae;Camelineae;Arabidopsis\"\n ))\n self.assertEqual(info_details['Size'], '697724')\n self.assertEqual(info_details['Source'], 'Genbank')\n self.assertEqual(info_details['Name'], 'Wigglesworthia glossinidia endosymbiont of Glossina brevipalpis')\n self.assertEqual(info_details['GC content'], '0.22479')\n self.assertEqual(info_details['Genetic code'], '11')\n self.assertEqual(info_details['Number of Genome Level Warnings'], '1')\n self.assertEqual(info_details['Source ID'], 'BA000021')\n self.assertEqual(info_details['Number of Protein Encoding Genes'], '20')\n self.assertEqual(info_details['Domain'], 'Eukaryota')\n self.assertTrue(info_details['Assembly Object'])\n self.assertEqual(info_details['Number contigs'], '1')\n self.assertEqual(info_details['Number of CDS'], '20')\n self.assertTrue(info_details['MD5'])",
"def build_phenotype(phenotype_id, adapter):\n phenotype_obj = {}\n phenotype = adapter.hpo_term(phenotype_id)\n if phenotype:\n phenotype_obj[\"phenotype_id\"] = phenotype[\"hpo_id\"]\n phenotype_obj[\"feature\"] = phenotype[\"description\"]\n return phenotype",
"def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict",
"def convert():\n parser = _parser()\n args = parser.parse_args()\n\n logger.setLevel(args.log_level)\n\n with contextlib.ExitStack() as stack:\n data = [stack.enter_context(ctra.formats.oxstats_genotypes(*a))\n for a in ctra.algorithms.kwise(args.load_oxstats, 2)]\n samples = list(itertools.chain.from_iterable(s for _, _, s, _ in data))\n merged = ctra.formats.merge_oxstats([d for _, _, _, d in data])\n if args.num_samples > len(samples):\n logger.error('{} individuals present in OXSTATS data, but {} were specified'.format(len(samples), args.num_samples))\n sys.exit(1)\n elif args.num_samples < len(samples):\n logger.warn('{} individuals present in OXSTATS data, but {} were specified'.format(len(samples), args.num_samples))\n if os.path.exists(args.out) and not args.force:\n logger.error('Output file {} already exists. Not overwriting')\n sys.exit(1)\n outfile = stack.enter_context(h5py.File(args.out, 'w'))\n outfile.create_dataset('dosage', shape=(args.num_samples, args.num_variants), dtype='float32', chunks=args.chunk_size)\n outfile.create_dataset('info', shape=(1, args.num_variants), dtype='float32')\n for j, row in enumerate(merged):\n if j >= args.num_variants:\n logger.warn('{} variants processed, but additional variants are present'.format(j))\n break\n probs = numpy.array([float(x) for x in row[5:]])\n x, y = info(probs)\n outfile['dosage'][:, j] = x\n if not j % 1000:\n logger.debug('{} variants processed'.format(j))\n if j + 1 < args.num_variants:\n logger.error('{} variants present in OXSTATS data, but {} were specified'.format(j, args.num_variants))\n sys.exit(1)",
"def __init__(self, taxid, species_name = None, lineage=None):\n self.genes = dict()\n self.taxid = taxid\n self.species = species_name\n self.lineage = lineage",
"def parse_genotypes(variant, sample_id):\n genotype_fields = variant['format'].split(':')\n genotype_data = variant[sample_id].split(':')\n return dict(zip(genotype_fields, genotype_data))",
"def get_spacenet_dataset():\n ds = AttrDict()\n classes = [\n '__background__', 'building']\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds",
"def as_dict(self):\n species_dict = dict()\n species_dict['force_field'] = self.force_field\n species_dict['is_ts'] = self.is_ts\n if self.e_elect is not None:\n species_dict['e_elect'] = self.e_elect\n if self.e0 is not None:\n species_dict['e0'] = self.e0\n species_dict['arkane_file'] = self.arkane_file\n if self.yml_path is not None:\n species_dict['yml_path'] = self.yml_path\n if self.is_ts:\n species_dict['ts_methods'] = self.ts_methods\n species_dict['ts_guesses'] = [tsg.as_dict() for tsg in self.ts_guesses]\n species_dict['ts_conf_spawned'] = self.ts_conf_spawned\n species_dict['ts_number'] = self.ts_number\n species_dict['ts_report'] = self.ts_report\n species_dict['rxn_label'] = self.rxn_label\n species_dict['successful_methods'] = self.successful_methods\n species_dict['unsuccessful_methods'] = self.unsuccessful_methods\n species_dict['chosen_ts_method'] = self.chosen_ts_method\n species_dict['chosen_ts'] = self.chosen_ts\n if self.run_time is not None:\n species_dict['run_time'] = self.run_time.total_seconds()\n species_dict['t1'] = self.t1\n species_dict['label'] = self.label\n species_dict['long_thermo_description'] = self.long_thermo_description\n species_dict['multiplicity'] = self.multiplicity\n if self.number_of_radicals is not None:\n species_dict['number_of_radicals'] = self.number_of_radicals\n species_dict['charge'] = self.charge\n species_dict['generate_thermo'] = self.generate_thermo\n if self.opt_level is not None:\n species_dict['opt_level'] = self.opt_level\n if self.final_xyz is not None:\n species_dict['final_xyz'] = self.final_xyz\n species_dict['number_of_rotors'] = self.number_of_rotors\n species_dict['rotors_dict'] = self.rotors_dict\n species_dict['external_symmetry'] = self.external_symmetry\n species_dict['optical_isomers'] = self.optical_isomers\n species_dict['neg_freqs_trshed'] = self.neg_freqs_trshed\n if self.conf_is_isomorphic is not None:\n species_dict['conf_is_isomorphic'] = self.conf_is_isomorphic\n if self.bond_corrections is not None:\n species_dict['bond_corrections'] = self.bond_corrections\n if self.mol is not None:\n species_dict['mol'] = self.mol.toAdjacencyList()\n if self.initial_xyz is not None:\n species_dict['initial_xyz'] = self.initial_xyz\n if self.checkfile is not None:\n species_dict['checkfile'] = self.checkfile\n if self.most_stable_conformer is not None:\n species_dict['most_stable_conformer'] = self.most_stable_conformer\n if self.cheap_conformer is not None:\n species_dict['cheap_conformer'] = self.cheap_conformer\n if self.recent_md_conformer is not None:\n species_dict['recent_md_conformer'] = self.recent_md_conformer\n if self.svpfit_output_file is not None:\n species_dict['svpfit_output_file'] = self.svpfit_output_file\n if self._radius is not None:\n species_dict['radius'] = self._radius\n if self.conformers:\n species_dict['conformers'] = self.conformers\n species_dict['conformer_energies'] = self.conformer_energies\n if self.conformers_before_opt is not None:\n species_dict['conformers_before_opt'] = self.conformers_before_opt\n if self.bdes is not None:\n species_dict['bdes'] = self.bdes\n return species_dict",
"def test_human_ens_to_sym(self):\n\n mapper = EnsemblMapper(\n from_type='ensembl', to_type='symbol', host=HOST)\n mapped = mapper.map_ids(['ENSG00000141510', 'ENSG00000012048'])\n\n assert mapped == ['TP53', 'BRCA1']",
"def __init__(self, cfg, tops_type=[3, 5, 10]):\n\n attr_cloth_file = open(cfg.attr_cloth_file).readlines()\n self.attr_idx2name = {}\n for i, line in enumerate(attr_cloth_file[2:]):\n self.attr_idx2name[i] = line.strip('\\n').split()[0]",
"def _gen_config(self, address, direction, attr_dict):\n global DefaultConfig\n global DeviceId\n for key in DefaultConfig.keys():\n self.DMAinstance.__setattr__(key,DefaultConfig[key])\n if direction == DMA_TO_DEV:\n self.DMAinstance.HasS2Mm = 0\n self.DMAinstance.HasMm2S = 1\n elif direction == DMA_BIDIRECTIONAL:\n self.DMAinstance.HasS2Mm = 1\n self.DMAinstance.HasMm2S = 1 \n self._bufPtr = None\n self._TransferInitiated = 0\n if attr_dict is not None:\n if type(attr_dict) == dict:\n for key in attr_dict.keys():\n self.DMAinstance.__setattr__(key,attr_dict[key])\n else:\n print(\"Warning: Expecting 3rd Arg to be a dict.\")\n\n virt = libxlnk.cma_mmap(address,0x10000)\n if virt == -1:\n raise RuntimeError(\"Memory map of driver failed.\")\n self.DMAinstance.BaseAddr = ffi.cast(\"uint32_t *\",virt)\n self.DMAinstance.DeviceId = DeviceId\n DeviceId += 1\n \n for key in DefaultConfig.keys():\n self.Configuration[key] = self.DMAinstance.__getattribute__(key)",
"def variants ( self ) :\n vars = []\n items = [ 'distrib' , 'default' ]\n items += [ 'stat_%s' % d for d in range ( 10 ) ]\n items += [ 'syst_%s' % d for d in range ( 10 ) ]\n \n from ostap.core.core import rootError \n from ostap.logger.logger import logFatal\n \n for item in items :\n if self.__variant == item : continue \n path = os.path.join ( self.__config_run.eosrootdir ,\n self.__config ,\n \"%s_%s.root\" % ( self.__dataset, item ) )\n with logFatal() , rootError () : \n rf = ROOT.TFile.Open ( path , 'READ' , exception = False )\n if rf and rf.IsOpen ( ) :\n vars.append ( item )\n rf.Close() \n \n return tuple ( vars )",
"def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t 
gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models",
"def convert_genotype_to_naslib(genotype, naslib_object):\n genotype_to_ops = {\n \"skip_connect\": (\"Identity\", \"FactorizedReduce\"),\n \"sep_conv_3x3\": \"SepConv3x3\",\n \"dil_conv_3x3\": \"DilConv3x3\",\n \"sep_conv_5x5\": \"SepConv5x5\",\n \"dil_conv_5x5\": \"DilConv5x5\",\n \"avg_pool_3x3\": \"AvgPool\",\n \"max_pool_3x3\": \"MaxPool\",\n # \"zero\": (\"Zero\"),\n }\n cell_names = [\"normal_cell\", \"reduction_cell\"]\n\n # create a dictionary of edges to ops in the genotype\n edge_op_dict = {\"normal_cell\": {}, \"reduction_cell\": {}}\n for c, cell_type in enumerate([\"normal\", \"reduce\"]):\n cell = eval(\"genotype.\" + cell_type)\n tail = 2\n for i, edge in enumerate(cell):\n if i % 2 == 0:\n tail += 1\n head = edge[1] + 1\n edge_op_dict[cell_names[c]][(head, tail)] = genotype_to_ops[edge[0]]\n\n def add_genotype_op_index(edge):\n # function that adds the op index from genotype to each edge, and deletes the rest\n if (edge.head, edge.tail) in edge_op_dict[edge.data.cell_name]:\n for i, op in enumerate(edge.data.op):\n if (\n op.get_op_name\n in edge_op_dict[edge.data.cell_name][(edge.head, edge.tail)]\n ):\n index = i\n break\n edge.data.set(\"op_index\", index, shared=True)\n else:\n edge.data.delete()\n\n def update_ops(edge):\n # function that replaces the primitive ops at the edges with the ones from genotype\n if isinstance(edge.data.op, list):\n primitives = edge.data.op\n else:\n primitives = edge.data.primitives\n\n edge.data.set(\"op\", primitives[edge.data.op_index])\n edge.data.set(\"primitives\", primitives) # store for later use\n\n naslib_object.update_edges(\n add_genotype_op_index,\n scope=naslib_object.OPTIMIZER_SCOPE,\n private_edge_data=False,\n )\n\n naslib_object.update_edges(\n update_ops, scope=naslib_object.OPTIMIZER_SCOPE, private_edge_data=True\n )"
]
| [
"0.5696881",
"0.5341971",
"0.5267143",
"0.52451855",
"0.52086383",
"0.5163825",
"0.510316",
"0.5082286",
"0.5014554",
"0.49936697",
"0.49704665",
"0.49327058",
"0.4817968",
"0.47674105",
"0.47186384",
"0.47139776",
"0.4705163",
"0.4680166",
"0.4668757",
"0.46615347",
"0.46294394",
"0.46168363",
"0.46058896",
"0.4587827",
"0.45862588",
"0.45856914",
"0.45809487",
"0.45664626",
"0.4558051",
"0.45576137"
]
| 0.60542524 | 0 |
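A minimal usage sketch for convert_config_to_genotype above, assuming the standard DARTS Genotype namedtuple is in scope. The build_dummy_config helper and its op/edge choices are hypothetical and only mirror the NetworkSelectorDatasetInfo:darts:edge_* key scheme the function reads.

```python
# Minimal sketch, assuming convert_config_to_genotype (above) and Genotype
# share one namespace. build_dummy_config and its op/edge choices are
# hypothetical; only the key naming scheme is taken from the function.
from collections import namedtuple

Genotype = namedtuple("Genotype", "normal normal_concat reduce reduce_concat")

def build_dummy_config():
    config = {}
    for cell_type in ["normal", "reduce"]:
        start, n = 0, 2
        for _ in range(4):  # four intermediate nodes per cell
            # choose the first two candidate input edges of this node
            for j in (start, start + 1):
                key = "NetworkSelectorDatasetInfo:darts:edge_{}_{}".format(cell_type, j)
                config[key] = "sep_conv_3x3"
            start += n
            n += 1
    return config

genotype = convert_config_to_genotype(build_dummy_config())
print(genotype.normal)  # eight (op_name, input_index) pairs, two per node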
Converts Genotype to the compact representation | def convert_genotype_to_compact(genotype):
OPS = [
"max_pool_3x3",
"avg_pool_3x3",
"skip_connect",
"sep_conv_3x3",
"sep_conv_5x5",
"dil_conv_3x3",
"dil_conv_5x5",
]
compact = []
for i, cell_type in enumerate(["normal", "reduce"]):
cell = eval("genotype." + cell_type)
compact.append([])
for j in range(8):
compact[i].append((cell[j][1], OPS.index(cell[j][0])))
compact_tuple = (tuple(compact[0]), tuple(compact[1]))
return compact_tuple | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_compact_to_genotype(compact):\n OPS = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\",\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n ]\n genotype = []\n\n for i in range(2):\n cell = compact[i]\n genotype.append([])\n\n for j in range(8):\n genotype[i].append((OPS[cell[j][1]], cell[j][0]))\n\n return Genotype(\n normal=genotype[0],\n normal_concat=[2, 3, 4, 5],\n reduce=genotype[1],\n reduce_concat=[2, 3, 4, 5],\n )",
"def fromgenotype(self):\n\t\tpass",
"def toGenomeRepresentation(self):\n s = \"\"\n s += str(self.axiom)\n s += \"||\"+str(self.niterations) # The iterations must be shown as well\n for prod in self.productions:\n s += \"||\"\n s += prod.toGenomeRepresentation()\n return s",
"def genotype(self):\n\t\tgenotype = \"\"\n\t\tfields = vars(self)\n\t\tfor name, field in fields.items():\n\t\t\tif isinstance(field, Pattern):\n\t\t\t\tgenotype += field.genotype()\n\t\t\telse:\n\t\t\t\tgenotype += str(field)\n\t\t\tgenotype += \"\\0\"\n\n\t\treturn genotype",
"def all_genotype(ploidy):\n return [\"\".join(comb) for comb in cwr(\"ACGT-\", ploidy)]",
"def genotypes(self):\n return self.data.genotypes.values",
"def __str__(self):\n return self.fam.c_str(self)",
"def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out",
"def __str__(self):\n\n return self.fam.c_str(self)",
"def to_motevo(self):\n m = \"//\\n\"\n m += \"NA {}\\n\".format(self.id)\n m += \"P0\\tA\\tC\\tG\\tT\\n\"\n for i, row in enumerate(self.pfm):\n m += \"{}\\t{}\\n\".format(i, \"\\t\".join([str(int(x)) for x in row]))\n m += \"//\"\n return m",
"def __str__(self):\n return self.make_flat()",
"def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene",
"def stringify_short(self):\n return self.stringify()",
"def convertToSpectroGram(self):",
"def __str__(self) -> str:\n\n return 'Structure with {} atoms of types {}'\\\n .format(self.nat, set(self.species_labels))",
"def _raw_to_string(self, dtype, units='Angstrom', atom_format=None, ghost_format=None, width=17, prec=12):\n\n #molrec = self.to_dict(force_units=units, np_out=True)\n molrec = self.to_dict(np_out=True)\n smol = molparse.to_string(molrec,\n dtype=dtype,\n units=units,\n atom_format=atom_format,\n ghost_format=ghost_format,\n width=width,\n prec=prec)\n return smol",
"def stringify(self):\n string = self.chars[\"type\"] + \" \"\n \n # current hearts\n for _ in range(self.hearts):\n string += self.chars[\"heart\"]\n\n # dead hearts\n for _ in range(3 - self.hearts):\n string += self.chars[\"noheart\"]\n\n return string",
"def __str__(self):\n\n\t\tterms = \\\n\t\t[{att:self.att[att]} for att in self.att if type(self.att[att])==bool]\n\n\t\treturn ''.join([str(int(t.values()[0])) for t in terms])",
"def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st",
"def to_string(self, smirnoff_data):\n pass",
"def convert_naslib_to_genotype(naslib_object):\n ops_to_genotype = {\n \"Identity\": \"skip_connect\",\n \"FactorizedReduce\": \"skip_connect\",\n \"SepConv3x3\": \"sep_conv_3x3\",\n \"DilConv3x3\": \"dil_conv_3x3\",\n \"SepConv5x5\": \"sep_conv_5x5\",\n \"DilConv5x5\": \"dil_conv_5x5\",\n \"AvgPool\": \"avg_pool_3x3\",\n \"MaxPool\": \"max_pool_3x3\",\n \"Zero\": \"zero\",\n }\n cells = [\n get_cell_of_type(naslib_object, \"normal_cell\"),\n get_cell_of_type(naslib_object, \"reduction_cell\"),\n ]\n converted_cells = []\n for cell in cells:\n edge_op_dict = {\n (i, j): ops_to_genotype[cell.edges[i, j][\"op\"].get_op_name]\n for i, j in cell.edges\n }\n op_edge_list = [\n (edge_op_dict[(i, j)], i - 1)\n for i, j in sorted(edge_op_dict, key=lambda x: x[1])\n if j < 7\n ]\n converted_cells.append(op_edge_list)\n\n return Genotype(\n normal=converted_cells[0],\n normal_concat=[2, 3, 4, 5],\n reduce=converted_cells[1],\n reduce_concat=[2, 3, 4, 5],\n )",
"def collapse_genotypes(pL,gL):\n if len(gL) < 2:\n return gL\n else:\n uniqueL = [] # list of unique genotypes relative to ploidy\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL",
"def __str__(self):\n l = [\"MARKING DUMP BEGIN\\n\"]\n for place_name, place_type in self.place_types.items():\n l.append(place_name)\n l.append(\" \\t\")\n l.append(str(place_type.info.type))\n l.append(\" \\tonesafe\") if place_type.info.one_safe else l.append(\"\")\n l.append(\"\\n\")\n l.append(\"MARKING DUMP END\\n\")\n return \"\".join(l)",
"def __str__(self):\n tag = []\n for key in self.tags:\n if key == 'label':\n self.type = self.tags[key]\n else:\n try:\n tag.append(\"%s=%0.3f\" % (str(key), self.tags[key]))\n except TypeError:\n tag.append(\"%s=%s\" % (str(key), str(self.tags[key])))\n \n \n tag = \";\".join(tag)\n dat = [self.chrom, self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end, self.type, self.size, \\\n tag]\n\n return \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\t{9}\".format(*dat) \\\n .replace(\"None\", \".\")",
"def to_representation(self) -> str:\n raise NotImplementedError()",
"def get_all_possible_genotypes(self):\n # Get all genotypes.\n return mutations_to_genotypes(self.mutations, wildtype=self.wildtype)",
"def serotype_escherichia(metadata, analysistype):\n for sample in metadata:\n # Initialise negative results to be overwritten when necessary\n sample[analysistype].best_o_pid = '-'\n sample[analysistype].o_genes = ['-']\n sample[analysistype].o_set = ['-']\n sample[analysistype].best_h_pid = '-'\n sample[analysistype].h_genes = ['-']\n sample[analysistype].h_set = ['-']\n if sample.general.bestassemblyfile != 'NA':\n if sample.general.closestrefseqgenus in ['Escherichia', 'Shigella']:\n o = dict()\n h = dict()\n for result, percentid in sample[analysistype].blastresults.items():\n if 'O' in result.split('_')[-1]:\n o.update({result: float(percentid)})\n if 'H' in result.split('_')[-1]:\n h.update({result: float(percentid)})\n # O\n try:\n sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_o_pid = str(sorted_o[0][1])\n\n sample[analysistype].o_genes = [gene for gene, pid in o.items()\n if str(pid) == sample[analysistype].best_o_pid]\n sample[analysistype].o_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].o_genes))\n except (KeyError, IndexError):\n pass\n # H\n try:\n sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_h_pid = str(sorted_h[0][1])\n sample[analysistype].h_genes = [gene for gene, pid in h.items()\n if str(pid) == sample[analysistype].best_h_pid]\n sample[analysistype].h_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].h_genes))\n except (KeyError, IndexError):\n pass\n return metadata",
"def __str__(self):\n return (str(self.chromosome_id) + '. Chromosome: Genes: ' + str(\n self.genes) + '; Fitness: ' + str(self.fitness_value))",
"def __repr__(self):\n return repr((self.__fitness, self.__genes))",
"def compact(self):\n raise NotImplementedError"
]
| [
"0.6532562",
"0.5913489",
"0.57653874",
"0.561887",
"0.54874766",
"0.54294086",
"0.5336293",
"0.52914166",
"0.5279694",
"0.5225547",
"0.5195389",
"0.5188905",
"0.5179097",
"0.51273793",
"0.5098214",
"0.50762075",
"0.5054869",
"0.5048605",
"0.5029797",
"0.5024353",
"0.50183964",
"0.5016719",
"0.5000865",
"0.49773595",
"0.49674714",
"0.49613982",
"0.49575698",
"0.49520656",
"0.4943103",
"0.4941354"
]
| 0.7310524 | 0 |
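A short sketch of what the compact form produced by convert_genotype_to_compact above looks like; the Genotype namedtuple and the example cell below are illustrative assumptions, not values from the dataset.

```python
# Sketch only: the example cell is hypothetical; convert_genotype_to_compact
# comes from the row above. Each entry becomes (input_index, op_index) with
# op_index taken from the fixed OPS list inside that function.
from collections import namedtuple

Genotype = namedtuple("Genotype", "normal normal_concat reduce reduce_concat")

cell = [
    ("sep_conv_3x3", 0), ("sep_conv_3x3", 1),  # node 2
    ("skip_connect", 0), ("sep_conv_3x3", 1),  # node 3
    ("skip_connect", 0), ("sep_conv_3x3", 1),  # node 4
    ("skip_connect", 0), ("dil_conv_3x3", 2),  # node 5
]
g = Genotype(normal=cell, normal_concat=[2, 3, 4, 5],
             reduce=cell, reduce_concat=[2, 3, 4, 5])

compact = convert_genotype_to_compact(g)
print(compact[0][:2])  # ((0, 3), (1, 3)) -> inputs 0 and 1, both sep_conv_3x3
```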
Converts the compact representation to a Genotype | def convert_compact_to_genotype(compact):
OPS = [
"max_pool_3x3",
"avg_pool_3x3",
"skip_connect",
"sep_conv_3x3",
"sep_conv_5x5",
"dil_conv_3x3",
"dil_conv_5x5",
]
genotype = []
for i in range(2):
cell = compact[i]
genotype.append([])
for j in range(8):
genotype[i].append((OPS[cell[j][1]], cell[j][0]))
return Genotype(
normal=genotype[0],
normal_concat=[2, 3, 4, 5],
reduce=genotype[1],
reduce_concat=[2, 3, 4, 5],
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_genotype_to_compact(genotype):\n OPS = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\",\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n ]\n compact = []\n\n for i, cell_type in enumerate([\"normal\", \"reduce\"]):\n cell = eval(\"genotype.\" + cell_type)\n compact.append([])\n\n for j in range(8):\n compact[i].append((cell[j][1], OPS.index(cell[j][0])))\n\n compact_tuple = (tuple(compact[0]), tuple(compact[1]))\n return compact_tuple",
"def fromgenotype(self):\n\t\tpass",
"def toGenomeRepresentation(self):\n s = \"\"\n s += str(self.axiom)\n s += \"||\"+str(self.niterations) # The iterations must be shown as well\n for prod in self.productions:\n s += \"||\"\n s += prod.toGenomeRepresentation()\n return s",
"def convert_naslib_to_genotype(naslib_object):\n ops_to_genotype = {\n \"Identity\": \"skip_connect\",\n \"FactorizedReduce\": \"skip_connect\",\n \"SepConv3x3\": \"sep_conv_3x3\",\n \"DilConv3x3\": \"dil_conv_3x3\",\n \"SepConv5x5\": \"sep_conv_5x5\",\n \"DilConv5x5\": \"dil_conv_5x5\",\n \"AvgPool\": \"avg_pool_3x3\",\n \"MaxPool\": \"max_pool_3x3\",\n \"Zero\": \"zero\",\n }\n cells = [\n get_cell_of_type(naslib_object, \"normal_cell\"),\n get_cell_of_type(naslib_object, \"reduction_cell\"),\n ]\n converted_cells = []\n for cell in cells:\n edge_op_dict = {\n (i, j): ops_to_genotype[cell.edges[i, j][\"op\"].get_op_name]\n for i, j in cell.edges\n }\n op_edge_list = [\n (edge_op_dict[(i, j)], i - 1)\n for i, j in sorted(edge_op_dict, key=lambda x: x[1])\n if j < 7\n ]\n converted_cells.append(op_edge_list)\n\n return Genotype(\n normal=converted_cells[0],\n normal_concat=[2, 3, 4, 5],\n reduce=converted_cells[1],\n reduce_concat=[2, 3, 4, 5],\n )",
"def test_genbank_to_genome_taxonomy(self):\n result = self.gfu.genbank_to_genome(self.ctx, {\n 'workspace_name': self.ws_name,\n 'generate_ids_if_needed': 'true', # why is this a string\n 'taxon_id': '3702',\n 'file': {\n 'path': f\"{_DATA_PATH}/wigglesworthia/genome.gb\"\n },\n 'genome_name': str(uuid4()),\n })\n ('result', result)\n ref = result[0]['genome_ref']\n self.assertTrue(ref, 'Genome ref exists')\n info = result[0]['genome_info']\n typ = info[2]\n self.assertTrue(typ.startswith('KBaseGenomes.Genome'))\n info_details = info[-1]\n self.assertEqual(info_details['Taxonomy'], (\n \"cellular organisms;Eukaryota;Viridiplantae;\"\n \"Streptophyta;Streptophytina;Embryophyta;Tracheophyta;\"\n \"Euphyllophyta;Spermatophyta;Magnoliopsida;Mesangiospermae;\"\n \"eudicotyledons;Gunneridae;Pentapetalae;rosids;malvids;\"\n \"Brassicales;Brassicaceae;Camelineae;Arabidopsis\"\n ))\n self.assertEqual(info_details['Size'], '697724')\n self.assertEqual(info_details['Source'], 'Genbank')\n self.assertEqual(info_details['Name'], 'Wigglesworthia glossinidia endosymbiont of Glossina brevipalpis')\n self.assertEqual(info_details['GC content'], '0.22479')\n self.assertEqual(info_details['Genetic code'], '11')\n self.assertEqual(info_details['Number of Genome Level Warnings'], '1')\n self.assertEqual(info_details['Source ID'], 'BA000021')\n self.assertEqual(info_details['Number of Protein Encoding Genes'], '20')\n self.assertEqual(info_details['Domain'], 'Eukaryota')\n self.assertTrue(info_details['Assembly Object'])\n self.assertEqual(info_details['Number contigs'], '1')\n self.assertEqual(info_details['Number of CDS'], '20')\n self.assertTrue(info_details['MD5'])",
"def genotype(self):\n\t\tgenotype = \"\"\n\t\tfields = vars(self)\n\t\tfor name, field in fields.items():\n\t\t\tif isinstance(field, Pattern):\n\t\t\t\tgenotype += field.genotype()\n\t\t\telse:\n\t\t\t\tgenotype += str(field)\n\t\t\tgenotype += \"\\0\"\n\n\t\treturn genotype",
"def convertToSpectroGram(self):",
"def genotypes(self):\n return self.data.genotypes.values",
"def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene",
"def _raw_to_string(self, dtype, units='Angstrom', atom_format=None, ghost_format=None, width=17, prec=12):\n\n #molrec = self.to_dict(force_units=units, np_out=True)\n molrec = self.to_dict(np_out=True)\n smol = molparse.to_string(molrec,\n dtype=dtype,\n units=units,\n atom_format=atom_format,\n ghost_format=ghost_format,\n width=width,\n prec=prec)\n return smol",
"def all_genotype(ploidy):\n return [\"\".join(comb) for comb in cwr(\"ACGT-\", ploidy)]",
"def _extract_genotype(geno_field):\n # Assume the genotype is the first format field and raise if it's not\n geno = geno_field.split(':')[0]\n if not GENO_REGEX.search(geno):\n raise ValueError('\"{}\" does not look like a genotype'.format(geno))\n return geno",
"def genotype(self, arch):\n backbone_r, neck_r = arch\n return (backbone_r.genotype, neck_r.genotype if neck_r is not None else None)",
"def convert_genotype_to_naslib(genotype, naslib_object):\n genotype_to_ops = {\n \"skip_connect\": (\"Identity\", \"FactorizedReduce\"),\n \"sep_conv_3x3\": \"SepConv3x3\",\n \"dil_conv_3x3\": \"DilConv3x3\",\n \"sep_conv_5x5\": \"SepConv5x5\",\n \"dil_conv_5x5\": \"DilConv5x5\",\n \"avg_pool_3x3\": \"AvgPool\",\n \"max_pool_3x3\": \"MaxPool\",\n # \"zero\": (\"Zero\"),\n }\n cell_names = [\"normal_cell\", \"reduction_cell\"]\n\n # create a dictionary of edges to ops in the genotype\n edge_op_dict = {\"normal_cell\": {}, \"reduction_cell\": {}}\n for c, cell_type in enumerate([\"normal\", \"reduce\"]):\n cell = eval(\"genotype.\" + cell_type)\n tail = 2\n for i, edge in enumerate(cell):\n if i % 2 == 0:\n tail += 1\n head = edge[1] + 1\n edge_op_dict[cell_names[c]][(head, tail)] = genotype_to_ops[edge[0]]\n\n def add_genotype_op_index(edge):\n # function that adds the op index from genotype to each edge, and deletes the rest\n if (edge.head, edge.tail) in edge_op_dict[edge.data.cell_name]:\n for i, op in enumerate(edge.data.op):\n if (\n op.get_op_name\n in edge_op_dict[edge.data.cell_name][(edge.head, edge.tail)]\n ):\n index = i\n break\n edge.data.set(\"op_index\", index, shared=True)\n else:\n edge.data.delete()\n\n def update_ops(edge):\n # function that replaces the primitive ops at the edges with the ones from genotype\n if isinstance(edge.data.op, list):\n primitives = edge.data.op\n else:\n primitives = edge.data.primitives\n\n edge.data.set(\"op\", primitives[edge.data.op_index])\n edge.data.set(\"primitives\", primitives) # store for later use\n\n naslib_object.update_edges(\n add_genotype_op_index,\n scope=naslib_object.OPTIMIZER_SCOPE,\n private_edge_data=False,\n )\n\n naslib_object.update_edges(\n update_ops, scope=naslib_object.OPTIMIZER_SCOPE, private_edge_data=True\n )",
"def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes",
"def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)",
"def genotype(self, normal_primitives, reduce_primitives):\n\n def _parse_proxyless(weights, primitives):\n # Find the best op in this weight.\n k_best = np.argmax(weights, axis=1)\n return [primitives[k] for k in k_best]\n\n _parse = _parse_proxyless\n gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy(), normal_primitives)\n gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy(), reduce_primitives)\n alphas_normal = self.alphas_normal\n alphas_reduce = self.alphas_reduce\n genotype = Genotype(\n normal=gene_normal,\n reduce=gene_reduce,\n alphas_normal=alphas_normal,\n alphas_reduce=alphas_reduce,\n )\n return genotype",
"def fromGenomeRepresentation(self,genome):\n self.clear()\n #print(genome)\n tokens = genome.split(\"||\")\n self.setAxiomFromString(tokens[0])\n self.setIterations(int(tokens[1]))\n for i in range(2,len(tokens)): self.addProductionFromGenomeRepresentation(tokens[i])",
"def __str__(self):\n return self.fam.c_str(self)",
"def __str__(self):\n out = self.san\n if self.comment != \"\":\n out += \" {\" + self.comment.replace('\\n', ' ') + \"}\"\n if len(self.nags) > 0:\n for n in self.nags:\n out += \" \" + n\n for v in self.variations:\n out += \" (\" + str(v).strip(' ') + \")\"\n return out",
"def serotype_escherichia(metadata, analysistype):\n for sample in metadata:\n # Initialise negative results to be overwritten when necessary\n sample[analysistype].best_o_pid = '-'\n sample[analysistype].o_genes = ['-']\n sample[analysistype].o_set = ['-']\n sample[analysistype].best_h_pid = '-'\n sample[analysistype].h_genes = ['-']\n sample[analysistype].h_set = ['-']\n if sample.general.bestassemblyfile != 'NA':\n if sample.general.closestrefseqgenus in ['Escherichia', 'Shigella']:\n o = dict()\n h = dict()\n for result, percentid in sample[analysistype].blastresults.items():\n if 'O' in result.split('_')[-1]:\n o.update({result: float(percentid)})\n if 'H' in result.split('_')[-1]:\n h.update({result: float(percentid)})\n # O\n try:\n sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_o_pid = str(sorted_o[0][1])\n\n sample[analysistype].o_genes = [gene for gene, pid in o.items()\n if str(pid) == sample[analysistype].best_o_pid]\n sample[analysistype].o_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].o_genes))\n except (KeyError, IndexError):\n pass\n # H\n try:\n sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_h_pid = str(sorted_h[0][1])\n sample[analysistype].h_genes = [gene for gene, pid in h.items()\n if str(pid) == sample[analysistype].best_h_pid]\n sample[analysistype].h_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].h_genes))\n except (KeyError, IndexError):\n pass\n return metadata",
"def __str__(self):\n\n return self.fam.c_str(self)",
"def provn_representation(self):\n return \"'%s'\" % self._str",
"def to_representation(self, data): # lint-amnesty, pylint: disable=arguments-differ\n return str(data)",
"def to_representation(self, data): # lint-amnesty, pylint: disable=arguments-differ\n return str(data)",
"def deconstruct_single(self, op):\n bom = str(PlatformVar(\"bom\"))\n int_size = int(self.__size)\n if 1 == int_size:\n return struct.pack(bom + \"B\", op)\n if 2 == int_size:\n if 0 > op:\n return struct.pack(bom + \"h\", op)\n else:\n return struct.pack(bom + \"H\", op)\n elif 4 == int_size:\n if 0 > op:\n return struct.pack(bom + \"i\", op)\n else:\n return struct.pack(bom + \"I\", op)\n elif 8 == int_size:\n if 0 > op:\n return struct.pack(bom + \"q\", op)\n else:\n return struct.pack(bom + \"Q\", op)\n raise RuntimeError(\"cannot pack value of size %i\" % (int_size))",
"def field_to_s(self, fieldname, tag = False):\n fieldname = self.__class__.FIELD_ALIAS.get(fieldname, fieldname)\n v = self._data.get(fieldname, None)\n if v is None:\n raise gfapy.NotFoundError(\"Field {} not found\".format(fieldname))\n t = self._field_or_default_datatype(fieldname, v)\n if not isinstance(v, str):\n v = gfapy.Field._to_gfa_field(v, datatype = t, fieldname = fieldname,\n line = self)\n if self.vlevel >= 2:\n gfapy.Field._validate_gfa_field(v, t, fieldname)\n if tag:\n return gfapy.Field._to_gfa_tag(v, fieldname, datatype = t, line = self)\n else:\n return v",
"def gon(self):\n return dec2gon(self.dec())",
"def gon(self):\n return dec2gon(self.dec())",
"def GenomeSerialization(genome_class):\n\ttest_genome = genome_class()\n\torig_genome = str(test_genome)\n\tserialized_genome = test_genome.serialize()\n\ttest_genome.deserialize(serialized_genome)\n\tprocessed_genome = str(test_genome)\n\treturn (orig_genome != processed_genome)"
]
| [
"0.6472279",
"0.6390739",
"0.5686235",
"0.5626062",
"0.56175274",
"0.5533054",
"0.5452725",
"0.53005815",
"0.5274307",
"0.5267956",
"0.5214343",
"0.51300037",
"0.5117317",
"0.5101243",
"0.5099853",
"0.5059417",
"0.50422156",
"0.50226486",
"0.50093687",
"0.49619627",
"0.49587175",
"0.49302396",
"0.4926566",
"0.4910359",
"0.4910359",
"0.49080083",
"0.49029955",
"0.49019614",
"0.49019614",
"0.48986834"
]
| 0.69796747 | 0 |
Dump coordinates of a geometry in the same order as data packing | def dump_coords(geom): # -> Any | list[Unknown]:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def coords_to_structure(self) -> None:\n ...",
"def __dump_point(obj, big_endian):\n wkb_string = b''\n\n if big_endian:\n wkb_string += BIG_ENDIAN\n else:\n wkb_string += LITTLE_ENDIAN\n\n coords = obj['coordinates']\n num_dims = len(coords)\n if num_dims == 2:\n type_byte_str = __WKB['2D']['Point']\n elif num_dims == 3:\n type_byte_str = __WKB['Z']['Point']\n elif num_dims == 4:\n type_byte_str = __WKB['ZM']['Point']\n else:\n pass\n # TODO: raise\n\n if not big_endian:\n # reverse the byte ordering for little endian\n type_byte_str = type_byte_str[::-1]\n wkb_string += type_byte_str\n\n if big_endian:\n byte_fmt = '>'\n else:\n byte_fmt = '<'\n byte_fmt += 'd' * num_dims\n\n wkb_string += struct.pack(byte_fmt, *coords)\n return wkb_string",
"def dump(self):\r\n return map(int, (self.left, self.top, self.right, self.bottom))",
"def to_xml(self):\n # lines = super(FileCatNoEmpty, self).cat(filepath)\n structure = super(Point, self).to_xml()\n\n\n coords = GeometryTopologyData.__to_xml_vector__(self.coordinate, self.format)\n # description_str = ''\n # if self.description is not None:\n # description_str = '<Description>%s</Description>' % self.description\n\n return '<Point>%s<Coordinate>%s</Coordinate></Point>' % (structure, coords)",
"def _write_coord(parameters):\n # Reorder elements\n if parameters[\"elements_order\"] is not None:\n order = parameters[\"elements_order\"]\n else:\n order = parameters[\"elements\"].keys()\n\n # Format\n fmt = block_to_format[\"COORD\"]\n fmt = str2format(fmt)\n\n out = []\n for k in order:\n values = parameters[\"elements\"][k][\"center\"]\n out += write_record(values, fmt)\n\n return out",
"def to_xml(self):\n output = '<?xml version=\"1.0\" encoding=\"utf8\"?><GeometryTopologyData>'\n if self.num_dimensions != 0:\n output += ('<NumDimensions>%i</NumDimensions>' % self.num_dimensions)\n\n output += ('<CoordinateSystem>%s</CoordinateSystem>' % self.__coordinate_system_to_str__(self.coordinate_system))\n\n if self.lps_to_ijk_transformation_matrix is not None:\n output += self.__write_transformation_matrix__(self.lps_to_ijk_transformation_matrix)\n\n # Concatenate points (sort first)\n self.points.sort(key=lambda p: p.__id__)\n points = \"\".join(map(lambda i:i.to_xml(), self.points))\n # Concatenate bounding boxes\n bounding_boxes = \"\".join(map(lambda i:i.to_xml(), self.bounding_boxes))\n\n return output + points + bounding_boxes + \"</GeometryTopologyData>\"",
"def print_xyz(atoms,coordinates,filename):\n coordinates = [[w / angtobh for w in ww] for ww in coordinates] #bh to ang\n xyz = open(filename,\"a\")\n xyz.write(str(len(atoms)))\n xyz.write(\"\\nOptimizer geometry\\n\")\n for i in xrange(len(atoms)):\n\txyz.write(atoms[i] + ' ')\n\txyz.write(\" \".join(str(f) for f in coordinates[i]))\n\txyz.write(\"\\n\")\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n xyz.close()",
"def _write_coords(self, size, card_writer):\n msg = []\n if len(self.coords) > 1:\n msg.append('$COORDS\\n')\n for (unused_id, coord) in sorted(self.coords.iteritems()):\n if unused_id != 0:\n msg.append(coord.write_bdf(size, card_writer))\n return ''.join(msg)",
"def __str__(self):\n\t\treturn '<' + str(self._coords)[1:-1] + '>'",
"def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data",
"def __str__(self):\n return '<{}>'.format(self._coords)",
"def get_xyz(self)->str:\n xyz_str = str(len(self.POSITION)) + \"\\n\"\n xyz_str += \"# \"+str(self.TITLE.content[0])\n xyz_str += \"# exported wit PyGromosTools\\n\"\n xyz_format = \"{:<3}\\t{:> 3.9f} {:> 3.9f} {:> 3.9f}\\n\"\n\n for position in self.POSITION:\n xyz_line = xyz_format.format(position.atomType[0], position.xp * 10, position.yp * 10, position.zp * 10)\n xyz_str += xyz_line\n\n return xyz_str",
"def dump(self, data_points):\n print(data_points)",
"def to_ghost_xyz(self):\n\n string = \"\"\n\n # add each atom to the string\n for atom in self.get_atoms():\n string += atom.to_ghost_xyz() + \"\\n\"\n\n return string",
"def dump(points, filename):\n with open(filename, 'w') as f:\n for i, pts in enumerate(points):\n for x, y in pts:\n f.write(f\"{x:.3f},{y:.3f},{i}\\n\")\n print(f\"Dumping data to {filename}...\")",
"def dumpCoordinatesToFile(self, coordinate_ids): \n coordinate_ids_dump_file = open(self.COORDINATES_DUMP_FNAME , 'w')\n pickle.dump(coordinate_ids, coordinate_ids_dump_file)\n coordinate_ids_dump_file.close()",
"def coordinates(self):",
"def __str__(self):\n return '< {coords} >'.format(coords=self._coords)",
"def footprint(self):\n # coords format: 'lon lat lon lat....'\n geojson = self._createGeoJSON()\n return geojson_to_wkt(geojson, 14)",
"def dump(\n self,\n display=True,\n _omit_properties=None,\n field=None,\n key=\"\",\n _level=0,\n _title=None,\n _construct_names=None,\n _auxiliary_coordinates=None,\n _dimension_coordinates=None,\n ):\n indent0 = \" \" * _level\n indent1 = \" \" * (_level + 1)\n\n if _title is None:\n string = [\n f\"{indent0}Coordinate Reference: {self.identity(default='')}\"\n ]\n else:\n string = [indent0 + _title]\n\n # Coordinate conversion parameter-valued terms\n coordinate_conversion = self.get_coordinate_conversion()\n for term, value in sorted(coordinate_conversion.parameters().items()):\n string.append(f\"{indent1}Coordinate conversion:{term} = {value}\")\n\n # Coordinate conversion domain ancillary-valued terms\n if _construct_names:\n for term, key in sorted(\n coordinate_conversion.domain_ancillaries().items()\n ):\n if key in _construct_names:\n construct_name = (\n \"Domain Ancillary: \"\n + _construct_names.get(key, f\"key:{key}\")\n )\n else:\n construct_name = \"\"\n\n string.append(\n f\"{indent1}Coordinate conversion:{term} = {construct_name}\"\n )\n else:\n for term, value in sorted(\n coordinate_conversion.domain_ancillaries().items()\n ):\n string.append(\n f\"{indent1}Coordinate conversion:{term} = {value}\"\n )\n\n # Datum parameter-valued terms\n datum = self.get_datum()\n for term, value in sorted(datum.parameters().items()):\n string.append(f\"{indent1}Datum:{term} = {value}\")\n\n # Coordinates\n if _construct_names:\n for key in sorted(self.coordinates(), reverse=True):\n coord_name = _construct_names.get(key, f\"key:{key}\")\n coord = f\"{coord_name}\"\n if key in _dimension_coordinates:\n coord = \"Dimension Coordinate: \" + coord\n elif key in _auxiliary_coordinates:\n coord = \"Auxiliary Coordinate: \" + coord\n\n string.append(f\"{indent1}{coord}\")\n else:\n for identifier in sorted(self.coordinates()):\n string.append(f\"{indent1}Coordinate: {identifier}\")\n\n return \"\\n\".join(string)",
"def exportDT(self):\n # Filter out coordinates in the extended BBox\n coord = self.coords[4:]\n\n # Filter out triangles with any vertex in the extended BBox\n tris = [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]\n return coord, tris",
"def __str__(self):\r\n return (str(self.blockNum) + \" \" + str(self.coords))",
"def extractCoords(self):\n if not self.rank:\n logging.info('Extracting atomic poitions')\n\n # Extract coordinates from liggghts\n self.lmp.command('variable x atom x')\n x = Rxn.lmp.extract_variable(\"x\", \"group1\", 1)\n\n self.lmp.command('variable y atom y')\n y = Rxn.lmp.extract_variable(\"y\", \"group1\", 1)\n\n self.lmp.command('variable z atom z')\n z = Rxn.lmp.extract_variable(\"z\", \"group1\", 1)\n\n coords = np.zeros((self.lmp.get_natoms(),3))\n\n for i in range(self.lmp.get_natoms()):\n coords[i,:] = x[i], y[i], z[i]\n\n self.lmp.command('variable x delete')\n self.lmp.command('variable y delete')\n self.lmp.command('variable z delete')\n\n return coords",
"def GetCoordinates(XSGeometry, Dbf):\n BedLevel = XSGeometry[0]\n BankLeftLevel = XSGeometry[1]\n BankRightLevel = XSGeometry[2]\n InterPLHeight = XSGeometry[3]\n InterPRHeight = XSGeometry[4]\n Bl = XSGeometry[5]\n Br = XSGeometry[6]\n xl = XSGeometry[7]\n yl = XSGeometry[8]\n xr = XSGeometry[9]\n yr = XSGeometry[10]\n B = XSGeometry[11]\n\n Xcoords = list()\n Ycoords = list()\n Zcoords = list()\n # point 1\n Xcoords.append(xl)\n Ycoords.append(yl)\n Zcoords.append(BankLeftLevel)\n # 8 points cross sections\n if Dbf != False:\n # point 2\n Xcoords.append(xl)\n Ycoords.append(yl)\n Zcoords.append(BedLevel + Dbf + InterPLHeight)\n # point 3\n Xcoords.append(xl + (Bl / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + (Bl / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel + Dbf)\n # point 4\n Xcoords.append(xl + (Bl / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + (Bl / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel)\n # point 5\n Xcoords.append(xl + ((Bl+B) / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + ((Bl+B) / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel)\n # point 6\n Xcoords.append(xl + ((Bl+B) / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + ((Bl+B) / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel + Dbf)\n # point 7\n Xcoords.append(xr)\n Ycoords.append(yr)\n Zcoords.append(BedLevel + Dbf + InterPRHeight)\n else:\n # point 2\n Xcoords.append(xl)\n Ycoords.append(yl)\n Zcoords.append(BedLevel + InterPLHeight)\n # point 3\n Xcoords.append(xl + (Bl / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + (Bl / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel)\n # point 4\n Xcoords.append(xl + ((Bl+B) / (Bl + Br + B)) * (xr - xl))\n Ycoords.append(yl + ((Bl+B) / (Bl + Br + B)) * (yr - yl))\n Zcoords.append(BedLevel)\n # point 5\n Xcoords.append(xr)\n Ycoords.append(yr)\n Zcoords.append(BedLevel + InterPRHeight)\n\n # point 8\n Xcoords.append(xr)\n Ycoords.append(yr)\n Zcoords.append(BankRightLevel)\n\n return Xcoords, Ycoords, Zcoords",
"def report(self):\n bbox = \"verts: \" + str(self.lower_vertex) + \" \" + str(self.upper_vertex)\n dimensions = \"dimensions: \" + \",\".join(\n (\n str(self.dimension_along(0)),\n str(self.dimension_along(1)),\n str(self.dimension_along(2)),\n )\n )\n string = bbox + \"\\n\" + dimensions\n return bbox",
"def __dump_linestring(obj, big_endian):\n wkb_string = b''\n\n if big_endian:\n wkb_string += BIG_ENDIAN\n else:\n wkb_string += LITTLE_ENDIAN\n\n coords = obj['coordinates']\n vertex = coords[0]\n # Infer the number of dimensions from the first vertex\n num_dims = len(vertex)\n if num_dims == 2:\n type_byte_str = __WKB['2D']['LineString']\n elif num_dims == 3:\n type_byte_str = __WKB['Z']['LineString']\n elif num_dims == 4:\n type_byte_str = __WKB['ZM']['LineString']\n else:\n pass\n # TODO: raise\n if not big_endian:\n # reverse the byte ordering for little endian\n type_byte_str = type_byte_str[::-1]\n wkb_string += type_byte_str\n\n if big_endian:\n byte_fmt = '>'\n else:\n byte_fmt = '<'\n byte_fmt += 'd' * num_dims\n\n for vertex in coords:\n wkb_string += struct.pack(byte_fmt, *vertex)\n\n return wkb_string",
"def loadCoordinatesFromDumpFile(self):\n coordinates_dump_file = open(self.COORDINATES_DUMP_FNAME , 'r')\n coordinates = pickle.load(coordinates_dump_file)\n coordinates_dump_file.close()\n return coordinates",
"def DumpShape(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_DumpShape(*args)",
"def __str__(self):\n vertices = []\n for idx in range(3):\n v = self.vertices[idx]\n if v is not None:\n vertices.append(str(v))\n else:\n orig_idx, dest_idx = (idx - 1) % 3, (idx + 1) % 3\n orig, dest = self.vertices[orig_idx], self.vertices[dest_idx]\n halfway = (orig.x + dest.x) * .5, (orig.y + dest.y) * .5\n# print(halfway)\n d = orig.distance(dest)\n dx = dest.x - orig.x\n# print(d)\n# print(dx)\n dx /= d\n dy = dest.y - orig.y\n# print(dy)\n dy /= d\n dx *= d\n dy *= d\n pt_halfway = halfway[0] + dy, halfway[1] - dx\n# print(\"outside\", orig_idx, dest_idx, pt_halfway)\n vertices.append(\"{0[0]} {0[1]}\".format(pt_halfway))\n vertices.append(vertices[0])\n return \"POLYGON(({0}))\".format(\", \".join(vertices))",
"def __str__(self):\n struct_repr = \", \".join([\n \"latitude_deg: \" + str(self.latitude_deg),\n \"longitude_deg: \" + str(self.longitude_deg),\n \"absolute_altitude_m: \" + str(self.absolute_altitude_m),\n \"relative_altitude_m: \" + str(self.relative_altitude_m)\n ])\n\n return f\"Position: [{struct_repr}]\""
]
| [
"0.66817063",
"0.65917444",
"0.644076",
"0.64272916",
"0.6338845",
"0.620574",
"0.61719364",
"0.60670024",
"0.6045421",
"0.5986626",
"0.5952752",
"0.59344923",
"0.59212697",
"0.58783215",
"0.58656347",
"0.58263856",
"0.58222085",
"0.5821016",
"0.5802443",
"0.5800428",
"0.57936585",
"0.57661784",
"0.57659537",
"0.57633156",
"0.57383573",
"0.5736438",
"0.57350314",
"0.57011116",
"0.56985706",
"0.56972563"
]
| 0.786923 | 0 |
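The query above is the Shapely-style docstring for a dump_coords helper whose stub body is elided. A minimal sketch of such a coordinate dumper, assuming the shapely package is installed (an illustration, not the library's actual implementation):

    from shapely.geometry import LineString, Polygon

    def dump_coords_sketch(geom):
        # Multi-part geometries and collections recurse into their members.
        if geom.geom_type.startswith("Multi") or geom.geom_type == "GeometryCollection":
            return [dump_coords_sketch(part) for part in geom.geoms]
        # Polygons have no flat .coords; dump the exterior ring plus any holes.
        if geom.geom_type == "Polygon":
            return [list(geom.exterior.coords)] + [list(r.coords) for r in geom.interiors]
        # Points, LineStrings and LinearRings expose their coordinates directly.
        return list(geom.coords)

    print(dump_coords_sketch(LineString([(0, 0), (1, 1)])))  # [(0.0, 0.0), (1.0, 1.0)]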
Helper function for geos__from_py functions in each geom type. If a create_func is specified the coodinate sequence is cloned and a new geometry is created with it, otherwise the geometry is cloned directly. This behaviour is useful for converting between LineString and LinearRing objects. | def geos_geom_from_py(ob, create_func=...): # -> tuple[Any | Unknown, Unknown]:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry",
"def _prepare_with_copy(geometry):\n geometry = pygeos.apply(geometry, lambda x: x) # makes a copy\n pygeos.prepare(geometry)\n return geometry",
"def _coords_transform(func):\n coords_func = getattr(Coords,func.__name__)\n def newf(self,*args,**kargs):\n \"\"\"Performs the Coords %s transformation on the coords attribute\"\"\" \n return self._set_coords(coords_func(self.coords,*args,**kargs))\n newf.__name__ = func.__name__\n newf.__doc__ =\"\"\"Apply '%s' transformation to the Geometry object. \n\n See :meth:`coords.Coords.%s` for details.\n\"\"\" % (func.__name__,func.__name__)\n return newf",
"def __init__(self, geom_input, srs=None):\n str_instance = isinstance(geom_input, str)\n\n # If HEX, unpack input to a binary buffer.\n if str_instance and hex_regex.match(geom_input):\n geom_input = memoryview(bytes.fromhex(geom_input))\n str_instance = False\n\n # Constructing the geometry,\n if str_instance:\n wkt_m = wkt_regex.match(geom_input)\n json_m = json_regex.match(geom_input)\n if wkt_m:\n if wkt_m[\"srid\"]:\n # If there's EWKT, set the SRS w/value of the SRID.\n srs = int(wkt_m[\"srid\"])\n if wkt_m[\"type\"].upper() == \"LINEARRING\":\n # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.\n # See https://trac.osgeo.org/gdal/ticket/1992.\n g = capi.create_geom(OGRGeomType(wkt_m[\"type\"]).num)\n capi.import_wkt(g, byref(c_char_p(wkt_m[\"wkt\"].encode())))\n else:\n g = capi.from_wkt(\n byref(c_char_p(wkt_m[\"wkt\"].encode())), None, byref(c_void_p())\n )\n elif json_m:\n g = self._from_json(geom_input.encode())\n else:\n # Seeing if the input is a valid short-hand string\n # (e.g., 'Point', 'POLYGON').\n OGRGeomType(geom_input)\n g = capi.create_geom(OGRGeomType(geom_input).num)\n elif isinstance(geom_input, memoryview):\n # WKB was passed in\n g = self._from_wkb(geom_input)\n elif isinstance(geom_input, OGRGeomType):\n # OGRGeomType was passed in, an empty geometry will be created.\n g = capi.create_geom(geom_input.num)\n elif isinstance(geom_input, self.ptr_type):\n # OGR pointer (c_void_p) was the input.\n g = geom_input\n else:\n raise GDALException(\n \"Invalid input type for OGR Geometry construction: %s\"\n % type(geom_input)\n )\n\n # Now checking the Geometry pointer before finishing initialization\n # by setting the pointer for the object.\n if not g:\n raise GDALException(\n \"Cannot create OGR Geometry from input: %s\" % geom_input\n )\n self.ptr = g\n\n # Assigning the SpatialReference object to the geometry, if valid.\n if srs:\n self.srs = srs\n\n # Setting the class depending upon the OGR Geometry Type\n self.__class__ = GEO_CLASSES[self.geom_type.num]",
"def copyGeom(geom):\n geomJson = geom.ExportToJson()\n newGeom = ogr.CreateGeometryFromJson(geomJson)\n return newGeom",
"def newf(self,*args,**kargs): \n return self._set_coords(coords_func(self.coords,*args,**kargs))",
"def geodjango_to_shapely(geos_obj):\n assert HAS_GEODJANGO, \"Requires Geodjango\"\n\n geodjango_poly_to_shapely = lambda t: geometry.Polygon(shell=t.coords[0], holes=t.coords[1:])\n\n converters = {\n geos.Point: lambda t: geometry.Point(t.coords),\n geos.LineString: lambda t: geometry.LineString(t.coords),\n geos.Polygon: lambda t: geodjango_poly_to_shapely(t),\n geos.MultiPolygon: lambda t: geometry.MultiPolygon([geodjango_poly_to_shapely(x) for x in t])\n }\n\n if not issubclass(geos_obj.__class__, geos.GEOSGeometry):\n raise TypeError(\"Require object that inherits from geos.GEOSGeometry\")\n\n return converters[type(geos_obj)](geos_obj) # FIXME: why is PyCharm complaining about this line?!",
"def convert_and_update_xyfield(workspace,fc,xfield,yfield,to_cs,transformationname = None):\n # http://desktop.arcgis.com/en/arcmap/10.4/analyze/arcpy-classes/pdf/geographic_coordinate_systems.pdf\n # http://desktop.arcgis.com/en/arcmap/latest/map/projections/pdf/geographic_transformations.pdf\n \n arcpy.env.workspace = workspace\n errorcount = 0\n to_cs = arcpy.SpatialReference(to_cs)\n with arcpy.da.UpdateCursor(fc,['SHAPE@',xfield,yfield]) as cursor:\n for row in cursor:\n try:\n if transformationname:\n new_cs = row[0].projectAs(to_cs,transformationname)\n else:\n new_cs = row[0].projectAs(to_cs)\n row[1] = new_cs.firstPoint.X # xfield = SHAPE@X\n row[2] = new_cs.firstPoint.Y # yfield = SHAPE@Y\n cursor.updateRow(row)\n except RuntimeError as e:\n errorcount += 1\n print(f'{e}')\n except AttributeError as e:\n errorcount += 1\n print(f'{e}')\n print(f'errorcount: {errorcount}')",
"def _generate_geometry_from_points(self, geometry_type, points):\n if geometry_type == 'line':\n # Only x and y coordinates are considered for line\n geometry = LineString([(x[0], x[1]) for x in points])\n elif geometry_type == 'area':\n # Only x and y coordinates are considered for polygon area\n geometry = Polygon([(x[0], x[1]) for x in points])\n else:\n raise NotImplementedError()\n return geometry",
"def __geo_interface__(self):\r\n if HASARCPY:\r\n template = {\r\n \"type\": \"FeatureCollection\",\r\n \"features\": []\r\n }\r\n geom_type = self.geometry_type\r\n if geom_type.lower() == \"point\":\r\n geom_type = \"Point\"\r\n elif geom_type.lower() == \"polyline\":\r\n geom_type = \"LineString\"\r\n elif geom_type.lower() == \"polygon\":\r\n geom_type = \"Polygon\"\r\n df_copy = self.copy(deep=True)\r\n df_copy['geom_json'] = self.geometry.JSON\r\n df_copy['SHAPE'] = df_copy['geom_json']\r\n del df_copy['geom_json']\r\n for index, row in df_copy.iterrows():\r\n geom = row['SHAPE']\r\n del row['SHAPE']\r\n template['features'].append(\r\n {\"type\" : geom_type,\r\n \"geometry\" : pd.io.json.loads(geom),\r\n \"attributes\":row}\r\n )\r\n return pd.io.json.dumps(template)",
"def geometry():\n return Geometry()",
"def copy(self):\n return type(self)(self._geojson.copy(), **self._attrs)",
"def shape_factory_from_coords(neighbor_coords, ref_shape):\n shape = ref_shape.copy()\n shape.build_from_coords(neighbor_coords)\n return shape",
"def cf_to_shapely(ds: xr.Dataset):\n geom_type = ds.geometry_container.attrs[\"geometry_type\"]\n if geom_type == \"point\":\n geometries = cf_to_points(ds)\n elif geom_type in [\"line\", \"polygon\"]:\n raise NotImplementedError(\"Only point geometries conversion is implemented.\")\n else:\n raise ValueError(\n f\"Valid CF geometry types are 'point', 'line' and 'polygon'. Got {geom_type}\"\n )\n\n return geometries.rename(\"geometry\")",
"def __init__(self, geom):\n self.geom = deepcopy(geom)",
"def shapely_to_cf(geometries: xr.DataArray | Sequence, grid_mapping: str | None = None):\n # Get all types to call the appropriate translation function.\n types = {\n geom.item().geom_type if isinstance(geom, xr.DataArray) else geom.geom_type\n for geom in geometries\n }\n if types.issubset({\"Point\", \"MultiPoint\"}):\n ds = points_to_cf(geometries)\n elif types.issubset({\"Polygon\", \"MultiPolygon\"}) or types.issubset(\n {\"LineString\", \"MultiLineString\"}\n ):\n raise NotImplementedError(\"Only point geometries conversion is implemented.\")\n else:\n raise ValueError(\n f\"Mixed geometry types are not supported in CF-compliant datasets. Got {types}\"\n )\n\n # Special treatment of selected grid mappings\n if grid_mapping == \"longitude_latitude\":\n # Special case for longitude_latitude grid mapping\n ds = ds.rename(crd_x=\"lon\", crd_y=\"lat\")\n ds.lon.attrs.update(units=\"degrees_east\", standard_name=\"longitude\")\n ds.lat.attrs.update(units=\"degrees_north\", standard_name=\"latitude\")\n ds.geometry_container.attrs.update(coordinates=\"lon lat\")\n ds.x.attrs.update(units=\"degrees_east\", standard_name=\"longitude\")\n ds.y.attrs.update(units=\"degrees_north\", standard_name=\"latitude\")\n elif grid_mapping is not None:\n raise NotImplementedError(\n f\"Only grid mapping longitude_latitude is implemented. Got {grid_mapping}.\"\n )\n\n return ds",
"def create_ogr_geom(geom) -> ogr.Geometry:\n if isinstance(geom, ogr.Geometry):\n return geom\n\n # Converte os tipos para diferentes situações (python 2.7).\n # if isinstance(geom, str):\n # geom = str(geom)\n # elif isinstance(geom, unicode):\n # geom = str(geom)\n try:\n ogr_geom = ogr.CreateGeometryFromWkb(geom)\n except RuntimeError:\n ogr_geom = ogr.CreateGeometryFromWkt(geom)\n if not ogr_geom:\n ogr_geom = ogr.CreateGeometryFromWkt(geom)\n return ogr_geom",
"def split_bygeom(self, iterable, geom_getter=lambda x: x.geom):\n points, linestrings, multipoints, multilinestrings = [], [], [], []\n\n for x in iterable:\n geom = geom_getter(x)\n if geom is None:\n pass\n elif isinstance(geom, GeometryCollection):\n # Duplicate object, shapefile do not support geometry collections !\n subpoints, sublines, pp, ll = self.split_bygeom(geom, geom_getter=lambda geom: geom)\n if subpoints:\n clone = x.__class__.objects.get(pk=x.pk)\n clone.geom = MultiPoint(subpoints, srid=geom.srid)\n multipoints.append(clone)\n if sublines:\n clone = x.__class__.objects.get(pk=x.pk)\n clone.geom = MultiLineString(sublines, srid=geom.srid)\n multilinestrings.append(clone)\n elif isinstance(geom, Point):\n points.append(x)\n elif isinstance(geom, LineString):\n linestrings.append(x)\n else:\n raise ValueError(\"Only LineString and Point geom should be here. Got %s for pk %d\" % (geom, x.pk))\n return points, linestrings, multipoints, multilinestrings",
"def convertmany(self, *args, **kwargs):\n return _coordsys.coordsys_convertmany(self, *args, **kwargs)",
"def _create_geometry(self, coordinates_type, coordinates):\n if coordinates_type and coordinates:\n return {\n 'type': coordinates_type,\n 'coordinates': coordinates\n }",
"def normalizeGeometry(geom):\n\t# Convert string GEOSGeometry object to python dict\n\tgeom = json.loads(geom)\n\n\t# Normalize longitude to range [-180, 180) using saw tooth function\n\tc = geom['coordinates'][0]\n\tgeom['coordinates'][0] = (c+180 - ( math.floor( (c+180)/360 ) )*360) - 180\n\n\t# Normalize latitude to range [-90, 90) using saw tooth function\n\tc = geom['coordinates'][1]\n\tgeom['coordinates'][1] = (c+90 - ( math.floor( (c+90)/180 ) )*180) - 90\n\n\t# Encode and return GEOSGeometry object\n\treturn GEOSGeometry(json.dumps(geom))",
"def _create_shape(self, queryset, model, columns, filename):\n geo_field = geo_field_from_model(model, app_settings['GEOM_FIELD_NAME'])\n get_geom, geom_type, srid = info_from_geo_field(geo_field)\n\n if geom_type.upper() in (GeometryField.geom_type, GeometryCollectionField.geom_type):\n\n by_points, by_linestrings, multipoints, multilinestrings = self.split_bygeom(queryset, geom_getter=get_geom)\n\n for split_qs, split_geom_field in ((by_points, PointField),\n (by_linestrings, LineStringField),\n (multipoints, MultiPointField),\n (multilinestrings, MultiLineStringField)):\n if len(split_qs) == 0:\n continue\n split_geom_type = split_geom_field.geom_type\n shp_filepath = shape_write(split_qs, model, columns, get_geom, split_geom_type, srid)\n filename = '%s_%s' % (filename, split_geom_type.lower())\n self.layers[filename] = shp_filepath\n\n else:\n shp_filepath = shape_write(queryset, model, columns, get_geom, geom_type, srid)\n self.layers[filename] = shp_filepath",
"def to_geometry(self, to_crs=None):\n from geopandas import GeoDataFrame\n from shapely.geometry import Polygon\n out = GeoDataFrame()\n geoms = []\n ii = []\n jj = []\n xx = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx\n yy = self.corner_grid.y0 + np.arange(self.ny+1) * self.dy\n for j, (y0, y1) in enumerate(zip(yy[:-1], yy[1:])):\n for i, (x0, x1) in enumerate(zip(xx[:-1], xx[1:])):\n coords = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]\n geoms.append(Polygon(coords))\n jj.append(j)\n ii.append(i)\n out['j'] = jj\n out['i'] = ii\n out['geometry'] = geoms\n out.crs = self.proj.srs\n\n if check_crs(to_crs):\n transform_geopandas(out, to_crs=to_crs, inplace=True)\n return out",
"def _multigeometry(self, ogr_geometry):\n\n geo_type = ogr_geometry.GetGeometryType()\n\n if geo_type == ogr.wkbPolygon:\n return ogr.ForceToMultiPolygon(ogr_geometry)\n elif geo_type == ogr.wkbPoint:\n return ogr.ForceToMultiPoint(ogr_geometry)\n elif geo_type in [ogr.wkbLineString, ogr.wkbLinearRing]:\n return ogr.ForceToMultiLineString(ogr_geometry)\n else:\n return ogr_geometry",
"def transform_geometry(geom, crs=wgs84, to_crs=wgs84):\n\n from_crs = check_crs(crs)\n to_crs = check_crs(to_crs)\n\n if isinstance(to_crs, pyproj.Proj) and isinstance(from_crs, pyproj.Proj):\n project = partial(transform_proj, from_crs, to_crs)\n elif isinstance(to_crs, Grid):\n project = partial(to_crs.transform, crs=from_crs)\n elif isinstance(from_crs, Grid):\n project = partial(from_crs.ij_to_crs, crs=to_crs)\n else:\n raise NotImplementedError()\n\n from shapely.ops import transform\n return transform(project, geom)",
"def clone(self):\n return _libsbml.SBMLFunctionDefinitionConverter_clone(self)",
"def any_geom2ogr_geom(geom, osr_sref):\n\n if isinstance(geom, (tuple, list)) and (not isinstance(geom[0], (tuple, list))) and \\\n (len(geom) == 4) and osr_sref:\n geom_ogr = geometry.bbox2polygon(geom, osr_sref)\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n elif isinstance(geom, (tuple, list)) and (isinstance(geom[0], (tuple, list))) and \\\n (len(geom) == 2) and osr_sref:\n edge = ogr.Geometry(ogr.wkbLinearRing)\n geom = [geom[0], (geom[0][0], geom[1][1]), geom[1], (geom[1][0], geom[0][1])]\n for point in geom:\n if len(point) == 2:\n edge.AddPoint(float(point[0]), float(point[1]))\n edge.CloseRings()\n geom_ogr = ogr.Geometry(ogr.wkbPolygon)\n geom_ogr.AddGeometry(edge)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = force_axis_mapping(geom_ogr)\n elif isinstance(geom, (tuple, list)) and isinstance(geom[0], (tuple, list)) and osr_sref:\n edge = ogr.Geometry(ogr.wkbLinearRing)\n for point in geom:\n if len(point) == 2:\n edge.AddPoint(float(point[0]), float(point[1]))\n edge.CloseRings()\n geom_ogr = ogr.Geometry(ogr.wkbPolygon)\n geom_ogr.AddGeometry(edge)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = force_axis_mapping(geom_ogr)\n elif isinstance(geom, shapely.geometry.Polygon):\n geom_ogr = ogr.CreateGeometryFromWkt(geom.wkt)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n elif isinstance(geom, ogr.Geometry):\n geom_sref = geom.GetSpatialReference()\n if geom_sref is None:\n geom.AssignSpatialReference(osr_sref)\n geom_ogr = geom\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n else:\n raise GeometryUnkown(geom)\n\n return geom_ogr",
"def __getGeometry(self, geom):\n if \"POLYGON\" in self.geometryType:\n rings = geom['rings']\n polygon = getMultiGeometry(rings)\n polyGeom = arcpy.Polygon(polygon, self.sr)\n return polyGeom\n elif \"POLYLINE\" in self.geometryType:\n paths = geom['paths']\n polyline = getMultiGeometry(paths)\n lineGeom = arcpy.Polyline(polyline, self.sr)\n return lineGeom\n elif \"POINT\" in self.geometryType:\n try:\n point = arcpy.Point(float(geom['x']), float(geom['y']))\n except:\n raise NullGeometryError(\"Point geometry is invalid or null\")\n pointGeom = arcpy.Geometry(\"point\", point, self.sr)\n return pointGeom",
"def get_obj_geo_copy(node_path):\n # Create a new hou.Geometry object.\n geo = hou.Geometry()\n\n # Get the geometry object's geo.\n source_geo = get_obj_geo(node_path)\n\n # Merge the geo to copy it.\n geo.merge(source_geo)\n\n return geo",
"def copy(self):\n return type(self)(self.lat_lon[0], self.lat_lon[1], **self._attrs)"
]
| [
"0.6204362",
"0.5892721",
"0.5733347",
"0.5654594",
"0.5577481",
"0.52522194",
"0.5130786",
"0.5058003",
"0.50202096",
"0.5018276",
"0.49738002",
"0.4966857",
"0.49666315",
"0.4957779",
"0.48849624",
"0.48557302",
"0.48483503",
"0.48371914",
"0.48297095",
"0.48190808",
"0.481199",
"0.4806898",
"0.4788599",
"0.47801894",
"0.47689894",
"0.47415316",
"0.47410515",
"0.47312835",
"0.47261432",
"0.47225553"
]
| 0.7071003 | 0 |
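The helper described above is internal plumbing, but the conversion it enables shows up at the user level: building a LinearRing from a LineString's coordinate sequence creates a new geometry of the other type. A small check, assuming the shapely package:

    from shapely.geometry import LineString, LinearRing

    line = LineString([(0, 0), (1, 0), (1, 1)])
    ring = LinearRing(line.coords)   # new geometry created from the line's coordinate sequence
    print(ring.is_ring)              # True: the ring is closed automatically
    print(list(ring.coords))         # [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 0.0)]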
Decorator which helps avoid GEOS operations on null pointers. | def exceptNull(func): # -> (*args: Unknown, **kwargs: Unknown) -> Unknown:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_null_tracking(*args, **kwargs): # real signature unknown\n pass",
"def null(cls):\n return cls(*[Point.origin() for i in range(2)])",
"def nulltest():",
"def pass_null(func):\n\n def wrapper(obj, *args, **kwargs):\n if not obj:\n return obj\n return func(obj, *args, **kwargs)\n\n # used by django template parser to introspect args\n wrapper._decorated_function = getattr(func, '_decorated_function', func)\n return wraps(func)(wrapper)",
"def maybe_centroid2d(points):\n try:\n return centroid2d(points)\n except (ZeroDivisionError, TypeError, IndexError):\n return None",
"def _isnull_old(obj):\n if is_scalar(obj):\n return lib.checknull_old(obj)\n # hack (for now) because MI registers as ndarray\n elif isinstance(obj, ABCMultiIndex):\n raise NotImplementedError(\"isnull is not defined for MultiIndex\")\n elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):\n return _isnull_ndarraylike_old(obj)\n elif isinstance(obj, ABCGeneric):\n return obj._constructor(obj._data.isnull(func=_isnull_old))\n elif isinstance(obj, list) or hasattr(obj, '__array__'):\n return _isnull_ndarraylike_old(np.asarray(obj))\n else:\n return obj is None",
"def geos_geom_from_py(ob, create_func=...): # -> tuple[Any | Unknown, Unknown]:\n ...",
"def isnull(obj):\n return _isnull(obj)",
"def spatial(self):",
"def set_null(self, /, *defaults: Any, **kwargs: Any) -> \"fn\":\n return self._mod.set_null(self._func, *defaults, **kwargs)",
"def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:\n ...",
"def null(cls):\n return GXDMPPLY()",
"def is_empty(geometry, **kwargs):\n return lib.is_empty(geometry, **kwargs)",
"def nonull(val):\n return val if not pd.isnull(val) else None",
"def is_missing(geometry, **kwargs):\n return lib.is_missing(geometry, **kwargs)",
"def is_null(self):\n return self.length2 < pygonal.EPSILON2",
"def __noop(self, *args, **kwargs):\n return None",
"def extra_coords(self) -> ExtraCoordsABC:",
"def _isNullFunc():\n try:\n return vd.sheet.isNullFunc()\n except AttributeError:\n import visidata\n\n return visidata.isNullFunc()",
"def default_trim_value():\n return geos_version_tuple() >= (3, 12)",
"def get_geospatial(self):\n self.unimpl_base_class()",
"def IsNull(*args, **kwargs):\n return _gdi_.GraphicsObject_IsNull(*args, **kwargs)",
"def is_null(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_null)",
"def ensure_geo_reference(origin):\r\n\r\n if isinstance(origin, Geo_reference):\r\n geo_ref = origin\r\n elif origin is None:\r\n geo_ref = None\r\n else:\r\n geo_ref = apply(Geo_reference, origin)\r\n\r\n return geo_ref",
"def is_valid(geometry, **kwargs):\n # GEOS is valid will emit warnings for invalid geometries. Suppress them.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n result = lib.is_valid(geometry, **kwargs)\n return result",
"def notnull(obj):\n res = isnull(obj)\n if is_scalar(res):\n return not res\n return ~res",
"def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner",
"def test_insert_empty_geometry():\n empty = Polygon()\n geoms = [empty]\n tree = STRtree(geoms)\n query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])\n results = tree.query(query)\n assert len(results) == 0",
"def test_valid_null(self):\n f = lws.valid_null\n assert f(None, '') is True\n assert f('asdasdasd', '') is True",
"def test_query_empty_geometry():\n empty = Polygon()\n point = Point(1, 0.5)\n geoms = [empty, point]\n tree = STRtree(geoms)\n query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])\n results = tree.query(query)\n assert len(results) == 1\n assert results[0] == point"
]
| [
"0.573325",
"0.56983113",
"0.5669062",
"0.5669048",
"0.55313367",
"0.5461798",
"0.5193316",
"0.51848453",
"0.5162868",
"0.5157472",
"0.5082548",
"0.5073737",
"0.5057937",
"0.5054257",
"0.5050019",
"0.5043085",
"0.50347704",
"0.501953",
"0.50131",
"0.4963259",
"0.49362338",
"0.49251184",
"0.49232945",
"0.4915196",
"0.49110478",
"0.49016482",
"0.48853138",
"0.4878792",
"0.48631763",
"0.48505235"
]
| 0.57892126 | 0 |
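The decorator described above guards geometry methods against calling into GEOS when no underlying geometry exists. A hedged sketch of that pattern, where the _geom attribute name and the is_empty check are assumptions for illustration rather than Shapely's exact internals:

    from functools import wraps

    def except_null(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            # Skip the GEOS call when the wrapped object has no live geometry pointer.
            if getattr(self, "_geom", None) is None or self.is_empty:
                raise ValueError("Null geometry supports no operations")
            return func(self, *args, **kwargs)
        return wrapper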
Provide the Numpy array protocol. | def __array_interface__(self):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __array__(self, *args, **kwargs):\n\n return self.data",
"def __array__(self):\n return np.asarray(self.data)",
"def array(self):",
"def __array__(self):\n return np.zeros(self.shape, self.dtype)",
"def __array__(self):\n return self.array",
"def tonumpy(self):\n import numpy\n from numpy import ma\n\n # initialize the return\n narray = None\n\n if None in self._data:\n\n # define a lambda function\n # to create the mask array\n make_mask = lambda x: x == None\n\n # create the numpy array,\n # making on the fly the mask\n narray = numpy.ma.array(self._data, mask=list(map(make_mask, self._data)))\n\n else:\n # convert the list to a numpy object\n narray = numpy.array(self._data)\n\n # return the numpy object\n return narray",
"def __array__(self):\n return self.to_array()",
"def to_numpy(self, **kwargs):\n pass",
"def basic_array_creation():\n print('From normal creation')\n arr: pa.Array = pa.array([1, 2, 3, 4, 5], type=pa.int8())\n print(arr)\n\n print('From pandas series')\n arr: pa.Array = pa.Array.from_pandas(pd.Series([1, 2, 3, 4, 5]))\n print(arr)",
"def __array__(self, dtype=None) -> np.ndarray:\n return self.values",
"def numpy(self):\n return self.data",
"def from_numpy(self, a):\n raise NotImplementedError(\"from_numpy\")",
"def asnumpy(self):\n return self.data.asnumpy()",
"def construct_array_type(cls, *args):\n if len(args) > 0:\n raise NotImplementedError(\n \"construct_array_type does not support arguments\")\n return XndframesArray",
"def get_array_module(arr):\n # TODO: also check for __array_interface__ attribute and not\n # __cuda_array_interface__?\n if have_cupy:\n return cupy.get_array_module(arr)\n else:\n return np",
"def asarray(self):\n from numpy import asarray\n return asarray(self)",
"def array(self):\n return np.asarray(self)",
"def __array__(self):\n return dict2rec(self)",
"def construct_array_type(cls, *args):\n if len(args) > 0:\n raise NotImplementedError(\"construct_array_type does not support arguments\")\n return FletcherArray",
"def get_array(self):\n return numpy.array(self._ar)",
"def __array_function__(self, func, types, args, kwargs):\n try:\n if not func.__module__.startswith(\"numpy\"):\n return NotImplemented\n except AttributeError:\n return NotImplemented\n _args = list(map(MetaTensor._convert, args))\n _kwargs = {k: MetaTensor._convert(v) for k, v in kwargs.items()}\n return func(*_args, **_kwargs)",
"def lookup_array(self, *args, **kwargs): # real signature unknown\n pass",
"def lookup_array(self, *args, **kwargs): # real signature unknown\n pass",
"def as_numpy(a):\n if isinstance(a, mx.nd.NDArray):\n a = a.asnumpy()\n return a",
"def numpy(self) -> np.ndarray:\n return self.tensor.numpy()",
"def to_numpy(array):\n if not CUPY_LOADED:\n return array\n else:\n return xp.asnumpy(array)",
"def numpy_vector(self):\n pass",
"def ndarray(dtype, shape):\n if isinstance(shape, numbers.Number):\n shape = (shape, )\n if dtype in all_types:\n return ScalarNdarray(dtype, shape)\n if isinstance(dtype, MatrixType):\n return MatrixNdarray(dtype.n, dtype.m, dtype.dtype, shape)\n\n raise TaichiRuntimeError(\n f'{dtype} is not supported as ndarray element type')",
"def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )",
"def a(*args, **kwargs):\n return np.array(*args, **kwargs)"
]
| [
"0.6984412",
"0.68687886",
"0.6676552",
"0.6667087",
"0.66398215",
"0.6637101",
"0.6610455",
"0.65973616",
"0.6503999",
"0.6497078",
"0.64307505",
"0.6386554",
"0.63856333",
"0.63472104",
"0.6298814",
"0.62879676",
"0.6276794",
"0.62321687",
"0.6229774",
"0.62225384",
"0.6192883",
"0.6171395",
"0.6171395",
"0.6141695",
"0.6139878",
"0.61258614",
"0.61165375",
"0.6107634",
"0.609104",
"0.6088997"
]
| 0.7552204 | 0 |
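The "Numpy array protocol" named above refers to the __array_interface__ attribute, which lets numpy.asarray() consume an object's coordinate buffer directly. A self-contained toy example (the CoordSeq class is hypothetical):

    import numpy as np

    class CoordSeq:
        def __init__(self, coords):
            self._data = np.asarray(coords, dtype=float)

        @property
        def __array_interface__(self):
            # Delegate to the owned ndarray so numpy sees the same buffer description.
            return self._data.__array_interface__

    print(np.asarray(CoordSeq([(0, 0), (1, 2)])))  # 2x2 float array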
WKB representation of the geometry | def wkb(self): # -> bytes:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wkb_hex(self): # -> str:\n ...",
"def as_ewkb(self) -> ir.BinaryValue:\n return ops.GeoAsEWKB(self).to_expr()",
"def to_wkb(self):\n return _property_op(lambda x: x, self)",
"def geometry(self):\n return self._geometry",
"def geometry(self):\n return self._geometry",
"def geometry(self):\n return self[0].geometry",
"def getGeometry(self):\n return self.geometry",
"def getGeometry(self):\n return self.geometry",
"def footprint(self):\n # coords format: 'lon lat lon lat....'\n geojson = self._createGeoJSON()\n return geojson_to_wkt(geojson, 14)",
"def to_wkt(self):\n return _property_op(arctern.ST_AsText, self)",
"def wgs84_wkt():\n return WGS84.to_wkt()",
"def to_xml(self):\n start_str = GeometryTopologyData.__to_xml_vector__(self.start, self.format)\n size_str = GeometryTopologyData.__to_xml_vector__(self.size, self.format)\n structure = super(BoundingBox, self).to_xml()\n\n return '<BoundingBox>%s<Start>%s</Start><Size>%s</Size></BoundingBox>' % (structure, start_str, size_str)",
"def report(self):\n bbox = \"verts: \" + str(self.lower_vertex) + \" \" + str(self.upper_vertex)\n dimensions = \"dimensions: \" + \",\".join(\n (\n str(self.dimension_along(0)),\n str(self.dimension_along(1)),\n str(self.dimension_along(2)),\n )\n )\n string = bbox + \"\\n\" + dimensions\n return bbox",
"def to_geometry(v):\n return v / 1000",
"def getquoted(self):\n if self.is_geometry:\n # Psycopg will figure out whether to use E'\\\\000' or '\\000'.\n return b\"%s(%s)\" % (\n b\"ST_GeogFromWKB\" if self.geography else b\"ST_GeomFromEWKB\",\n sql.quote(self.ewkb).encode(),\n )\n else:\n # For rasters, add explicit type cast to WKB string.\n return b\"'%s'::raster\" % self.ewkb.hex().encode()",
"def get_geometry(self):\n rows, cols = self.get_gridspec().get_geometry()\n return rows, cols, self.num1, self.num2",
"def geometry():\n return Geometry()",
"def sa_wkb_to_wkt(sa_wkb):\n return gis_util.wkb_to_wkt(str(sa_wkb.geom_wkb))",
"def restore_geometry(self):\n return stools.SETTINGS.get(\"waveformWidget/geometry\")",
"def encode_geometry(geom: BasePolygon) -> str:\n encoded_geom = geobuf.encode(mapping(geom)).hex()\n\n # if the geometry is so complex is still goes over the limit, incrementally attempting to simplify it\n if sys.getsizeof(encoded_geom) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES:\n encoded_geom = geobuf.encode(\n mapping(geom.simplify(0.005, preserve_topology=False))\n ).hex()\n\n if sys.getsizeof(encoded_geom) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES:\n encoded_geom = geobuf.encode(\n mapping(geom.simplify(0.01, preserve_topology=False))\n ).hex()\n\n return encoded_geom",
"def to_xml(self):\n output = '<?xml version=\"1.0\" encoding=\"utf8\"?><GeometryTopologyData>'\n if self.num_dimensions != 0:\n output += ('<NumDimensions>%i</NumDimensions>' % self.num_dimensions)\n\n output += ('<CoordinateSystem>%s</CoordinateSystem>' % self.__coordinate_system_to_str__(self.coordinate_system))\n\n if self.lps_to_ijk_transformation_matrix is not None:\n output += self.__write_transformation_matrix__(self.lps_to_ijk_transformation_matrix)\n\n # Concatenate points (sort first)\n self.points.sort(key=lambda p: p.__id__)\n points = \"\".join(map(lambda i:i.to_xml(), self.points))\n # Concatenate bounding boxes\n bounding_boxes = \"\".join(map(lambda i:i.to_xml(), self.bounding_boxes))\n\n return output + points + bounding_boxes + \"</GeometryTopologyData>\"",
"def bbox_format(self) -> bbox_utils.BBoxFormat:\n raise NotImplementedError",
"def __dump_point(obj, big_endian):\n wkb_string = b''\n\n if big_endian:\n wkb_string += BIG_ENDIAN\n else:\n wkb_string += LITTLE_ENDIAN\n\n coords = obj['coordinates']\n num_dims = len(coords)\n if num_dims == 2:\n type_byte_str = __WKB['2D']['Point']\n elif num_dims == 3:\n type_byte_str = __WKB['Z']['Point']\n elif num_dims == 4:\n type_byte_str = __WKB['ZM']['Point']\n else:\n pass\n # TODO: raise\n\n if not big_endian:\n # reverse the byte ordering for little endian\n type_byte_str = type_byte_str[::-1]\n wkb_string += type_byte_str\n\n if big_endian:\n byte_fmt = '>'\n else:\n byte_fmt = '<'\n byte_fmt += 'd' * num_dims\n\n wkb_string += struct.pack(byte_fmt, *coords)\n return wkb_string",
"def prepare_wld(bbox, mwidth, mheight):\n pixel_x_size = (bbox.maxx - bbox.minx) / mwidth\n pixel_y_size = (bbox.maxy - bbox.miny) / mheight\n left_pixel_center_x = bbox.minx + pixel_x_size * 0.5\n top_pixel_center_y = bbox.maxy - pixel_y_size * 0.5\n return ''.join([\"{:.8f}\\n\".format(n) for n in [\n pixel_x_size, 0.0,\n 0.0, -pixel_y_size,\n left_pixel_center_x, top_pixel_center_y\n ]])",
"def getWKT(self):\n logger.debug(\"Entering in ocentricWKT.getWkt\")\n\n # building WKT string\n wkt = OcentricWKT.GEODCRS % (\n self.getGeoGcsName(), self.getDatumName(), self.getSpheroidName(), self.getRadius(), self.getInverseFlattening(),\n self.getRadius(), self.getAuthorityName(), self.getAuthorityCode()\n )\n\n logger.debug(\"Exiting from ocentricWKT.getWkt\")\n return wkt",
"def toXML(self):\n return _libsbml.BoundingBox_toXML(self)",
"def wkt(self): # -> str:\n ...",
"def w(self):\r\n return self.size.x",
"def get_data_structure_representation(self) -> dict:\n byte_buff = self.get_rle()\n encoding = \"RLE\"\n\n if len(byte_buff) > self.grid_size[0] * self.grid_size[1] * 4:\n encoding = \"RAW\"\n byte_buff = self.get_raw()\n print(\"RAW ran\")\n else:\n print(\"RLE ran\")\n\n json_dict = {\n \"encoding\": encoding,\n \"nodes\": [base64.b64encode(bytes(byte_buff)).decode()],\n \"dimensions\": [self.grid_size[0], self.grid_size[1]]\n }\n\n return json_dict",
"def encode(self):\n data = struct.pack('BBB', self.cid, self.margin, self.gwcnt)\n return data"
]
| [
"0.6447294",
"0.6303592",
"0.62818676",
"0.62680125",
"0.62680125",
"0.6183954",
"0.615516",
"0.615516",
"0.6141851",
"0.61130995",
"0.6106607",
"0.6060641",
"0.60426927",
"0.59896225",
"0.5929752",
"0.58390784",
"0.57963085",
"0.57863003",
"0.5744317",
"0.57364625",
"0.569089",
"0.5668226",
"0.5647571",
"0.55853695",
"0.55775094",
"0.55748993",
"0.55681294",
"0.5548925",
"0.5522038",
"0.5519076"
]
| 0.67043364 | 0 |
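The wkb property above is the well-known-binary encoding of the geometry, returned as raw bytes. A quick check of the sizes involved, assuming the shapely package:

    from shapely.geometry import Point

    p = Point(0, 0)
    print(type(p.wkb))  # <class 'bytes'>
    print(len(p.wkb))   # 21 for a 2D point: 1 byte order + 4 bytes type + 2 * 8 bytes coordinates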
WKB hex representation of the geometry | def wkb_hex(self): # -> str:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hex_str (self):\n return \"#%02X%02X%02X\"%(self.r, self.g, self.b)",
"def getquoted(self):\n if self.is_geometry:\n # Psycopg will figure out whether to use E'\\\\000' or '\\000'.\n return b\"%s(%s)\" % (\n b\"ST_GeogFromWKB\" if self.geography else b\"ST_GeomFromEWKB\",\n sql.quote(self.ewkb).encode(),\n )\n else:\n # For rasters, add explicit type cast to WKB string.\n return b\"'%s'::raster\" % self.ewkb.hex().encode()",
"def __repr__(self):\n return self._hex",
"def hex_str (self):\n return \"#%02X%02X%02X\"%(self._intern[0],self._intern[1],self._intern[2])",
"def hex(space, w_val):\n return space.hex(w_val)",
"def hex(self):\n return binascii.hexlify(self.data)",
"def get_hash(self):\n s = super(BoundingBox, self).get_hash()\n for c in self.start:\n s += \"_%f\" % c\n for c in self.size:\n s += \"_%f\" % c\n return s",
"def wkb(self): # -> bytes:\n ...",
"def xyToHEX(self, x, y, bri=1):\n r, g, b = self.color.getRGBFromXYAndBrightness(x, y, bri)\n return self.color.rgbToHex(r, g, b)",
"def as_hex(self):\n return binascii.hexlify(self.as_bytes()).decode('ascii')",
"def toHex(self):\r\n rgb = self.toRGB()\r\n return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],\r\n hex(rgb[2])[2:])).replace(' ', '0')",
"def encode_geometry(geom: BasePolygon) -> str:\n encoded_geom = geobuf.encode(mapping(geom)).hex()\n\n # if the geometry is so complex is still goes over the limit, incrementally attempting to simplify it\n if sys.getsizeof(encoded_geom) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES:\n encoded_geom = geobuf.encode(\n mapping(geom.simplify(0.005, preserve_topology=False))\n ).hex()\n\n if sys.getsizeof(encoded_geom) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES:\n encoded_geom = geobuf.encode(\n mapping(geom.simplify(0.01, preserve_topology=False))\n ).hex()\n\n return encoded_geom",
"def toHex(self):\n return hexlify(self.serialize()).decode(\"utf-8\")",
"def geometry_hash(geometry):\n if hasattr(geometry, 'md5'):\n # for most of our trimesh objects\n md5 = geometry.md5()\n elif hasattr(geometry, 'tostring'):\n # for unwrapped ndarray objects\n md5 = str(hash(geometry.tostring()))\n\n if hasattr(geometry, 'visual'):\n # if visual properties are defined\n md5 += str(geometry.visual.crc())\n return md5",
"def to_h(self):\n return str(self).encode('hex')",
"def as_hex(self, *, align='left'):\n return self.as_bytes(align=align).hex()",
"def generate_hex_vertices(geom):\n phi = np.arange(0, 2 * np.pi, np.pi / 3)\n\n # apply pixel rotation and conversion from flat top to pointy top\n phi += geom.pix_rotation.rad + np.deg2rad(30)\n\n # we need the circumcircle radius, pixel_width is incircle diameter\n unit = geom.pix_x.unit\n r = 2 / np.sqrt(3) * geom.pixel_width.to_value(unit) / 2\n\n x = geom.pix_x.to_value(unit)\n y = geom.pix_y.to_value(unit)\n\n return (\n x[:, np.newaxis] + r[:, np.newaxis] * np.cos(phi)[np.newaxis],\n y[:, np.newaxis] + r[:, np.newaxis] * np.sin(phi)[np.newaxis],\n )",
"def hex(cls, x):\n return c_hex(x)",
"def encode(self) -> bytes:\n\n coordinate = self.column % (2 ** Protocol.Formats.COORDINATE_DELIMITER)\n coordinate += (self.row << Protocol.Formats.COORDINATE_DELIMITER)\n\n encoded_message = struct.pack(Protocol.Formats.COORDINATE_FORMAT, coordinate)\n\n return encoded_message",
"def footprint(self):\n # coords format: 'lon lat lon lat....'\n geojson = self._createGeoJSON()\n return geojson_to_wkt(geojson, 14)",
"def hex(self) -> str:\n return self.__hash.hexdigest()",
"def to_xml(self):\n start_str = GeometryTopologyData.__to_xml_vector__(self.start, self.format)\n size_str = GeometryTopologyData.__to_xml_vector__(self.size, self.format)\n structure = super(BoundingBox, self).to_xml()\n\n return '<BoundingBox>%s<Start>%s</Start><Size>%s</Size></BoundingBox>' % (structure, start_str, size_str)",
"def report(self):\n bbox = \"verts: \" + str(self.lower_vertex) + \" \" + str(self.upper_vertex)\n dimensions = \"dimensions: \" + \",\".join(\n (\n str(self.dimension_along(0)),\n str(self.dimension_along(1)),\n str(self.dimension_along(2)),\n )\n )\n string = bbox + \"\\n\" + dimensions\n return bbox",
"def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode",
"def geometry():\n return Geometry()",
"def to_geometry(v):\n return v / 1000",
"def as_ewkb(self) -> ir.BinaryValue:\n return ops.GeoAsEWKB(self).to_expr()",
"def rgb_hex_str(self, x):\n return \"#%02x%02x%02x\" % self.rgb_bytes_tuple(x)",
"def GetBoundaryEdgesHex(self):\n\n p = self.InferPolynomialDegree()\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n # FIRST GET BOUNDARY FACES\n if not isinstance(self.faces,np.ndarray):\n self.GetBoundaryFacesHex()\n\n # BUILD A 2D MESH\n tmesh = Mesh()\n tmesh.element_type = \"quad\"\n tmesh.elements = self.faces\n tmesh.nelem = tmesh.elements.shape[0]\n del tmesh.faces\n del tmesh.points\n\n # ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES\n self.edges = tmesh.GetEdgesQuad()",
"def hex_value(value, width=8):\n return 'X\"{0:0{1}x}\"'.format(value, width)"
]
| [
"0.65884876",
"0.6582219",
"0.636124",
"0.61333066",
"0.6064004",
"0.5988412",
"0.59239453",
"0.5870547",
"0.58452266",
"0.58219653",
"0.58061785",
"0.5790033",
"0.57587856",
"0.5756235",
"0.5752591",
"0.57401365",
"0.5630617",
"0.557337",
"0.5546199",
"0.55373263",
"0.5535646",
"0.553015",
"0.5528724",
"0.55136275",
"0.5505043",
"0.54973716",
"0.5493609",
"0.5464704",
"0.5441967",
"0.5441518"
]
| 0.7904502 | 0 |
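wkb_hex above is the same WKB payload rendered as a hexadecimal string, which is convenient for logging and for databases that accept hex-encoded WKB. Assuming the shapely package:

    from shapely.geometry import Point

    p = Point(0, 0)
    print(p.wkb_hex)                          # '0101000000...' (42 hex characters for a 2D point)
    print(bytes.fromhex(p.wkb_hex) == p.wkb)  # True: the hex form round-trips to the raw bytes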
Name of the geometry's type, such as 'Point' | def geom_type(self): # -> str:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def geometry_type(self) -> ir.StringValue:\n return ops.GeoGeometryType(self).to_expr()",
"def geom_type(self):\n return _property_op(arctern.ST_GeometryType, self)",
"def get_geometry_type(self):\n return self.geometry_type",
"def get_geometry_type(self):\n return self._geometry_type",
"def getGeometryType(restGeom):\n if \"Polygon\" in restGeom:\n return \"POLYGON\"\n elif \"Polyline\" in restGeom:\n return \"POLYLINE\"\n elif \"Point\" in restGeom:\n return \"POINT\"\n else:\n return \"Unknown\"",
"def geometry_type(number):\n try:\n return GDAL_GEOMETRY_TYPES[number]\n except KeyError:\n return",
"def type_name(self):\n return self._type_name",
"def name(self) -> str:\n return self.type_data.name",
"def type_name(self) -> str: # pragma: no cover\n return repr_type(self.type_obj)",
"def type_name(self):\n return self.TYPE_NAMES[self.type]",
"def __str__(self):\n return 'TypeShape{dtype:%s shape:%s tag:\"%s\"}' % (\n self.dtype, self.shape, self.tag)",
"def getType(self): #$NON-NLS-1$\r",
"def type(name):",
"def className(self):\n return _osgAnimation.RigGeometry_className(self)",
"def type_name(self):\n # TODO(peria): Replace with exceptions.NotImplementedError() after shipping.\n assert 'type_name() is not implemented for class %s' % (type(self))",
"def type_name(self):\n return self.TYPE_NAMES.get(self.type, \"Unknown\")",
"def shape(self) -> str:\n return \"box\"",
"def name_type(self):\n return self.tag(\"name_type\")",
"def shape_type(self):\n return \"rectangle\"",
"def get_geometry_type(self, table_name, description):\n with self.connection.cursor() as cursor:\n cursor.execute(\n \"\"\"\n SELECT t.coord_dimension, t.srid, t.type FROM (\n SELECT * FROM geometry_columns\n UNION ALL\n SELECT * FROM geography_columns\n ) AS t WHERE t.f_table_name = %s AND t.f_geometry_column = %s\n \"\"\",\n (table_name, description.name),\n )\n row = cursor.fetchone()\n if not row:\n raise Exception(\n 'Could not find a geometry or geography column for \"%s\".\"%s\"'\n % (table_name, description.name)\n )\n dim, srid, field_type = row\n # OGRGeomType does not require GDAL and makes it easy to convert\n # from OGC geom type name to Django field.\n field_type = OGRGeomType(field_type).django\n # Getting any GeometryField keyword arguments that are not the default.\n field_params = {}\n if self.postgis_oid_lookup.get(description.type_code) == \"geography\":\n field_params[\"geography\"] = True\n if srid != 4326:\n field_params[\"srid\"] = srid\n if dim != 2:\n field_params[\"dim\"] = dim\n return field_type, field_params",
"def type(self):\n return _coordsys.coordsys_type(self)",
"def type(self):\n return self.VERTEX",
"def type(self):\n return self.VERTEX",
"def getTypeCode(self):\n return _libsbml.Point_getTypeCode(self)",
"def type_name(self):\n return \"%s %s\" % (self.param_type, self.name)",
"def shape_type(self):\n return self._shape_type",
"def orbital_type(self):\n return self.name[0].upper()",
"def XmlTypeName(self) -> str:",
"def shape_type_string(shape):\n shape_type = shape.ShapeType()\n types = {TopAbs_VERTEX: \"Vertex\",\n TopAbs_SOLID: \"Solid\",\n TopAbs_EDGE: \"Edge\",\n TopAbs_FACE: \"Face\",\n TopAbs_SHELL: \"Shell\",\n TopAbs_WIRE: \"Wire\",\n TopAbs_COMPOUND: \"Compound\",\n TopAbs_COMPSOLID: \"Compsolid\"}\n return \"%s (id %s)\" % (types[shape_type], hash(shape))",
"def GetObjectTypeString(type):\n return _gmat_py.GmatBase_GetObjectTypeString(type)"
]
| [
"0.77384764",
"0.76871836",
"0.75093687",
"0.73306936",
"0.7246454",
"0.7081469",
"0.69444716",
"0.6698534",
"0.66607857",
"0.6624185",
"0.66158676",
"0.6610985",
"0.6605221",
"0.65819466",
"0.65738606",
"0.65729874",
"0.6572675",
"0.6514944",
"0.6506146",
"0.64641595",
"0.64596075",
"0.6420266",
"0.6420266",
"0.64128697",
"0.63823694",
"0.6379862",
"0.63104916",
"0.6297645",
"0.62960714",
"0.6274092"
]
| 0.80697644 | 0 |
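geom_type above returns the geometry's type name as a plain string such as 'Point' or 'LineString'. Assuming the shapely package:

    from shapely.geometry import Point, LineString

    print(Point(0, 0).geom_type)                   # 'Point'
    print(LineString([(0, 0), (1, 1)]).geom_type)  # 'LineString'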
Unitless hausdorff distance to other geometry (float) | def hausdorff_distance(self, other):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hausdorff_distance(self, other):\n return _binary_op(arctern.ST_HausdorffDistance, self, other)",
"def hausdorffDistance(self, id1, id2):\r\n # productive #math\r\n if frequent: profprint()\r\n node1 = slicer.mrmlScene.GetNodeByID(id1)\r\n polydata1 = node1.GetPolyData()\r\n node2 = slicer.mrmlScene.GetNodeByID(id2)\r\n polydata2 = node2.GetPolyData()\r\n nb1 = polydata1.GetNumberOfPoints()\r\n nb2 = polydata2.GetNumberOfPoints()\r\n minimum = None\r\n maximum = None\r\n JJ, jj = None, None\r\n II, ii = None, None\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n polydata1.GetPoint(1, pt1)\r\n polydata1.GetPoint(nb1 - 1, pt2)\r\n minVal1 = min(pt1[2], pt2[2])\r\n maxVal1 = max(pt1[2], pt2[2])\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n pt1b, pt2b = None, None\r\n polydata2.GetPoint(1, pt1)\r\n polydata2.GetPoint(nb2 - 1, pt2)\r\n minVal2 = min(pt1[2], pt2[2])\r\n maxVal2 = max(pt1[2], pt2[2])\r\n valueBase = max(minVal1, minVal2)\r\n valueTip = min(maxVal1, maxVal2)\r\n\r\n # truncate polydatas\r\n truncatedPolydata1 = self.clipPolyData(node1, valueBase)\r\n truncatedPolydata2 = self.clipPolyData(node2, valueBase)\r\n\r\n cellId = vtk.mutable(1)\r\n subid = vtk.mutable(1)\r\n dist = vtk.mutable(1)\r\n cl2 = vtk.vtkCellLocator()\r\n cl2.SetDataSet(truncatedPolydata2)\r\n cl2.BuildLocator()\r\n # Hausforff 1 -> 2\r\n minima = []\r\n for i in range(int(nb1 / float(10))):\r\n pt = [0, 0, 0]\r\n polydata1.GetPoint(10 * i, pt)\r\n closest = [0, 0, 0]\r\n cl2.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff12 = max(minima)\r\n\r\n # Hausforff 2 -> 1\r\n minima = []\r\n cl1 = vtk.vtkCellLocator()\r\n cl1.SetDataSet(truncatedPolydata1)\r\n cl1.BuildLocator()\r\n for i in range(int(nb2 / float(10))):\r\n pt = [0, 0, 0]\r\n polydata2.GetPoint(10 * i, pt)\r\n closest = [0, 0, 0]\r\n cl1.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff21 = max(minima)\r\n return max(hausdorff12, hausdorff21)",
"def hausdorffDistance(self,id1,id2):\n #productive #math\n profprint()\n node1 = slicer.mrmlScene.GetNodeByID(id1)\n polydata1=node1.GetPolyData()\n node2 = slicer.mrmlScene.GetNodeByID(id2)\n polydata2=node2.GetPolyData()\n nb1 = polydata1.GetNumberOfPoints()\n nb2 = polydata2.GetNumberOfPoints()\n minimum=None\n maximum=None\n JJ,jj=None,None\n II,ii=None,None\n pt1=[0,0,0]\n pt2=[0,0,0]\n polydata1.GetPoint(1,pt1)\n polydata1.GetPoint(nb1-1,pt2)\n minVal1=min(pt1[2],pt2[2])\n maxVal1=max(pt1[2],pt2[2])\n pt1=[0,0,0]\n pt2=[0,0,0]\n pt1b,pt2b=None,None\n polydata2.GetPoint(1,pt1)\n polydata2.GetPoint(nb2-1,pt2)\n minVal2 = min(pt1[2],pt2[2])\n maxVal2 = max(pt1[2],pt2[2])\n valueBase=max(minVal1,minVal2)\n valueTip=min(maxVal1,maxVal2)\n\n # truncate polydatas\n truncatedPolydata1 = self.clipPolyData(node1,valueBase)\n truncatedPolydata2 = self.clipPolyData(node2,valueBase)\n\n cellId=vtk.mutable(1)\n subid=vtk.mutable(1)\n dist=vtk.mutable(1)\n cl2=vtk.vtkCellLocator()\n cl2.SetDataSet(truncatedPolydata2)\n cl2.BuildLocator()\n # Hausforff 1 -> 2\n minima=[]\n for i in range(int(nb1/float(10))):\n pt=[0,0,0]\n polydata1.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl2.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff12 = max(minima)\n \n # Hausforff 2 -> 1\n minima=[]\n cl1=vtk.vtkCellLocator()\n cl1.SetDataSet(truncatedPolydata1)\n cl1.BuildLocator()\n for i in range(int(nb2/float(10))):\n pt=[0,0,0]\n polydata2.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl1.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff21 = max(minima)\n return max(hausdorff12,hausdorff21)",
"def hausdorffDistance13(self, id1, id2):\r\n # productive #math\r\n if frequent: profprint()\r\n node1 = slicer.mrmlScene.GetNodeByID(id1)\r\n polydata1 = node1.GetPolyData()\r\n node2 = slicer.mrmlScene.GetNodeByID(id2)\r\n polydata2 = node2.GetPolyData()\r\n nb1 = polydata1.GetNumberOfPoints()\r\n nb2 = polydata2.GetNumberOfPoints()\r\n minimum = None\r\n maximum = None\r\n JJ, jj = None, None\r\n II, ii = None, None\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n polydata1.GetPoint(1, pt1)\r\n polydata1.GetPoint(nb1 - 1, pt2)\r\n minVal1 = min(pt1[2], pt2[2])\r\n maxVal1 = max(pt1[2], pt2[2])\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n pt1b, pt2b = None, None\r\n polydata2.GetPoint(1, pt1)\r\n polydata2.GetPoint(nb2 - 1, pt2)\r\n minVal2 = min(pt1[2], pt2[2])\r\n maxVal2 = max(pt1[2], pt2[2])\r\n valueBase = max(minVal1, minVal2)\r\n valueTip = min(maxVal1, maxVal2)\r\n cellId = vtk.mutable(1)\r\n subid = vtk.mutable(1)\r\n dist = vtk.mutable(1)\r\n cl2 = vtk.vtkCellLocator()\r\n cl2.SetDataSet(polydata2)\r\n cl2.BuildLocator()\r\n # Hausforff 1 -> 2\r\n minima = []\r\n for i in range(int(nb1 / float(100))):\r\n pt = [0, 0, 0]\r\n polydata1.GetPoint(100 * i, pt)\r\n closest = [0, 0, 0]\r\n cl2.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff12 = max(minima)\r\n\r\n # Hausforff 2 -> 1\r\n minima = []\r\n cl1 = vtk.vtkCellLocator()\r\n cl1.SetDataSet(polydata1)\r\n cl1.BuildLocator()\r\n for i in range(int(nb2 / float(10))):\r\n pt = [0, 0, 0]\r\n polydata2.GetPoint(10 * i, pt)\r\n closest = [0, 0, 0]\r\n cl1.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff21 = max(minima)\r\n return max(hausdorff12, hausdorff21)",
"def hausdorff_distance(image1, image2):\n image1_int = image1.clone(\"unsigned int\")\n image2_int = image2.clone(\"unsigned int\")\n\n libfn = utils.get_lib_fn(\"hausdorffDistance%iD\" % image1_int.dimension)\n d = libfn(image1_int.pointer, image2_int.pointer)\n\n return d",
"def distancia(self, other):\n return ((self.x-other.x)**2 + (self.y-other.y)**2 + (self.z-other.z)**2) ** (1 / 2)",
"def __distance_to(self, other: Any) -> float:\n return np.linalg.norm(self.pos - other.pos)",
"def hausdorffDistance(self,fiber1,fiber2):\n polyA = fiber1.GetPolyData()\n polyB = fiber2.GetPolyData()\n\n locA = vtk.vtkMergePoints()\n locB = vtk.vtkMergePoints()\n\n locA.SetDataSet(polyA)\n locB.SetDataSet(polyB)\n\n locs = (locA,locB)\n for loc in locs:\n loc.AutomaticOn()\n loc.BuildLocator()\n\n ptsA = polyA.GetPoints()\n ptsB = polyB.GetPoints()\n\n rangeA = ptsA.GetNumberOfPoints()\n rangeB = ptsB.GetNumberOfPoints()\n\n maxd = 0.0\n maxd1 = 0.0\n avgd = 0.0\n avgd1 = 0.0\n\n distanceA = vtk.vtkFloatArray()\n distanceA.SetName(\"Distance\")\n for i in range(rangeA):\n pt = ptsA.GetPoint(i)\n bid = locB.FindClosestPoint(pt)\n ptb = ptsB.GetPoint(bid)\n d = self.pointDistance(pt,ptb)\n distanceA.InsertNextValue(d)\n avgd += d\n if d > maxd:\n maxd = d\n avgd = avgd / rangeA\n\n distanceB = vtk.vtkFloatArray()\n distanceB.SetName(\"Distance\")\n for i in range(rangeB):\n pt = ptsB.GetPoint(i)\n bid = locA.FindClosestPoint(pt)\n ptb = ptsA.GetPoint(bid)\n d = self.pointDistance(pt,ptb)\n distanceB.InsertNextValue(d)\n avgd1 += d\n if d > maxd1:\n maxd1 = d\n avgd1 = avgd1 / rangeB\n\n polyA.GetPointData().SetScalars(distanceA)\n polyB.GetPointData().SetScalars(distanceB)\n\n return max(maxd,maxd1)",
"def hausdorffDistance13(self,id1,id2):\n #productive #math\n profprint()\n node1 = slicer.mrmlScene.GetNodeByID(id1)\n polydata1=node1.GetPolyData()\n node2 = slicer.mrmlScene.GetNodeByID(id2)\n polydata2=node2.GetPolyData()\n nb1 = polydata1.GetNumberOfPoints()\n nb2 = polydata2.GetNumberOfPoints()\n minimum=None\n maximum=None\n JJ,jj=None,None\n II,ii=None,None\n pt1=[0,0,0]\n pt2=[0,0,0]\n polydata1.GetPoint(1,pt1)\n polydata1.GetPoint(nb1-1,pt2)\n minVal1=min(pt1[2],pt2[2])\n maxVal1=max(pt1[2],pt2[2])\n pt1=[0,0,0]\n pt2=[0,0,0]\n pt1b,pt2b=None,None\n polydata2.GetPoint(1,pt1)\n polydata2.GetPoint(nb2-1,pt2)\n minVal2 = min(pt1[2],pt2[2])\n maxVal2 = max(pt1[2],pt2[2])\n valueBase=max(minVal1,minVal2)\n valueTip=min(maxVal1,maxVal2)\n cellId=vtk.mutable(1)\n subid=vtk.mutable(1)\n dist=vtk.mutable(1)\n cl2=vtk.vtkCellLocator()\n cl2.SetDataSet(polydata2)\n cl2.BuildLocator()\n # Hausforff 1 -> 2\n minima=[]\n for i in range(int(nb1/float(100))):\n pt=[0,0,0]\n polydata1.GetPoint(100*i,pt)\n closest=[0,0,0]\n cl2.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff12 = max(minima)\n \n # Hausforff 2 -> 1\n minima=[]\n cl1=vtk.vtkCellLocator()\n cl1.SetDataSet(polydata1)\n cl1.BuildLocator()\n for i in range(int(nb2/float(10))):\n pt=[0,0,0]\n polydata2.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl1.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff21 = max(minima)\n return max(hausdorff12,hausdorff21)",
"def hellinger_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n assert (np.all(x.sum(1) != 0.) and np.all(y.sum(1) != 0.))\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n x = np.sqrt(x)\n y = np.sqrt(y)\n # x (120, 40), y (100, 40), H(x,y) (120, 100)\n xx = np.tile(x, (y.shape[0], 1, 1)).transpose((1, 0, 2))\n yy = np.tile(y, (x.shape[0], 1, 1))\n xx_yy = xx - yy\n res = np.sqrt(np.sum(xx_yy ** 2, axis=-1))\n return np.float64((1. / np.sqrt(2)) * res)",
"def distance(self, other_pt, is_lla=True):\n return 0.0",
"def calculate(self):\n\n distance_filter = sitk.HausdorffDistanceImageFilter()\n distance_filter.Execute(self.ground_truth, self.segmentation)\n return distance_filter.GetHausdorffDistance()",
"def Hausdorff_distance(clust1, clust2, forward, dir):\n if forward == None:\n return max(Hausdorff_distance(clust1,clust2,True,dir),Hausdorff_distance(clust1,clust2,False,dir))\n else:\n clstart, clend = (clust1,clust2) if forward else (clust2,clust1)\n dx, dy = dir if forward else (-dir[0],-dir[1])\n return sum([min([Dist((p1[0]+dx,p1[1]+dy),p2) for p2 in clend]) for p1 in clstart])/len(clstart)",
"def distance_to(self, other):\n return abs(self.x-other.x) + abs(self.y-other.y) + abs(self.z-other.z)",
"def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance",
"def directed_Hausdorff_hyperbox(b1,b2): \n return max(0,np.max(np.hstack((b1.u-b2.u,b2.l-b1.l))))",
"def hausd(result, reference, voxelspacing=None, connectivity=1):\n hd1 = __surface_distances(result, reference, voxelspacing, connectivity).max()\n hd2 = __surface_distances(reference, result, voxelspacing, connectivity).max()\n hd = max(hd1, hd2)\n return hd",
"def hellinger_distance(doca, docb, axis=1):\n return np.sum((doca**.5 - docb**.5)**2, axis=axis)",
"def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance",
"def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))",
"def calculate(self):\n\n distance_filter = sitk.HausdorffDistanceImageFilter()\n distance_filter.Execute(self.ground_truth, self.segmentation)\n return distance_filter.GetAverageHausdorffDistance()",
"def distance(self, other_cluster):\n vert_dist = self._vert_center - other_cluster.vert_center()\n horiz_dist = self._horiz_center - other_cluster.horiz_center()\n return math.sqrt(vert_dist ** 2 + horiz_dist ** 2)",
"def distance(self, other_cluster):\n vert_dist = self._vert_center - other_cluster.vert_center()\n horiz_dist = self._horiz_center - other_cluster.horiz_center()\n return math.sqrt(vert_dist ** 2 + horiz_dist ** 2)",
"def manhatam_distance(self) -> int:\n raise NotImplementedError",
"def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)",
"def dist(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return length(sub(first,other))",
"def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)",
"def dist(self, other):\n return math.sqrt((self.x - other.x)**2 +\n (self.y - other.y)**2 +\n (self.z - other.z)**2)",
"def h(pos,obj):\n return D(pos)*(distancia_nodos(pos,obj))",
"def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)"
]
| [
"0.74562",
"0.72187144",
"0.7027511",
"0.6888543",
"0.6825782",
"0.66274846",
"0.65860593",
"0.65808034",
"0.6572659",
"0.6412051",
"0.6410486",
"0.64041233",
"0.63872296",
"0.6371733",
"0.63689435",
"0.63612485",
"0.6321926",
"0.6309879",
"0.6283407",
"0.6269362",
"0.62305546",
"0.62163454",
"0.62163454",
"0.62143",
"0.6208756",
"0.62054294",
"0.6198416",
"0.6193814",
"0.61816233",
"0.6164796"
]
| 0.77946174 | 0 |
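For context on the record above: the hausdorff_distance stub reads like part of the Shapely geometry API. Assuming Shapely (an assumption, since the record itself does not name the library), a minimal runnable sketch of the call it describes:

    # Minimal sketch, assuming the Shapely library (not named in the record itself).
    from shapely.geometry import LineString, Point

    a = Point(0, 0)
    b = LineString([(3, 4), (6, 8)])

    # Largest distance from any point of one geometry to the nearest point of the
    # other, expressed in the same (unitless) coordinate units as the geometries.
    print(a.hausdorff_distance(b))  # 10.0 -- the far endpoint (6, 8) dominates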
Returns a lower dimension geometry that bounds the object. The boundary of a polygon is a line, the boundary of a line is a collection of points. The boundary of a point is an empty (null) collection. | def boundary(self): # -> BaseGeometry:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()",
"def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))",
"def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]",
"def get_bounding_box(self):\n if len(self.elements) == 0:\n return None\n if not (self._bb_valid and\n all(ref._bb_valid for ref in self.get_dependencies(True))):\n bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for element in self.elements:\n if isinstance(element, PolygonSet):\n all_polygons.extend(element.polygons)\n elif isinstance(element, CellReference) or isinstance(\n element, CellArray):\n element_bb = element.get_bounding_box()\n if element_bb is not None:\n bb[0, 0] = min(bb[0, 0], element_bb[0, 0])\n bb[0, 1] = min(bb[0, 1], element_bb[0, 1])\n bb[1, 0] = max(bb[1, 0], element_bb[1, 0])\n bb[1, 1] = max(bb[1, 1], element_bb[1, 1])\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bb_valid = True\n _bounding_boxes[self] = bb\n return _bounding_boxes[self]",
"def get_outer_boundary_of_voronoi(self):\n edge = [edge for edge in self.edges if not edge.nxt][0]\n # next(obj for obj in objs if obj.val==5)\n first_vertex = edge.origin\n outer_boundary = []\n while (not edge.get_destination() == first_vertex):\n if(edge.get_destination().is_infinity()):\n edge = edge.twin.nxt\n else:\n outer_boundary.append(edge)\n edge = edge.nxt\n outer_boundary.append(edge)\n return outer_boundary",
"def _boundary_constraint_fence(\n self,\n x: np.ndarray,\n ) -> np.ndarray:\n # clip dimensions to fit within the boundary\n x_constrained = np.clip(\n x,\n self.boundary_fence['min'],\n self.boundary_fence['max'],\n )\n return x_constrained",
"def get_bounding_box(self):\n deps_still_valid = all(ref._bb_valid for ref in self.get_dependencies(True))\n cached_bbox_still_valid = self._bb_valid and deps_still_valid\n if not cached_bbox_still_valid:\n bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for polygon in self.polygons:\n all_polygons.extend(polygon.polygons)\n for path in self.paths:\n all_polygons.extend(path.to_polygonset().polygons)\n for reference in self.references:\n reference_bb = reference.get_bounding_box()\n if reference_bb is not None:\n all_polygons.append(reference_bb)\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bounding_box = bb\n else:\n self._bounding_box = None\n self._bb_valid = True\n\n if self._bounding_box is None:\n return None\n else:\n # return a *copy* of the cached bounding box to ensure it doesn't get inadvertently modified\n return numpy.array(self._bounding_box)",
"def get_bounds(self):\n log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_bounds()\")\n\n # TODO: Move the operation out of here.\n\n xmin = Inf\n ymin = Inf\n xmax = -Inf\n ymax = -Inf\n\n # for obj in self.object_list:\n for obj in self.get_list():\n try:\n gxmin, gymin, gxmax, gymax = obj.bounds()\n xmin = min([xmin, gxmin])\n ymin = min([ymin, gymin])\n xmax = max([xmax, gxmax])\n ymax = max([ymax, gymax])\n except Exception as e:\n log.warning(\"DEV WARNING: Tried to get bounds of empty geometry. %s\" % str(e))\n\n return [xmin, ymin, xmax, ymax]",
"def get_rectangle(polygon):\n polygon = tuple(polygon)\n hull_ordered = [polygon[index] for index in ConvexHull(polygon).vertices]\n hull_ordered.append(hull_ordered[0])\n hull_ordered = tuple(hull_ordered)\n min_rectangle = _bounding_area(0, hull_ordered)\n for i in range(1, len(hull_ordered) - 1):\n rectangle = _bounding_area(i, hull_ordered)\n if rectangle['area'] < min_rectangle['area']:\n min_rectangle = rectangle\n\n min_rectangle['unit_vector_angle'] = atan2(min_rectangle['unit_vector'][1], min_rectangle['unit_vector'][0])\n min_rectangle['rectangle_center'] = _to_xy_coordinates(min_rectangle['unit_vector_angle'],\n min_rectangle['rectangle_center'])\n return bounding_box_tuple(\n area=min_rectangle['area'],\n length_parallel=min_rectangle['length_parallel'],\n length_orthogonal=min_rectangle['length_orthogonal'],\n rectangle_center=min_rectangle['rectangle_center'],\n unit_vector=min_rectangle['unit_vector'],\n unit_vector_angle=min_rectangle['unit_vector_angle'],\n corner_points=set(_rectangle_corners(min_rectangle))\n )",
"def real_boundaries(self):\n return (self._points[0][1], self._points[0][3])",
"def bounding_box(self):\n return None",
"def get_raw_bounds(self) -> [Vector, Vector]:\n\t\tverts = np.array([v.co for mesh in self._meshes for v in mesh.data.vertices])\n\t\tbbox_min = Vector([*np.min(verts, axis=0)])\n\t\tbbox_max = Vector([*np.max(verts, axis=0)])\n\t\treturn bbox_min, bbox_max",
"def Bounds(self):\n assert self.points is not None\n\n if self.points.shape[1] == 3:\n bounds = np.array([[np.min(self.points[:,0]),\n np.min(self.points[:,1]),\n np.min(self.points[:,2])],\n [np.max(self.points[:,0]),\n np.max(self.points[:,1]),\n np.max(self.points[:,2])]])\n makezero(bounds)\n return bounds\n elif self.points.shape[1] == 2:\n bounds = np.array([[np.min(self.points[:,0]),\n np.min(self.points[:,1])],\n [np.max(self.points[:,0]),\n np.max(self.points[:,1])]])\n makezero(bounds)\n return bounds\n elif self.points.shape[1] == 1:\n bounds = np.array([[np.min(self.points[:,0])],\n [np.max(self.points[:,0])]])\n makezero(bounds)\n return bounds\n else:\n raise ValueError(\"Invalid dimension for mesh coordinates\")",
"def get_bounding_box(self):\n return self._domain.get_bounding_box()",
"def boundary(self):\n return self._boundary",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n if self.rotation is None or self.rotation % 90 == 0:\n cell_bbox = self.ref_cell.get_bounding_box()\n if cell_bbox is None:\n return None\n polygons = self._transform_polygons([cell_bbox])\n else:\n # For non-cardinal rotations of a reference, we must use the\n # flattened polygons for the reference\n polygons = self.get_polygons()\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(\n (\n (all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max()),\n )\n )\n return bb",
"def detect_boundary(self, x, l_old):\n pass",
"def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n\n if self.rotation is None or self.rotation % 90 == 0:\n cell_bbox = self.ref_cell.get_bounding_box()\n if cell_bbox is None:\n return None\n polygons = self._transform_polygons([cell_bbox])\n else:\n # For non-cardinal rotations of a reference, we must use the\n # flattened polygons for the reference\n polygons = self.get_polygons()\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(\n (\n (all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max()),\n )\n )\n return bb",
"def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n if (self.rotation is None and self.magnification is None and\n self.x_reflection is None):\n key = self\n else:\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection)\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))",
"def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection, self.columns, self.rows, self.spacing[0],\n self.spacing[1])\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))",
"def get_bounds(self):\n return self._geometry.bounds",
"def clip_to_extent(geometry: Polygon, image_box: box = image_box) -> Polygon | float:\n if not image_box.contains(geometry):\n geometry = geometry.intersection(image_box)\n if geometry.is_empty:\n return np.nan\n return geometry",
"def boundary_polygon(self, time):\n ti = np.where(time == self.times)[0][0]\n com_x, com_y = self.center_of_mass(time)\n # If at least one point along perimeter of the mask rectangle is unmasked, find_boundaries() works.\n # But if all perimeter points are masked, find_boundaries() does not find the object.\n # Therefore, pad the mask with zeroes first and run find_boundaries on the padded array.\n padded_mask = np.pad(self.masks[ti], 1, 'constant', constant_values=0)\n chull = convex_hull_image(padded_mask)\n boundary_image = find_boundaries(chull, mode='inner', background=0)\n # Now remove the padding.\n boundary_image = boundary_image[1:-1, 1:-1]\n boundary_x = self.x[ti].ravel()[boundary_image.ravel()]\n boundary_y = self.y[ti].ravel()[boundary_image.ravel()]\n r = np.sqrt((boundary_x - com_x) ** 2 + (boundary_y - com_y) ** 2)\n theta = np.arctan2((boundary_y - com_y), (boundary_x - com_x)) * 180.0 / np.pi + 360\n polar_coords = np.array([(r[x], theta[x]) for x in range(r.size)], dtype=[('r', 'f4'), ('theta', 'f4')])\n coord_order = np.argsort(polar_coords, order=['theta', 'r'])\n ordered_coords = np.vstack([boundary_x[coord_order], boundary_y[coord_order]])\n return ordered_coords",
"def bounds_gdf(gdf: gpd.GeoDataFrame) -> box:\n if gdf.empty:\n return Polygon()\n gdf_bounds = gdf.total_bounds\n gdf_bounds_box = box(*gdf_bounds.tolist())\n return gdf_bounds_box",
"def calc_bounding_box(self):\n self.BB = self.geos.abs_el(0).BB\n for geo in self.geos.abs_iter():\n self.BB = self.BB.joinBB(geo.BB)",
"def lower_bound(height):\n tan_108 = math.tan(math.radians(108))\n lower_boundary = 250 + height / tan_108\n return lower_boundary",
"def get_geometry(self):\n geometry = self._geometry\n for geo in self._holes:\n geometry = geometry.difference(geo) \n return geometry"
]
| [
"0.70245296",
"0.6996683",
"0.67483956",
"0.6624984",
"0.6564128",
"0.633718",
"0.63227165",
"0.6250293",
"0.61947584",
"0.6138098",
"0.6067745",
"0.6062616",
"0.60375464",
"0.6032164",
"0.5988255",
"0.59842104",
"0.59677136",
"0.5945666",
"0.594247",
"0.5940417",
"0.59305745",
"0.59131896",
"0.5899361",
"0.58980167",
"0.5892461",
"0.58910716",
"0.5883555",
"0.5853275",
"0.58205",
"0.5805241"
]
| 0.70275587 | 0 |
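The boundary record above describes a dimension-lowering operation; under the same Shapely assumption, a short sketch that exercises all three cases named in the query text:

    # Minimal sketch, assuming Shapely: polygon -> line, line -> points, point -> empty.
    from shapely.geometry import LineString, Point, Polygon

    square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    segment = LineString([(0, 0), (1, 1)])

    print(square.boundary)                # a closed line tracing the square's ring
    print(segment.boundary)               # the two endpoints, as a MultiPoint
    print(Point(0, 0).boundary.is_empty)  # True: a point's boundary is empty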
A figure that envelopes the geometry | def envelope(self): # -> BaseGeometry:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def envelope(self):\n return _property_geo(arctern.ST_Envelope, self)",
"def _gen_axes_patch(self):\n\n return Polygon([[0,0], [0.5,np.sqrt(3)/2], [1,0]], closed=True)",
"def figure():\n fig = plt.figure()\n ax = fig.add_subplot()\n ax.set_aspect('equal')\n return fig, ax",
"def show_geometry(self):\n import matplotlib.pyplot as plt\n fig_geom = plt.figure()\n ax_geom = fig_geom.add_subplot(111)\n rectangle = []\n for i in range(len(self.coordinates)):\n rectangle.append(plt.Rectangle((self.coordinates[i][0],\n self.coordinates[i][1]),\n self.size[i][0], self.size[i][1], alpha=0.5))\n ax_geom.add_patch(rectangle[i])\n plt.axis('auto')\n plt.show()",
"def envelope(self) -> ir.PolygonValue:\n return ops.GeoEnvelope(self).to_expr()",
"def envelope_plot(x, y, winsize, ax=None, fill='gray', color='blue'):\r\n\t\r\n\tif ax is None:\tax = plt.gca()\r\n\t# Coarsely chunk the data, discarding the last window if it's not evenly\r\n\t# divisible. (Fast and memory-efficient)\r\n\tnumwin = x.size // winsize\r\n\tywin = y[:winsize * numwin].reshape(-1, winsize)\r\n\txwin = x[:winsize * numwin].reshape(-1, winsize)\r\n\t# Find the min, max, and mean within each window \r\n\tymin = ywin.min(axis=1)\r\n\tymax = ywin.max(axis=1)\r\n\tymean = ywin.mean(axis=1)\r\n\txmean = xwin.mean(axis=1)\r\n\r\n\tfill_artist = ax.fill_between(xmean, ymin, ymax, color=fill, \r\n\t\t edgecolor='none', alpha=0.5)\r\n\tline, = ax.plot(xmean, ymean, color=color, linestyle='-')\r\n\treturn fill_artist, line",
"def Figure4Main(self, supplemental1=False):\n if not supplemental1:\n example_cells = [5, 9, 17, 30]\n else:\n example_cells = [2, 6, 10, 11, 13, 18]\n\n start_letter = \"A\"\n parent_figure = None\n\n if not supplemental1:\n sizer = {\n \"D\": {\"pos\": [6.5, 2.2, 4.25, 2.5], \"labelpos\": (-0.15, 1.02),},\n \"E\": {\"pos\": [9.5, 2.2, 4.25, 2.5], \"labelpos\": (-0.15, 1.02),},\n \"F\": {\"pos\": [6.5, 2.2, 0.5, 2.5], \"labelpos\": (-0.15, 1.02),},\n \"G\": {\"pos\": [9.5, 2.2, 0.5, 2.5], \"labelpos\": (-0.15, 1.02),},\n }\n figsize = (12,8)\n else:\n sizer = {}\n figsize = (9, 8)\n xw = 1.1\n trace_axes = []\n for j in range(len(example_cells)):\n i = j + 1\n xl = j * 1.25 + 0.75\n axn = f\"A{i:d}\"\n trace_axes.append(axn)\n sizer[axn] = {\n \"pos\": [xl, xw, 3.25, 4.25],\n \"labelpos\": (-0.15, 1.02),\n \"noaxes\": True,\n }\n sizer[f\"B{i:d}\"] = {\n \"pos\": [xl, xw, 2.0, 1.0],\n \"labelpos\": (-0.15, 1.02),\n # \"noaxes\": True,\n }\n sizer[f\"C{i:d}\"] = {\n \"pos\": [xl, xw, 0.5, 1.0],\n \"labelpos\": (-0.15, 0.9),\n \"noaxes\": True,\n }\n # dict pos elements are [left, width, bottom, height] for the axes in the plot. gr = [(a, a+1, 0, 1) for a in range(0, 8)] # just generate subplots - shape do not matter axmap = OrderedDict(zip(sizer.keys(), gr))\n P = PH.arbitrary_grid(\n sizer,\n order=\"columnsfirst\",\n units=\"in\",\n figsize=figsize,\n label=True,\n showgrid=False,\n parent_figure=parent_figure,\n )\n # Efficacy plot\n if not supplemental1:\n EFP = EF.EfficacyPlots(parent_figure=P)\n EFP.plot_efficacy(\"Full\", ax=P.axdict[\"D\"], figuremode=\"clean\")\n # participation plots\n synperum2 = 0.7686 # taken from cell_config.py, line 127 (11/15/2021)\n\n def plot_participation(ax, n, a, b, dB=0, color=None):\n ap = a[n][0].participation / a[n][0].npost_spikes\n bp = b[n][0].participation / b[n][0].npost_spikes\n ax.plot(\n [a[n][0].sites / synperum2, a[n][0].sites / synperum2],\n [ap, bp],\n \"-\",\n color=color,\n )\n ax.scatter(a[n][0].sites / synperum2, ap, marker=\"o\", color=color)\n ax.scatter(a[n][0].sites / synperum2, bp, marker=\"x\", color=color)\n ax.set_xlabel(r\"Input ASA (${\\mu m^2}$)\")\n ax.set_xlim(0, 300)\n ax.set_ylim(0, 1.0)\n ax.set_ylabel(f\"Participation at 0 and {dB:2d} dBSPL\")\n PH.talbotTicks(ax, floatAdd={\"x\": 0, \"y\": 2})\n\n def plot_diff_participation(ax, n, a, b, dB=0, color=None, legend=True):\n ap = a[n][0].participation / a[n][0].npost_spikes\n bp = b[n][0].participation / b[n][0].npost_spikes\n ax.scatter(\n a[n][0].sites / synperum2,\n bp / ap,\n marker=\"o\",\n color=color,\n label=f\"VCN_c{n:02d}\",\n )\n ax.set_xlabel(r\"Input ASA (${\\mu m^2}$)\")\n ax.set_xlim(0, 300)\n ax.set_ylim(0, 3)\n ax.set_ylabel(f\"Participation ratio {dB:2d}/{0:2d} dBSPL\")\n PH.talbotTicks(ax, floatAdd={\"x\": 0, \"y\": 2})\n if legend:\n ax.legend(fontsize=8, loc=\"upper right\", ncol=2)\n\n dB = 30\n if not supplemental1:\n ds = self._load_rcdata(\"Spont\")\n drc = self._load_rcdata(f\"{dB:2d}dB\")\n palette = sns.color_palette(None, len(ds.keys()))\n for i, c in enumerate(ds.keys()):\n # plot_participation(P.axdictax[0], c, ds, drc, dB=dB, color=palette[i])\n plot_diff_participation(\n P.axdict[\"E\"], c, ds, drc, dB=dB, color=palette[i], legend=False\n )\n\n axl = [P.axdict[axi] for axi in trace_axes]\n self.plot_stacked_traces(cells=example_cells, figure=P, axes=axl, maxstack=10)\n if not supplemental1:\n self.plot_revcorr_compare(\n parent_figure=P,\n axlist=[P.axdict[\"F\"], P.axdict[\"G\"]],\n dBSPLs=[\"Spont\", \"30dB\"],\n legend=False,\n )\n 
synlabel_num = 5\n else:\n synlabel_num = 2\n self.plot_revcorr_supplement(cells=example_cells, parent_figure=P, dBSPL=\"30dB\", synlabel_num=synlabel_num)\n # self.plot_efficacy_supplement(cells=example_cells, parent_figure=P, traces=False)\n\n for j in range(len(example_cells)):\n ax = P.axdict[f\"B{j+1:d}\"]\n ax.set_ylim(0, 0.8)\n ax.set_xlim(-5.0, 0.0)\n\n if j > 0:\n PH.noaxes(ax, whichaxes=\"y\")\n else:\n ax.set_ylabel(\"Coinc. Rate (Hz)\")\n ax.xaxis.set_minor_locator(MultipleLocator(1))\n ax.tick_params(which=\"major\", length=4, direction=\"in\")\n ax.tick_params(which=\"minor\", length=2, direction=\"in\")\n fig = FigInfo()\n if parent_figure is not None:\n fig.P = parent_figure\n else:\n fig.P = P\n if not supplemental1:\n fig.filename = \"Figure4_Ephys2_main_v4.pdf\"\n fig.title[\n \"title\"\n ] = \"SBEM Project Figure 4 Modeling: Singles, Efficacy and Revcorr\"\n else:\n fig.filename = \"Figure4-Supplemental1_Revcorr.pdf\"\n fig.title[\n \"title\"\n ] = \"SBEM Project Figure 4 Modeling: other cells Singles and Revcorr\"\n\n title2 = {\"title\": f\"\", \"x\": 0.99, \"y\": 0.01}\n fig.title2 = title2\n print(\"returnin fig: \", fig)\n return fig",
"def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return",
"def createFigure(self):\n\n SMALL_SIZE = 14\n MEDIUM_SIZE = 18\n BIGGER_SIZE = 36\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n fig, axes = plt.subplots()\n fig.set_size_inches(10, 6, forward=True)\n serialNumber = self.spectrometer.getSerialNumber()\n model = self.spectrometer.model\n fig.canvas.manager.set_window_title('Spectrometer [serial # {0}, model {1}]'.format(serialNumber, model))\n axes.set_xlabel(\"Wavelength [nm]\")\n axes.set_ylabel(\"Intensity [arb.u]\")\n return fig, axes",
"def envelope_aggr(self):\n return GeoSeries(arctern.ST_Envelope_Aggr(self))",
"def create_figure(self):\n plt.rcParams.update(general_utils.returnGraphConfigs(\"anim\"))\n self.fig = plt.figure()\n self.axes = plt.axes()\n self.axes.set_xlabel(\"Cells In X (Columns)\")\n self.axes.set_ylabel(\"Cells In Y (Rows)\")\n self.axes.set_xlim(0, self.dimensions - 1)\n self.axes.set_ylim(0, self.dimensions - 1)",
"def plot_single_hfo(hfo, envelope = False, xlim =[-1,1], cutoff = None, v = True,\n axes = None, figure_size = (15,10),dpi=600,saveplot = None):\n if axes == None:\n # Creating the figure \n fig = plt.figure(figsize=figure_size,dpi=dpi)\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n else:\n ax1 = axes[0]\n ax2 = axes[1]\n ax3 = axes[2]\n\n # number of points\n npoints = hfo.waveform.shape[0]\n time_v = np.linspace(-1,1,npoints,endpoint=True)\n # creating the axes\n \n ax1.plot(time_v,hfo.waveform[:,0],'b')\n ax1.plot(time_v[hfo.start_idx:hfo.end_idx],hfo.waveform[hfo.start_idx:hfo.end_idx,0],'k')\n \n adjust_spines(ax1, ['left'])\n ax1.set_xlim(xlim)\n \n \n \n filt = hfo.waveform[:,1]\n ax2.plot(time_v,filt) \n ax2.plot(time_v[hfo.start_idx:hfo.end_idx],filt[hfo.start_idx:hfo.end_idx],'k')\n if envelope:\n env = hfo.waveform[:,2]\n ax4 = ax2.twinx()\n ax4.plot(time_v,env,'g')\n \n\n \n adjust_spines(ax2, ['left', 'bottom'])\n ax2.set_xlim(xlim)\n \n \n hfo.spectrum.plot(cutoff = cutoff, v = v, ax = ax3)\n ax3.set_title('peak freq = ' + str(hfo.spectrum.peak_freq))\n adjust_spines(ax3, ['left', 'bottom'])\n \n if saveplot != None:\n if type(saveplot) == str: \n plt.savefig(saveplot, bbox_inches='tight')\n else:\n raise Exception('saveplot should be a string')\n plt.draw()",
"def interior(self):\n return Shape(self - self.edge('inner'))",
"def figure(fp=dict(), ap=dict(left=0.15, bottom=0.12, right=0.95, top=0.95, \n wspace=0.10, hspace=0.10), orientation='portrait'):\n\n __init__()\n golden = (5 ** 0.5 + 1.0) / 2.0 # The golden ratio\n \n if 'figsize' not in fp.keys():\n if orientation == 'landscape':\n fp['figsize'] = (11, 8)\n elif orientation == 'portrait':\n fp['figsize'] = (8, 11)\n elif orientation == 'squared':\n fp['figsize'] = (8, 8)\n elif orientation == 'worldmap':\n fp['figsize'] = (9, 5.0625) # Widescreen aspect ratio 16:9\n else:\n raise Warning, 'Orientation \\'%s\\' not allowed.' % (orientation, )\n \n fig = pylab.figure(**fp)\n fig.subplots_adjust(**ap)\n \n return fig",
"def create_figure_new(self):\n kw = {}\n self.p = figure(plot_height=400, plot_width=400, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)\n self.p.circle(x=[0],y=[0])",
"def create_figure(self) -> None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]",
"def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)",
"def build_fig(self, size=(10, 10), dpi=300):\n fig = plt.figure(figsize=size, \n facecolor='w',\n dpi=dpi)\n\n plt.axis('off')\n return fig",
"def figure4():\n\n plot_settings = {'y_limits': [-80, -50],\n 'x_limits': None,\n 'y_ticks': [-80, -70, -60, -50],\n 'locator_size': 5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 20,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_4',\n 'legend': ['control', 'apamin'],\n 'legend_size': 8,\n 'y_on': True}\n line_styles = ['-', 'dotted']\n\n plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate figure 1 (top)\n for ix, g_sk_bar in enumerate([0.3, 0]):\n t, y = solver(100, g_sk_bar=g_sk_bar)\n plt.plot(t, y[:, 0], c='k', linestyle=line_styles[ix])\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 1, 2)\n t1 = 1200\n t, y = solver(t1, t_start=50, duration=t1, i_bias_on=0.33, g_sk_bar=0.03)\n plt.plot(t, y[:, 0], 'k-')\n\n plot_settings['y_limits'] = [-100, 30]\n plot_settings['x_limits'] = [0, t1]\n plot_settings['y_ticks'] = [-80, -60, -40, -20, 0, 20]\n plot_settings['locator_size'] = 10\n plot_settings['scale_size'] = 100\n plot_settings['legend'] = None\n alter_figure(plot_settings, close=True) # Alter plot for publication",
"def figure():\n global fig\n return fig",
"def generate_figure(figsize=(2, 2), xlim=[0, 1], ylim=[0, 1]):\n plt.figure(figsize=figsize)\n plt.grid()\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.xlabel('$\\mathrm{Re}$')\n plt.ylabel('$\\mathrm{Im}$')",
"def envelope(self) -> ty.Optional[reapy.Envelope]:\r\n ...",
"def draw_figure(self, **kwargs):\n\n fig = figure(**kwargs)\n fig.xaxis.axis_label = self.x_label\n fig.yaxis.axis_label = self.y_label\n\n return fig",
"def frame():\n fig = plt.figure(figsize = (6, 3))\n\n plt.subplots_adjust(left=.15, bottom=.2, right=.95, top=.9)\n ax = fig.add_subplot(111)\n \n ax.tick_params(axis=\"x\", labelsize=12)\n ax.tick_params(axis=\"y\", labelsize=12)\n\n return fig, ax",
"def draw_env(ax=None):\n ax = ax or plt.gca()\n \n ax.set_aspect('equal')\n ax.set_xlim([-4,4])\n ax.set_ylim([-4,4])\n\n # Draw walls\n ax.add_patch(patches.Rectangle((-4/3,-4),0.2,16/3,linewidth=1,edgecolor='k',facecolor='k'))\n ax.add_patch(patches.Rectangle((4/3,-4/3),0.2,16/3,linewidth=1,edgecolor='k',facecolor='k'))\n \n ax.invert_yaxis()",
"def plot_initial_geometry(ni=0.0, mu=0.5):",
"def figure1():\n\n plot_settings = {'y_limits': [-80, -50],\n 'x_limits': None,\n 'y_ticks': [-80, -70, -60, -50],\n 'locator_size': 5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 20,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_1',\n 'legend_size': 8,\n 'legend': None,\n 'y_on': True}\n\n t, y = solver(100) # Integrate solution\n plt.figure(figsize=(5, 2)) # Create figure\n plt.plot(t, y[:, 0], 'k-') # Plot solution\n\n \"\"\"\n Annotate plot with figures\n \"\"\"\n plt.gca().annotate('fAHP', xy=(13.5, -65), xytext=(17, -60),\n arrowprops=dict(facecolor='black', shrink=0, headlength=10, headwidth=5, width=1), )\n plt.gca().annotate('ADP', xy=(15.5, -66), xytext=(25, -65),\n arrowprops=dict(facecolor='black', shrink=0, headlength=10, headwidth=5, width=1), )\n plt.gca().annotate('mAHP', xy=(38, -77), xytext=(43, -72),\n arrowprops=dict(facecolor='black', shrink=0, headlength=10, headwidth=5, width=1), )\n alter_figure(plot_settings, close=True) # Alter figure for publication",
"def render(self):\n\n theta = self.angle*math.pi/180.0\n cth = math.cos(theta)\n sth = math.sin(theta)\n pts = []\n cornerpts = []\n\n for vertex in self.points:\n x = vertex[0] + self.pos[0] - self.anchor[0]\n y = vertex[1] + self.pos[1] - self.anchor[1]\n\n xt = x * cth - y * sth\n yt = x * sth + y * cth\n\n x = xt + self.anchor[0]\n y = yt + self.anchor[1]\n\n cornerpts.append([x,y])\n pts.append(gr.Point(self.scale * x, self.win.getHeight() - self.scale*y))\n\n self.corners = cornerpts\n self.vis = [gr.Polygon(pts)]",
"def _setup_figure(self):\n\n plt.figure(1)\n plt.clf()\n\n # Two main axes\n self._tsne_window = plt.axes([0.05, 0.05, 0.4, 0.4])\n self._main_window = plt.axes([0.05, 0.55, 0.4, 0.4])\n\n # Nine sub axes\n self._sub_windows = []\n for row in range(3):\n for col in range(3):\n tt = plt.axes([0.5+0.17*col, 0.75-0.25*row, 0.15, 0.15])\n tt.set_xticks([])\n tt.set_yticks([])\n self._sub_windows.append(tt)\n\n # Register the button click\n self._cid = plt.figure(1).canvas.mpl_connect('button_press_event', self._onclick)\n\n # Text\n plt.figure(1).text(0.6, 0.2, 'Click with 2nd or 3rd mouse button to select image...')\n plt.figure(1).text(0.05, 0.5, 'Click in main image or tSNE plot to find similar cutouts...')\n plt.figure(1).text(0.6, 0.05, 'The tSNE data reduction calculated from data run through {}'.format(self._model_name), fontsize=8)\n\n # Show\n plt.figure(1).show()\n plt.figure(1).canvas.draw()",
"def render(fig, rec):\n fig.gca().add_patch(\n PolygonPatch(rec['geometry'], fc=BLUE, ec=BLUE, alpha=0.5, zorder=2))\n return fig"
]
| [
"0.6156262",
"0.6136208",
"0.6098685",
"0.6080936",
"0.57456595",
"0.56995076",
"0.5694695",
"0.5625121",
"0.56112766",
"0.55992675",
"0.5583434",
"0.5560338",
"0.5522078",
"0.54982924",
"0.5496067",
"0.54602045",
"0.5441012",
"0.5427183",
"0.5390842",
"0.5371041",
"0.5364203",
"0.53493094",
"0.53488076",
"0.53328586",
"0.5271423",
"0.52575934",
"0.5218447",
"0.52133644",
"0.5210695",
"0.5187003"
]
| 0.7686538 | 0 |
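The envelope record above concerns the axis-aligned bounding figure of a geometry; a minimal sketch under the same Shapely assumption:

    # Minimal sketch, assuming Shapely: the envelope is the smallest axis-aligned
    # rectangle containing the geometry, returned as a Polygon (or the geometry
    # itself in degenerate cases such as a single point).
    from shapely.geometry import LineString, Point

    diag = LineString([(0, 0), (2, 3)])
    print(diag.envelope)         # rectangle with corners (0, 0) and (2, 3)
    print(Point(1, 1).envelope)  # POINT (1 1) -- a point is its own envelope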
Returns a simplified geometry produced by the Douglas-Peucker algorithm. Coordinates of the simplified geometry will be no more than the tolerance distance from the original. Unless the topology preserving option is used, the algorithm may produce self-intersecting or otherwise invalid geometries. | def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def simplify(self, tolerance):\n return _unary_geo(arctern.ST_SimplifyPreserveTopology, self, tolerance)",
"def simplify(\n self,\n tolerance: ir.FloatingValue,\n preserve_collapsed: ir.BooleanValue,\n ) -> GeoSpatialValue:\n return ops.GeoSimplify(self, tolerance, preserve_collapsed).to_expr()",
"def get_geometry(self):\n geometry = self._geometry\n for geo in self._holes:\n geometry = geometry.difference(geo) \n return geometry",
"def test_simplify(self):\n name = 'Antarctic Box'\n feature = {\n 'type': 'Feature',\n 'properties': {\n 'name': name,\n 'tags': '',\n 'object': 'region',\n 'component': 'ocean',\n 'author': 'Xylar Asay-Davis'\n },\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [\n [\n [\n 90.000000,\n -70.000000\n ],\n [\n 90.000000,\n -80.000000\n ],\n [\n 90.000000,\n -80.000000\n ],\n [\n 70.000000,\n -80.000000\n ],\n [\n 70.000000,\n -70.000000\n ],\n [\n 90.000000,\n -70.000000\n ],\n [\n 90.000000,\n -70.000000\n ]\n ]\n ]\n }\n }\n fc = FeatureCollection()\n fc.add_feature(feature=feature)\n self.check_feature(fc.features[0],\n expected_name=name,\n expected_type='Polygon')\n\n # verify that the original shape has 7 coordinates (with 2 redundant\n # points)\n geom = fc.features[0]['geometry']\n orig_shape = shapely.geometry.shape(geom)\n assert len(orig_shape.exterior.coords) == 7\n\n simplified = fc.simplify(tolerance=0.0)\n\n # verify that the simplified shape has 5 coordinates (with the 2\n # redundant points removed)\n geom = simplified.features[0]['geometry']\n simplified_shape = shapely.geometry.shape(geom)\n assert len(simplified_shape.exterior.coords) == 5",
"def simplify(self, tolerance=1e-3):\n for n, points in enumerate(self.polygons):\n self.polygons[n] = _simplify(points, tolerance=tolerance)\n if self.parent is not None:\n self.parent._bb_valid = False\n return self",
"def _prepare_with_copy(geometry):\n geometry = pygeos.apply(geometry, lambda x: x) # makes a copy\n pygeos.prepare(geometry)\n return geometry",
"def roundify_geometry(output_string):\n\n # Get lines for the output string\n lines = output_string.splitlines()\n\n # Get the number of atoms\n natom = int(lines[0])\n\n # loop over the lines to find the smallest geometry\n rrminmax = 1.0e10\n ngeom = 0\n small_geo_idx = 0\n while ngeom*(natom+2) < len(lines):\n rrmax = 0.0\n for i in range(natom):\n for j in range(i+1, natom):\n # Get the line\n xyz1 = lines[i+ngeom*(natom+2)+2].split()[1:]\n xyz2 = lines[j+ngeom*(natom+2)+2].split()[1:]\n # Get the coordinates\n atom1 = [float(val) for val in xyz1]\n atom2 = [float(val) for val in xyz2]\n # Calculate the interatomic distance\n rrtest = np.sqrt((atom1[0]-atom2[0])**2 +\n (atom1[1]-atom2[1])**2 +\n (atom1[2]-atom2[2])**2)\n # Check and see if distance is more than max\n if rrtest > rrmax:\n rrmax = rrtest\n # If max below moving threshold, set to smallest geom\n if rrmax < rrminmax:\n rrminmax = rrmax\n small_geo_idx = ngeom\n ngeom += 1\n\n # Set the output geometry\n geom_str = '{}\\n'.format(natom)\n for i in range(natom):\n geom_str += lines[i+small_geo_idx*(natom+2)+2] + '\\n'\n\n return geom_str",
"def simplify(input_table, decimals):\n\n # Convert to geodataframe\n gdf = df_to_gdf(input_table)\n\n # Simplify with fixed tolerance\n tolerance = 10 ** -decimals\n gdf.geometry = gdf.geometry.simplify(tolerance, preserve_topology=True)\n\n # TODO: Dynamic simplify. Tolerance is 0.01% van de omtrek\n #def dynamic_simplify(geom):\n # return geom.simplify(geom.length * 0.0001, preserve_topology=True)\n #\n #gdf.geometry = gdf.geometry.apply(dynamic_simplify)\n\n # Return as a dataframe\n return gdf_to_df(gdf)",
"def simplify(uniques, intersections, tolerance):\n uniques_sm = [u.simplify(tolerance=tolerance) for u in uniques]\n\n intersections_sm = [[None for i in range(len(uniques))] for j in range(len(uniques))]\n for i,s1 in enumerate(intersections):\n for j,s2 in enumerate(s1):\n if type(s2) is not shapely.geometry.GeometryCollection():\n intersections_sm[i][j] = s2.simplify(tolerance=tolerance)\n return uniques_sm, intersections_sm",
"def findSensibleProjection(geom):\n coords = getCoords(geom)\n y = coords[:, 1]\n x = coords[:, 0]\n yMin = y.min()\n yMax = y.max()\n if (yMax - yMin) > 90:\n # We are crossing a lot of latitude, which suggests that we have a \n # long strip> In this case, we don't even bother to suggest an EPSG. \n epsg = None\n elif yMin < -80:\n # We are nearing the south pole, so go with UPS south\n epsg = 32761\n elif yMax > 80:\n # Nearing north pole, so UPS North\n epsg = 32661\n else:\n # Work out a UTM zone. Note that we use the median value to get a rough \n # idea of the centre, rather than the mean, because the mean is subject to all \n # sorts of problems when crossing the date line\n xMedian = numpy.median(x)\n yMedian = numpy.median(y)\n zone = int((xMedian + 180)/6) % 60 + 1\n if yMedian < 0:\n epsgBase = 32700\n else:\n epsgBase = 32600\n epsg = epsgBase + zone\n return epsg",
"def simplify_network(network):\n network_proj = ox.project_graph(network)\n simplified_proj = ox.consolidate_intersections(network_proj, rebuild_graph=True, tolerance=50, dead_ends=False)\n simplified = ox.project_graph(simplified_proj, network.graph[\"crs\"])\n return simplified",
"def test_extract_geometry():\r\n file_path = 'C:/Oregon_State/Spring_2019/Soft_dev_eng/StoveOpt/tests/Stove_test_Geometry.xlsx'\r\n pt1x, pt1z, pt1y, pt2x, pt2z, pt2y, pt3x, pt3z, pt3y, pt4x, pt4z, pt4y, pt5x, pt5z, pt5y, pt6x, pt6z, pt6y, pt7x, pt7z, pt7y, pt8x, pt8z, pt8y, pt9x, pt9z, pt9y, pt10x, pt10z, pt10y, pt11x, pt11z, pt11y, pt12x, pt12z, pt12y, pt13x, pt13z, pt13y, pt14x, pt14z, pt14y, pt15x, pt15z, pt15y, pt16x, pt16z, pt16y = extract_geometry(file_path)\r\n assert pt2x == 0.1\r\n assert pt2z == 0\r\n assert pt2y == 0\r\n assert pt3x == 0\r\n assert pt3z == 0.15\r\n assert pt3y == 0\r\n assert pt4x == 0.1\r\n assert pt4z == 0.15\r\n assert pt4y == 0\r\n assert pt5x == 0.1\r\n assert pt5z == 0.16\r\n assert pt5y == 0\r\n assert pt6x == 0\r\n assert pt6z == 0.16\r\n assert pt6y == 0\r\n assert pt7x == 0\r\n assert pt7z == 0.3\r\n assert pt7y == 0\r\n assert pt8x == 0.1\r\n assert pt8z == 0.3\r\n assert pt8y == 0\r\n assert pt9x == 0.17\r\n assert pt9z == 0.3\r\n assert pt9y == 0\r\n assert pt10x == -0.07\r\n assert pt10z == 0.3\r\n assert pt10y == 0\r\n assert pt11x == -0.07\r\n assert pt11z == 0.5\r\n assert pt11y == 0\r\n assert pt12x == -.04\r\n assert pt12z == 0.5\r\n assert pt12y == 0\r\n assert pt13x == 0.14\r\n assert pt13z == 0.5\r\n assert pt13y == 0\r\n assert pt14x == 0.17\r\n assert pt14z == 0.5\r\n assert pt14y == 0\r\n assert pt15x == -0.04\r\n assert pt15z == 0.33\r\n assert pt15y == 0\r\n assert pt16x == 0.14\r\n assert pt16z == 0.33\r\n assert pt16y == 0\r\n #assert U_100x == 1\r\n #assert U_100y == 0\r\n #assert U_100z == 0\r",
"def simplify(self):\n\n from podpac.core.coordinates.uniform_coordinates1d import UniformCoordinates1d\n\n if self.is_uniform:\n return UniformCoordinates1d(self.start, self.stop, self.step, **self.properties)\n\n return self",
"def substituteGeometry(*args, disableNonSkinDeformers: bool=True, newGeometryToLayer: bool=True,\n oldGeometryToLayer: bool=True, reWeightDistTolerance: float=0.0,\n retainOldGeometry: bool=True, **kwargs)->AnyStr:\n pass",
"def warp_geometry(geom, src_crs, dst_crs):\n return shapely.geometry.shape(rasterio.warp.transform_geom(src_crs, dst_crs, shapely.geometry.mapping(geom)))",
"def prim_solve(self):\n\n\t\tmin_span_tree = Graph([self.graph.vertices[0]], [])\n\t\tdup_graph = self.graph.duplicate()\n\n\t\tfor i in range(len(self.graph.vertices) - 1):\n\t\t\tneighbour_edges = []\n\t\t\tfor cur in min_span_tree.vertices:\n\t\t\t\tneighbour_edges += dup_graph.get_neighbour_edges(cur)\n\n\t\t\tneighbour_edges.sort(key=lambda x: x[2])\n\t\t\tshortest_edge = neighbour_edges[0]\n\t\t\tnew_node = shortest_edge[0] if shortest_edge[1] in min_span_tree.vertices else shortest_edge[1]\n\n\t\t\tmin_span_tree.edges.append(shortest_edge)\n\t\t\tmin_span_tree.vertices.append(new_node)\n\t\t\tdup_graph.edges.remove(shortest_edge)\n\n\t\treturn min_span_tree",
"def test_dist_itslef(self):\n X = [[0, 10], [4, 2]] # Just some points. I've no idea where on globe.\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))\n\n X = [[34.0522, 118.2437], # Lon Angeles\n [37.7749, 122.4194]] # San Francisco\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))",
"def simplify(self):\n \n added_clumps = []\n staying_tunnels = []\n removed_clumps = set()\n \n for tunnel in self.tunnels:\n tunnel_end_distance = self.get_distance(tunnel.start, tunnel.end)\n if tunnel_end_distance - tunnel.start.distance_from_wall < 0 or \\\n tunnel_end_distance - tunnel.end.distance_from_wall < 0:\n removed_clumps.add(tunnel.start.node)\n removed_clumps.add(tunnel.end.node)\n new_node = tunnel.merge_endpoints()\n added_clumps.append(new_node)\n else:\n staying_tunnels.append(tunnel)\n #print removed_clumps\n \n new_clumps = []\n \n for clump in list(self.clumps) + added_clumps:\n if clump not in removed_clumps:\n new_clumps.append(clump)\n else:\n removed_clumps.remove(clump)\n\n if removed_clumps:\n raise Exception(\"Some removed clumps couldn't be found in the main set and I'm scared\")\n \n self.clumps = new_clumps\n self.tunnels = staying_tunnels",
"def getGeometry(self):\n return self.geometry",
"def getGeometry(self):\n return self.geometry",
"def propanolLowest():\n coords = [\n [-1.9554949371, 0.1467391618, 0.0031595607],\n [-0.5906278346, -0.5279387138, -0.0201649611],\n [0.5440986558, 0.4958779663, 0.0283462055],\n [0.4812068385, 1.1678478833, -0.8308000219],\n [0.4590669813, 1.0993020658, 0.9450529713],\n [1.8195161785, -0.0957487212, -0.0534239359],\n [1.9103706588, -0.7338049177, 0.6631507673],\n [-0.5004127933, -1.2028008461, 0.8364936998],\n [-0.4854009629, -1.1250023438, -0.9282499098],\n [-2.7476736372, -0.5972665554, -0.0242488945],\n [-2.0700756998, 0.8040326560, -0.8554507953],\n [-2.0722381370, 0.7410005769, 0.9069567477],\n ]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"O\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)",
"def douglas_peucker(\n *,\n df: pd.DataFrame = None,\n tolerance: float,\n x: Union[str, pd.Series] = \"x\",\n y: Union[str, pd.Series] = \"y\",\n z: Union[None, str, pd.Series] = None,\n z_factor: float = 3.048,\n lat: Union[None, str, pd.Series] = None,\n lon: Union[None, str, pd.Series] = None,\n) -> np.ndarray: # type: ignore\n\n if df is None and (isinstance(x, str) or isinstance(y, str)):\n raise ValueError(\"Provide a dataframe if x and y are column names\")\n if df is None and (isinstance(lon, str) or isinstance(lat, str)):\n raise ValueError(\"Provide a dataframe if lat and lon are column names\")\n if tolerance < 0:\n raise ValueError(\"tolerance must be a positive float\")\n\n if df is not None and isinstance(lat, str) and isinstance(lon, str):\n lat, lon = df[lat], df[lon]\n if isinstance(lat, str) or isinstance(lon, str):\n raise ValueError(\"lat and lon must now be Pandas Series\")\n if df is not None and lat is not None and lon is not None:\n projection = pyproj.Proj(\n proj=\"lcc\",\n ellps=\"WGS84\",\n lat_1=lat.min(),\n lat_2=lat.max(),\n lat_0=lat.mean(),\n lon_0=lon.mean(),\n )\n\n transformer = pyproj.Transformer.from_proj(\n pyproj.Proj(\"epsg:4326\"), projection, always_xy=True\n )\n x, y = transformer.transform(lon.values, lat.values)\n else:\n if df is not None:\n x, y = df[x].values, df[y].values\n x, y = np.array(x), np.array(y)\n\n if z is not None:\n if df is not None:\n z = df[z].values\n z = z_factor * np.array(z)\n\n mask = np.ones(len(x), dtype=bool)\n if z is None:\n _douglas_peucker_rec(x, y, mask, tolerance)\n else:\n _douglas_peucker_rec_3d(x, y, z, mask, tolerance)\n\n return mask",
"def sw_corner(self):\n return (self.min_lat, self.min_lon)",
"def test_project(self):\n import itertools\n from numpy import array, dot\n from numpy.linalg import det\n\n # our little magic constant\n magic = 0.33377777373737737777\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 1 + magic, -1 - magic):\n \n s = space(curvature=k)\n\n # test line preserving projection\n # 3 points are colinear when\n # | x1 y1 1 |\n # | x2 y2 1 | = 0\n # | x3 y3 1 |\n # let's test this!\n\n for p, q in itertools.permutations((\n (1, 0),\n (3/5, 4/5),\n (-5/13, 12/13),\n (-8/17, -15/17),\n ), 2):\n p = s.make_point(p, magic)\n q = s.make_point(q, magic)\n u = p.project(projection_types.preserve_lines)\n v = (p+q).project(projection_types.preserve_lines)\n w = (p+(-magic)*q).project(projection_types.preserve_lines)\n d = det([[*u, 1],[*v, 1],[*w, 1]])\n self.assertTrue(abs(d) < 1e-9)\n\n # test angle preserving projection\n # map will be conformal, so we do like a secant test\n\n delta = 1e-9\n vi = s.make_point((1, 0, 0), delta)\n vj = s.make_point((0, 1, 0), delta)\n vk = s.make_point((0, 0, 1), delta)\n for p in (\n (1, 0, 0),\n (0, 3/5, 4/5),\n (-5/13, 12/13, 0),\n (2/11, 6/11, 9/11),\n (3/7, 6/7, 2/7)\n ):\n p = s.make_point(p, magic)\n pp = p.project(projection_types.preserve_angles)\n pi, pj, pk = (array((p+v).project(projection_types.preserve_angles)) - pp for v in (vi, vj, vk))\n # should stay orthogonal and same size\n # note that we're doing a secant thing so it's only approximate\n # thus we set a relatively high tolerance\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pj, pj),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pk, pk),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pj),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pk),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pj, pk),\n 0,\n abs_tol = 1e-6\n ))",
"def reprojectQcew(overwrite=False):\n\n\tif exists(qcew_2913) and not overwrite:\n\t\tprint '\\nstate plane qcew already exists, if you wish to'\n\t\tprint 'overwrite the existing file use the \"overwrite\" flag\\n'\n\t\treturn\n\n\tgeom_type = 'POINT'\n\ttemplate = src_qcew\n\tospn = arcpy.SpatialReference(2913)\n\tmanagement.CreateFeatureclass(dirname(qcew_2913),\n\t\tbasename(qcew_2913), geom_type, template, spatial_reference=ospn)\n\n\ti_cursor = da.InsertCursor(qcew_2913, '*')\n\n\ts_fields = ['Shape@', '*']\n\twith da.SearchCursor(src_qcew, s_fields) as s_cursor:\n\t\t# replace point coordinates with geometry object in field\n\t\t# definition\n\t\tfields = list(s_cursor.fields)\n\t\tfields[1] = fields.pop(0)\n\n\t\tfor row in s_cursor:\n\t\t\tlist_row = list(row)\n\t\t\tlist_row[1] = list_row.pop(0)\n\t\t\td = OrderedDict(zip(fields, list_row))\n\n\t\t\tgeom = d['Shape@']\n\t\t\tgeom_2913 = geom.projectAs(ospn) \n\t\t\td['Shape@'] = geom_2913\n\t\t\td['POINT_X'] = geom_2913.firstPoint.X\n\t\t\td['POINT_Y'] = geom_2913.firstPoint.Y\n\n\t\t\twrite_row = [v for v in d.values()]\n\t\t\ti_cursor.insertRow(write_row)\n\n\tdel i_cursor",
"def geometry(self):\n return self._geometry",
"def geometry(self):\n return self._geometry",
"def round(self):\n return GeoPoint(round(self.lat, 5), round(self.long, 5))",
"def to_geometry(v):\n return v / 1000",
"def geometry():\n return Geometry()"
]
| [
"0.69383496",
"0.63768506",
"0.59485835",
"0.588781",
"0.58418727",
"0.54773796",
"0.5404978",
"0.539588",
"0.53490907",
"0.5239898",
"0.5094586",
"0.504801",
"0.50427777",
"0.49809536",
"0.49339372",
"0.48368916",
"0.48302823",
"0.48212743",
"0.4820858",
"0.4820858",
"0.48066518",
"0.47895372",
"0.47613084",
"0.47607073",
"0.4745463",
"0.4736029",
"0.4736029",
"0.4735147",
"0.472541",
"0.47191676"
]
| 0.7587484 | 0 |
Returns the difference of the geometries | def difference(self, other): # -> BaseGeometry:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def difference(self, other):\n return self._geomgen(capi.geom_diff, other)",
"def get_geometry(self):\n geometry = self._geometry\n for geo in self._holes:\n geometry = geometry.difference(geo) \n return geometry",
"def difference(self, right: GeoSpatialValue) -> GeoSpatialValue:\n return ops.GeoDifference(self, right).to_expr()",
"def symmetric_difference(self, other): # -> BaseGeometry:\n ...",
"def test_difference(self):\n fc = self.read_feature('Global_Ocean')\n mask = self.read_feature()\n difference = fc.difference(maskingFC=mask)\n assert len(difference.features) == 1\n self.check_feature(difference.features[0],\n expected_name='Global Ocean',\n expected_type='Polygon')\n\n # make sure the original global ocean and mask have no holes\n for fc_test in [fc, mask]:\n geom = fc_test.features[0]['geometry']\n shape = shapely.geometry.shape(geom)\n assert isinstance(shape, shapely.geometry.Polygon)\n assert len(shape.interiors) == 0\n\n geom = difference.features[0]['geometry']\n shape = shapely.geometry.shape(geom)\n assert isinstance(shape, shapely.geometry.Polygon)\n assert len(shape.interiors) == 1",
"def diff(self):\n return differential(self)",
"def geodesic_difference(self, x1, x0):\n return x1 - x0 # Default to Euclidean behavior.",
"def sym_difference(self, other):\n return self._geomgen(capi.geom_sym_diff, other)",
"def compute_difference(self, evolved_elevation, difference):\n\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{difference} = {evolved_elevation}-{elevation}\".format(\n difference=difference,\n elevation=self.elevation,\n evolved_elevation=evolved_elevation),\n overwrite=True)\n # gscript.write_command(\n # 'r.colors',\n # map=difference,\n # rules='-',\n # stdin=difference_colors)\n gscript.run_command(\n 'r.colors',\n map=difference,\n color=\"differences\")\n\n return difference",
"def difference(self) -> np.ndarray:\n return self._dist['target'] - self._dist['current']",
"def diff(self, x1, x2):\n return x2 - x1",
"def diff(self):\n return self.client.api.diff(self.id)",
"def compute_layers(prod):\n # 1. Do polygons overlap for the same outlook\n LOG.warning(\"==== Running Geometry differences\")\n for day in prod.outlook_collections:\n prod.outlook_collections[day].difference_geometries()",
"def diff(self, other):\n Δx = self.x - other.x\n Δy = self.y - other.y\n return (Δx, Δy)",
"def diff(self, x0, x1):\n nq, nv, nx = self.model.nq, self.model.nv, self.nx\n assert (x0.shape == (nx, ) and x1.shape == (nx, ))\n q0 = x0[:nq]\n q1 = x1[:nq]\n v0 = x0[-nv:]\n v1 = x1[-nv:]\n dq = pinocchio.difference(self.model, a2m(q0), a2m(q1))\n return np.concatenate([dq.flat, v1 - v0])",
"def get_difference(self, other, x, y, norm='L2'):\n norms = {'L2': None, 'Linf': numpy.inf}\n field = self.restrict(x, y)\n other = other.restrict(x, y)\n subtracted = field.subtract(other)\n return numpy.linalg.norm(subtracted.values, ord=norms[norm])",
"def differences(self):\n return self._differences",
"def diff(self):\n\t\tif len(self.v) < 4:\n\t\t\treturn None\n\t\tif self.poli == None:\n\t\t\tself.generar_polinomio()\n\t\tif x != None:\n\t\t\treturn diff(self.poli)(x)\n\t\treturn diff(self.poli)",
"def Erase(inputgeodf, erasegeodf):\n return gpd.overlay(inputgeodf, gpd.GeoDataFrame({'geometry': erasegeodf.unary_union}), how='difference')",
"def diff(self):\n return [node.diff for node in self]",
"def angle_difference(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['angle_difference']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for label in self.uuid_map:\n if label == 'LSTATE' or 'MAG' in label:\n continue\n distillate_label = get_distillate_label([label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_ref_label = \"{0} {1}\".format(label, self.ref_name)\n dep_ref_name = fields['deps'][0]\n dep_ref_uuid = self.reference_uuid_map[label]\n dep_label = \"{0} {1}\".format(label, self.name)\n dep_name = fields['deps'][1]\n dep_uuid = self.uuid_map[label]\n deps = [[dep_ref_label, dep_ref_name, dep_ref_uuid], [dep_label, dep_name, dep_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}/{3}\".format(self.location, self.ref_name, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"ANGLE-DIFF\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label] = emitted[-2][-36:]\n\n filename = \"{0}/ANG-DIFF_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map",
"def difference(self, other): # type: (Term) -> Term\n return self.intersect(other.inverse)",
"def reverse_difference():",
"def difference(self, mass_function):\n result = copy.deepcopy(self)\n for focal, value in mass_function.items():\n result.remove_mass((focal, value))\n\n newFocals = {}\n for focal, value in result.items():\n if value != 0:\n newFocals[focal] = value\n result.focals = newFocals\n return result",
"def subtract(x, y):\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)",
"def diff(A,B):\n #A-B\n x = list(set(A) - set(B))\n #B-A\n y = list(set(B) - set(A))\n return x, y",
"def diff(self, x1, x2):\n return self.State.diff(x1, x2)",
"def get_feature_differences(self):\n feats = self.get_feature_sets()\n\n # create a dictionary storing the features which are distinct\n diff_dict = dict()\n\n # compare each dataset against each and collect differences\n for i in range(len(feats)):\n for j in range(i + 1, len(feats)):\n # take union from differences\n diff_dict[i, j] = feats[i].difference(feats[j]).union(feats[j].difference(feats[i]))\n\n return diff_dict",
"def setdiff(self, other):\n\n return self.intersect(other, op=np.setdiff1d)",
"def getdifference(triplet_old,triplet_new):\r\n for i in range(0,3):\r\n if (triplet_new[i]!=triplet_old[i]):\r\n \r\n return (triplet_new[i],triplet_old[i],i)"
]
| [
"0.7835668",
"0.67538625",
"0.66875976",
"0.668591",
"0.6672432",
"0.6330556",
"0.6302645",
"0.6291098",
"0.6205153",
"0.6158713",
"0.5979545",
"0.5974062",
"0.5971922",
"0.59454995",
"0.5910263",
"0.5900204",
"0.5885507",
"0.58507663",
"0.5831299",
"0.58161",
"0.5814541",
"0.5807841",
"0.58017325",
"0.57662433",
"0.57312006",
"0.57225233",
"0.5701517",
"0.5692487",
"0.5665859",
"0.565867"
]
| 0.7628914 | 1 |
Returns the intersection of the geometries | def intersection(self, other): # -> BaseGeometry:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def intersection(self, other):\n return self._geomgen(capi.geom_intersection, other)",
"def intersection(self, other):\n return _binary_geo(arctern.ST_Intersection, self, other)",
"def get_intersections(self):\n return self.intersection_list",
"def intersection(self):\n return Intersection(self.source, self)",
"def intersection(*entities):\n from entity import GeometryEntity\n\n entities = GeometryEntity.extract_entities(entities, False)\n if len(entities) <= 1:\n return []\n\n res = GeometryEntity.do_intersection(entities[0], entities[1])\n for entity in entities[2:]:\n newres = []\n for x in res:\n newres.extend(GeometryEntity.do_intersection(x, entity))\n res = newres\n return res",
"def intersection(self, right: GeoSpatialValue) -> GeoSpatialValue:\n return ops.GeoIntersection(self, right).to_expr()",
"def intersects(geometry, sr=None):\r\n\r\n return _filter(geometry, sr, 'esriSpatialRelIntersects')",
"def intersection(self, other):\n \n self_corners = self.corners\n\n other_corners = get_2d_false_corners(other)\n\n #shell()\n\n return planar_intersection_polygon(self_corners,other_corners)",
"def intersection(x, y, f, p):",
"def envelope_intersects(geometry, sr=None):\r\n return _filter(geometry, sr, 'esriSpatialRelEnvelopeIntersects')",
"def intersection(self, other):\n new_ieqs = []\n new_ieqs.extend(self.inequalities())\n new_ieqs.extend(other.inequalities())\n\n new_eqns = []\n new_eqns.extend(self.equations())\n new_eqns.extend(other.equations())\n\n return Polyhedron(ieqs = new_ieqs, eqns = new_eqns, \n field=self.coerce_field(other))",
"def intersection_list(self):\n return self._intersection_list",
"def intersects(self, other):\n return _binary_op(arctern.ST_Intersects, self, other).astype(bool, copy=False)",
"def Intersect(*args, **kwargs):\n return _gdi_.Region_Intersect(*args, **kwargs)",
"def intersect(self, rays):\n raise NotImplementedError",
"def get_intersect(a1, a2, b1, b2):\n s = np.vstack((a1, a2, b1, b2)) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return None\n return np.array([x / z, y / z])",
"def intersection(self, other):\n ### Original\n from pyresample.spherical_geometry import intersection_polygon\n # MLS This was failing if all the corners of the \n # area_definition fell inside the data box definition.\n # I think __contains__ in spherical_geometry is\n # not working properly? This seems to work, should\n # watch for false positives ?\n # This DOES NOT WORK for over the pole...\n allselfcornersin = False\n allothercornersin = False\n retcorners = intersection_polygon(self.corners, other.corners)\n if not retcorners:\n # Only try these if intersection_polygon didn't return anything.\n for i in self.corners:\n if planar_point_inside(i,other.corners):\n allselfcornersin = True\n else:\n allselfcornersin = False\n for i in other.corners:\n if planar_point_inside(i,self.corners):\n allothercornersin = True\n else:\n allothercornersin = False\n\n if allselfcornersin:\n return self.corners\n if allothercornersin: \n return other.corners\n return retcorners\n \n ### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in intersection')\n #shell()\n #sphpoly = SphPolygon(self.corners)\n #return sphpoly.intersection(SphPolygon(other.corners))",
"def get_intersect(a1, a2, b1, b2):\r\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\r\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\r\n l1 = np.cross(h[0], h[1]) # get first line\r\n l2 = np.cross(h[2], h[3]) # get second line\r\n x, y, z = np.cross(l1, l2) # point of intersection\r\n if z == 0: # lines are parallel\r\n return (float('inf'), float('inf'))\r\n return (x/z, y/z)",
"def intersect(self, *args, **kwargs): # real signature unknown\n pass",
"def intersect(self, rays): \n result = {}\n \n if bool(self._merged):\n result[\"x\"], result[\"y\"], result[\"z\"], result[\"valid\"], result[\"ray_u\"], \\\n result[\"trig_u\"], result[\"trig_v\"], result[\"gather_ray\"], \\\n result[\"gather_trig\"] = self._intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"z_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n rays[\"z_end\"],\n self._merged[\"xp\"],\n self._merged[\"yp\"],\n self._merged[\"zp\"],\n self._merged[\"x1\"],\n self._merged[\"y1\"],\n self._merged[\"z1\"],\n self._merged[\"x2\"],\n self._merged[\"y2\"],\n self._merged[\"z2\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n \n result[\"norm\"] = tf.gather(\n self._merged[\"norm\"],\n result[\"gather_trig\"]\n )\n \n return result",
"def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]",
"def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"",
"def get_intersect(a1, a2, b1, b2):\n s = np.vstack([a1, a2, b1, b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return float('inf'), float('inf')\n return x / z, y / z",
"def intersection(self, axis2):",
"def intersection(s1, s2):\n \"*** YOUR CODE HERE ***\"\n return s1.intersection(s2) # ...",
"def intersection_with(self, other):\n i = self.line_intersection_with(other)\n if i is None:\n return None# parallel lines\n\n if self.contains(i) and other.contains(i) and not (i in self.endpoints and i in other.endpoints):\n return i\n return None",
"def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P",
"def index_intersects(geometry, sr=None):\r\n return _filter(geometry, sr, 'esriSpatialRelIndexIntersects')",
"def intersection(self, ray):\n \n points = []\n intersection_objects = []\n for obj in self.objects:\n intersection = obj.shape.intersection(ray)\n if intersection != None:\n for pt in intersection:\n points.append(pt)\n intersection_objects.append(obj)\n \n if len(points) == 0:\n return None, None\n return points, intersection_objects",
"def intersection(self, x):\n _x = self._intersection(self.xArea, x.xArea)\n _y = self._intersection(self.yArea, x.yArea)\n return _x and _y"
]
| [
"0.81696415",
"0.73289883",
"0.70724213",
"0.70247513",
"0.7009846",
"0.6927248",
"0.6887975",
"0.6862611",
"0.6845613",
"0.6817662",
"0.67587274",
"0.6738185",
"0.66817015",
"0.6627198",
"0.6520325",
"0.6497146",
"0.6444743",
"0.643527",
"0.64025766",
"0.638235",
"0.63786036",
"0.6378246",
"0.6346746",
"0.6336407",
"0.63348794",
"0.6333382",
"0.6324555",
"0.6294977",
"0.62659764",
"0.6255284"
]
| 0.78489655 | 1 |
Returns the symmetric difference of the geometries (Shapely geometry) | def symmetric_difference(self, other): # -> BaseGeometry:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sym_difference(self, other):\n return self._geomgen(capi.geom_sym_diff, other)",
"def get_geometry(self):\n geometry = self._geometry\n for geo in self._holes:\n geometry = geometry.difference(geo) \n return geometry",
"def difference(self, other): # -> BaseGeometry:\n ...",
"def difference(self, other):\n return self._geomgen(capi.geom_diff, other)",
"def symmetric_difference(G, H):\n\n if G.order() != H.order():\n msg = \"Node sets of the two directed graphs are not equal!\"\n raise StaticGraphNotEqNodesException(msg)\n\n n_nodes = G.order()\n diff1 = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n\n diff2 = ((u, v) for u in H.nodes()\n for v in set(H.successors(u)) - set(G.successors(u)))\n \n edges = chain(diff1, diff2)\n deg = make_deg(n_nodes, edges)\n \n diff1 = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n\n diff2 = ((u, v) for u in H.nodes()\n for v in set(H.successors(u)) - set(G.successors(u)))\n \n edges = chain(diff1, diff2)\n D = make(n_nodes, G.size() + H.size(), edges, deg)\n return D",
"def symmetricDifference(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs ^ rhs)",
"def symmetric_difference(self, other):\n return SymmetricDifference(self, other)",
"def geodesic_difference(self, x1, x0):\n return x1 - x0 # Default to Euclidean behavior.",
"def symmetric_difference(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n # convert to a RangeSet\n rng_set = RangeSet._to_rangeset(rng_set)\n # get union and then remove intersections\n union = self.union(rng_set)\n intersection = self.intersection(rng_set)\n union.difference_update(intersection)\n return union",
"def reverse_difference():",
"def erase(self, other, inplace=False):\r\n if inplace:\r\n df = self\r\n else:\r\n df = self.copy()\r\n if isinstance(other, Geometry):\r\n df.geometry = self.geometry.symmetricDifference(other)\r\n return df\r\n else:\r\n raise ValueError(\"Input must be of type arcpy.Geometry, not %s\" % type(other))",
"def graph_symmetric_difference(graph1, graph2, edge_diff=False, return_copy=False):\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(graph1, graph2)\n\n # Compute node or edge symmetric difference.\n if edge_diff:\n symdiff_edges = graph1.edges.symmetric_difference(graph2.edges)\n else:\n symdiff_nodes = graph1.nodes.symmetric_difference(graph2.nodes)\n\n if share_common_origin(graph1, graph2) and not return_copy:\n if edge_diff:\n return graph1.origin.getedges(symdiff_edges)\n return graph1.origin.getnodes(symdiff_nodes)\n else:\n # Get node or edge difference for both and join them in a new graph\n if edge_diff:\n result = graph1.getedges(symdiff_edges.difference(graph2.edges)).copy(deep=True, copy_view=False)\n graph_join(result, graph2.getedges(symdiff_edges.difference(graph1.edges)))\n else:\n result = graph1.getnodes(symdiff_nodes.difference(graph2.nodes)).copy(deep=True, copy_view=False)\n graph_join(result, graph2.getnodes(symdiff_nodes.difference(graph1.nodes)))\n\n return result",
"def test_difference(self):\n fc = self.read_feature('Global_Ocean')\n mask = self.read_feature()\n difference = fc.difference(maskingFC=mask)\n assert len(difference.features) == 1\n self.check_feature(difference.features[0],\n expected_name='Global Ocean',\n expected_type='Polygon')\n\n # make sure the original global ocean and mask have no holes\n for fc_test in [fc, mask]:\n geom = fc_test.features[0]['geometry']\n shape = shapely.geometry.shape(geom)\n assert isinstance(shape, shapely.geometry.Polygon)\n assert len(shape.interiors) == 0\n\n geom = difference.features[0]['geometry']\n shape = shapely.geometry.shape(geom)\n assert isinstance(shape, shapely.geometry.Polygon)\n assert len(shape.interiors) == 1",
"def symmetric_diff(a,b):\n return a ^ b",
"def pairwise_correlation_difference(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n substract_m = np.subtract(corr_real, corr_rand)\r\n prwcrdst = LA.norm(substract_m)\r\n\r\n return prwcrdst, substract_m",
"def compute_layers(prod):\n # 1. Do polygons overlap for the same outlook\n LOG.warning(\"==== Running Geometry differences\")\n for day in prod.outlook_collections:\n prod.outlook_collections[day].difference_geometries()",
"def difference(self, right: GeoSpatialValue) -> GeoSpatialValue:\n return ops.GeoDifference(self, right).to_expr()",
"def Erase(inputgeodf, erasegeodf):\n return gpd.overlay(inputgeodf, gpd.GeoDataFrame({'geometry': erasegeodf.unary_union}), how='difference')",
"def simplify(self, tolerance):\n return _unary_geo(arctern.ST_SimplifyPreserveTopology, self, tolerance)",
"def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:\n ...",
"def symmetric(self):\n result = self.directed()\n result.extend([(down, up) for up, down in result])\n return Pairs(result)",
"def difference_update(self, *others):\r\n return self.sdiffstore(self.r_key, slef.r_key, *[o.r_key for o in others])",
"def reverse_feature(feature, back_square):\n back_square = np.array(back_square)\n feature1 = MultiPolygon([Polygon([tuple(z) for z in y]) for y in feature.coord])\n\n init = np.min(back_square[:,1])\n height =0.5\n back = []\n\n while (init + height<=np.max(back_square[:,1])):\n\n test_square = np.array([[np.min(back_square[:,0]),init],[np.max(back_square[:,0]),init],\n [np.max(back_square[:,0]),init+height],[np.min(back_square[:,0]),init+height]])\n feature2 = Polygon([tuple(z) for z in test_square])\n difference = feature2.difference(feature1)\n newdifference = difference\n\n while (has_hole(newdifference)==0) and (init + height<=np.max(back_square[:,1])):\n difference = newdifference\n height = height+0.5\n test_square = np.array([[np.min(back_square[:,0]),init],[np.max(back_square[:,0]),init],\n [np.max(back_square[:,0]),init+height],[np.min(back_square[:,0]),init+height]])\n feature2 = Polygon([tuple(z) for z in test_square])\n newdifference = feature2.difference(feature1)\n if difference.geom_type == 'Polygon':\n back.append(np.array(difference.exterior.coords))\n if difference.geom_type == 'MultiPolygon':\n for x in difference.geoms:\n back.append(np.array(x.exterior.coords))\n init = init+height-0.5\n height = 0.5\n \n topoinvert_obj = Feature()\n topoinvert_obj.coord = back\n return topoinvert_obj",
"def diff(self):\n return differential(self)",
"def diff(self, other):\n Δx = self.x - other.x\n Δy = self.y - other.y\n return (Δx, Δy)",
"def getdifference(triplet_old,triplet_new):\r\n for i in range(0,3):\r\n if (triplet_new[i]!=triplet_old[i]):\r\n \r\n return (triplet_new[i],triplet_old[i],i)",
"def difference(G, H):\n\n if G.order() != H.order():\n msg = \"Node sets of the two directed graphs are not equal!\"\n raise StaticGraphNotEqNodesException(msg)\n \n n_nodes = G.order()\n edges = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n deg = make_deg(n_nodes, edges)\n edges = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n D = make(n_nodes, G.size(), edges, deg)\n return D",
"def subtract(x, y):\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)",
"def mergeGeometries(self):\n self.geometry = reduce(lambda p1,p2 : p1.union(p2) ,map(lambda tax : tax.biomeGeometry,self.taxonomies))\n return self.geometry",
"def obtain_geometries(self):\n\n assert isinstance(self.ts, TS)\n\n \n symbol_dict = {\n 17: \"Cl\",\n 9: \"F\",\n 8: \"O\",\n 7: \"N\",\n 6: \"C\",\n 1: \"H\",\n }\n atoms = []\n\n parser = ccread(self.log_file, loglevel=logging.ERROR)\n\n for atom_num, coords in zip(parser.atomnos, parser.atomcoords[-1]):\n atoms.append(Atom(symbol=symbol_dict[atom_num], position=coords))\n \n self.ts._ase_molecule = Atoms(atoms)\n self.ts.update_coords_from(\"ase\")\n\n self.pre_geometry = self.ts.ase_molecule.copy()\n self.post_geometry = self.ts.ase_molecule.copy()\n\n for vib, displacements in self.vibrations:\n if vib < 0: # Finding the imaginary frequency\n self.post_geometry.arrays[\"positions\"] -= displacements\n\n return self.pre_geometry, self.post_geometry"
]
| [
"0.68364435",
"0.66142195",
"0.65243906",
"0.62599766",
"0.578314",
"0.5745995",
"0.5697593",
"0.5660089",
"0.56346405",
"0.5620938",
"0.5573971",
"0.5544287",
"0.55151415",
"0.5363903",
"0.53637695",
"0.53566664",
"0.5340382",
"0.5328883",
"0.53185844",
"0.53170276",
"0.5302328",
"0.5291651",
"0.5227482",
"0.5194716",
"0.5176287",
"0.51743984",
"0.5172401",
"0.51582485",
"0.51400036",
"0.51308185"
]
| 0.78318787 | 0 |
Returns the union of the geometries (Shapely geometry) | def union(self, other): # -> BaseGeometry:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mergeGeometries(self):\n self.geometry = reduce(lambda p1,p2 : p1.union(p2) ,map(lambda tax : tax.biomeGeometry,self.taxonomies))\n return self.geometry",
"def union(self, other):\n return self._geomgen(capi.geom_union, other)",
"def union(feature):\n\n mp = MultiPolygon([Polygon([tuple(z) for z in y]) for y in feature.coord])\n union = ops.unary_union(mp)\n \n coords = [] \n if union.geom_type == 'Polygon':\n coords.append(np.array(union.exterior.coords))\n if union.geom_type == 'MultiPolygon':\n for x in union.geoms:\n coords.append(np.array(x.exterior.coords))\n\n new_feature = Feature()\n new_feature.coord = coords\n return new_feature",
"def unary_union(self):\n return GeoSeries(arctern.ST_Union_Aggr(self))",
"def unary_union(self) -> ir.GeoSpatialScalar:\n return ops.GeoUnaryUnion(self).to_expr().name(\"union\")",
"def union(self, *args):\n _ub = None\n for _obj in args:\n if _ub is None:\n _ub = self.bbox(_obj)\n else:\n _b = self.bbox(_obj)\n _x = np.sort(np.array([_b[:, 0], _ub[:, 0]]), axis=None)\n _y = np.sort(np.array([_b[:, 1], _ub[:, 1]]), axis=None)\n _ub = np.array([[_x[0], _y[0]], [_x[3], _y[3]]])\n return _ub",
"def boundary_polygon_by_union(self):\n cell_geoms = [None]*self.Ncells()\n\n for i in self.valid_cell_iter():\n xy = self.nodes['x'][self.cell_to_nodes(i)]\n cell_geoms[i] = geometry.Polygon(xy)\n return ops.cascaded_union(cell_geoms)",
"def union(self, other):\n from sage.misc.misc import deprecation\n deprecation('The function union is replaced by convex_hull.', 'Sage Version 4.4.4')\n return self.convex_hull(other)",
"def union(bs: Iterable['BBox']) -> Optional['BBox']:\n b = BBox.spanning(\n chain.from_iterable([b.corners() for b in bs]))\n return b",
"def union(self, right: GeoSpatialValue) -> GeoSpatialValue:\n return ops.GeoUnion(self, right).to_expr()",
"def getMultiGeometry(geometry):\n geom = arcpy.Array()\n for feature in geometry:\n array = arcpy.Array()\n for point in feature:\n point = arcpy.Point(float(point[0]), float(point[1]))\n array.add(point)\n geom.add(array)\n return geom",
"def unionFeatureCollections(*collections):\n features = []\n for collection in collections:\n if collection[\"type\"] == \"FeatureCollection\":\n collectionFeatures = collection[\"features\"]\n features.extend(collectionFeatures)\n if collection[\"type\"] == \"Feature\":\n features.append(collection)\n return geojson.FeatureCollection(features)",
"def poly_merge(s0, label):\n if s0.geom_type == 'Polygon':\n return s0\n ff = copy(s0)\n try:\n nc = len(s0.geoms)\n buffer_size = 100.0\n\n while ff.geom_type == 'MultiPolygon' and len(ff.geoms) > 1 and buffer_size <= 500.0:\n tmp0 = copy(s0)\n tmp1 = tmp0.buffer(+buffer_size)\n tmp2 = tmp1.buffer(-buffer_size)\n ff = shapely.ops.cascaded_union((tmp2, s0))\n buffer_size += 50.0\n except ValueError:\n print('!!! Error in poly_merge')\n return ff",
"def exterior(self):\n if isinstance(self.substrates, MultiPolygon):\n geoms = self.substrates.geoms\n elif isinstance(self.substrates, Polygon):\n geoms = [self.substrates]\n else:\n raise RuntimeError(\"Uknown type '{}' of substrate geometry\".format(type(self.substrates)))\n polygons = [Polygon(p.exterior) for p in geoms]\n return unary_union(polygons)",
"def Union(*args, **kwargs):\n return _gdi_.Region_Union(*args, **kwargs)",
"def union(set1, set2):",
"def _union_polygons(polygons, precision = 1e-4, max_points = 4000):\n polygons = _merge_floating_point_errors(polygons, tol = precision/1000)\n unioned = gdspy.boolean(polygons, [], operation = 'or',\n precision = precision, max_points = max_points)\n return unioned",
"def test_merge_stitches_together_geometry_collections(self):\n topology = {\n \"type\": \"Topology\",\n \"objects\": {\n \"collection\": {\n \"type\": \"GeometryCollection\",\n \"geometries\": [\n {\"type\": \"Polygon\", \"arcs\": [[0, 1]]},\n {\"type\": \"Polygon\", \"arcs\": [[-1, 2]]},\n ],\n }\n },\n \"arcs\": [\n [[1, 1], [1, 0]],\n [[1, 0], [0, 0], [0, 1], [1, 1]],\n [[1, 1], [2, 1], [2, 0], [1, 0]],\n ],\n }\n self.assertDictEqual(\n {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [\n [[[1, 0], [0, 0], [0, 1], [1, 1], [2, 1], [2, 0], [1, 0]]]\n ],\n },\n self.merge(topology, [topology[\"objects\"][\"collection\"]]),\n )",
"def union(self, other):\n\n return self.intersect(other, op=np.union1d)",
"def test_self_union():\n gdf = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (2, 2), (2, 0)]),\n Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),\n Polygon([(1, 1), (1, 2), (2, 2), (2, 1)]),\n ]\n ),\n \"x\": [0, 1, 2],\n \"y\": [4.0, 8.0, 1.0],\n }\n )\n\n result_one = self_union(gdf)\n expected_one = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (1, 2), (1, 1), (2, 1), (2, 0)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(2, 2), (1, 2), (1, 3), (3, 3), (3, 1), (2, 1)]),\n ],\n index=[(0,), (0, 1, 2), (0, 1, 2), (0, 1, 2), (1,)],\n ),\n \"x\": [0, 0, 1, 2, 1],\n \"y\": [4.0, 4.0, 8.0, 1.0, 8.0],\n }\n )\n assert_geodataframe_equal(result_one, expected_one)\n\n result_two = self_union(gdf, ratios=[\"y\"])\n expected_two = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (1, 2), (1, 1), (2, 1), (2, 0)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(2, 2), (1, 2), (1, 3), (3, 3), (3, 1), (2, 1)]),\n ],\n index=[(0,), (0, 1, 2), (0, 1, 2), (0, 1, 2), (1,)],\n ),\n \"x\": [0, 0, 1, 2, 1],\n \"y\": [3.0, 1.0, 2.0, 1.0, 6.0],\n }\n )\n assert_geodataframe_equal(result_two, expected_two)",
"def make_union(self, *args, **kwargs): # real signature unknown\n pass",
"def union(self, *others):\r\n return self.r.sunion(self.r_key, *[o.r_key for o in others])",
"def _multigeometry(self, ogr_geometry):\n\n geo_type = ogr_geometry.GetGeometryType()\n\n if geo_type == ogr.wkbPolygon:\n return ogr.ForceToMultiPolygon(ogr_geometry)\n elif geo_type == ogr.wkbPoint:\n return ogr.ForceToMultiPoint(ogr_geometry)\n elif geo_type in [ogr.wkbLineString, ogr.wkbLinearRing]:\n return ogr.ForceToMultiLineString(ogr_geometry)\n else:\n return ogr_geometry",
"def union(self, *args):\n return self.phy2abs.union(*args)",
"def union(self, StdVectorFst other):\n cdef StdVectorFst result = self.copy()\n result.set_union(other)\n return result",
"def union(arguments, flatten=True):\n return Component(\n \"Union\",\n arguments=arguments,\n options={\n 'flatten': flatten\n },\n constraints=None)",
"def combined_emprise(self):\n combined = self.emprise_set.aggregate(Union(\"mpoly\"))\n if \"mpoly__union\" in combined:\n return combined[\"mpoly__union\"]\n else:\n return None",
"def union(self, other):\n self.vertices.extend(other.vertices)\n self.edges.extend(other.edges)\n self.faces.extend(other.faces)\n return self",
"def union(a, b):\r\n return list(set(a) | set(b))",
"def union(s1, s2):\n \"*** YOUR CODE HERE ***\"\n s = set()\n for member in s1:\n s.add(member)\n for member in s2:\n s.add(member)\n return s"
]
| [
"0.7533483",
"0.7369448",
"0.71881694",
"0.709492",
"0.6810885",
"0.6660458",
"0.6628999",
"0.6492786",
"0.6377294",
"0.63297266",
"0.63130426",
"0.62937534",
"0.6286035",
"0.6256824",
"0.6231118",
"0.6177483",
"0.61481446",
"0.60948616",
"0.6084603",
"0.6042804",
"0.5930949",
"0.59249496",
"0.5922429",
"0.592004",
"0.5914273",
"0.59075797",
"0.58475786",
"0.5843046",
"0.5838869",
"0.5834472"
]
| 0.752098 | 1 |
Returns True if geometries are disjoint, else False | def disjoint(self, other): # -> bool:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disjoint(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoDisjoint(self, right).to_expr()",
"def is_disjoint(self, other):\n return self.intersect(other) == S.EmptySet",
"def isdisjoint(self, other):\n self._check_title(other)\n\n # sort by top-left vertex\n if self.bounds > other.bounds:\n i = self\n self = other\n other = i\n\n return (self.max_col, self.max_row) < (other.min_col, other.max_row)",
"def is_disjoint(self, other):\n\n for element in other:\n if element in self:\n return False\n\n return True",
"def is_disjoint(self, other):\n if not isinstance(other, SetPy):\n raise TypeError(\"Can only be disjoint with another SetPy\")\n return set_combinations.SetIntersect(self, other).cardinality() == 0",
"def intersects(self, other): # -> bool:\n ...",
"def merge_adjacent_polygons(feature):\n if feature.geometry().wkbType() != WKBMultiPolygon:\n return False\n mp = Geometry.get_multipolygon(feature)\n if len(mp) < 2:\n return False\n else:\n geom = None\n for p in mp:\n g = Geometry.fromPolygonXY(p)\n ng = g if geom is None else geom.combine(g)\n if ng.isGeosValid():\n geom = ng\n if geom is not None:\n feature.setGeometry(geom)\n return geom.isGeosValid()",
"def has_geom(self):\n return bool(self.give_geom())",
"def hasNoDoubleVertices(self):\n assert all(self.vertices.count(v) == 1 for v in self.vertices)\n return (all(all(v1 == v2 or v1.dist(v2) > COMPARISON_EPSILON for v2 in self.vertices)\n for v1 in self.vertices) and\n all(self.vertices.count(v) == 1 for v in self.vertices))",
"def _is_disjoint(x, y):\n if x.ndim != 1 or y.ndim != 1:\n raise ValueError(\"Inputs must be 1-d ndarrays\")\n\n test_val = np.setdiff1d(x, y).size\n test_val += np.setdiff1d(y, x).size\n\n return test_val == 0",
"def is_multipoint_on_polygon(feature_1: Sequence, feature_2: Sequence) -> bool:\n points_on_poly = False\n\n points_on_poly = all(\n boolean_point_in_polygon(coords_1, feature_2[1]) for coords_1 in feature_1[1]\n )\n\n if not points_on_poly:\n return points_on_poly\n\n points_on_poly = any(\n boolean_point_in_polygon(coords_1, feature_2[1], {\"ignoreBoundary\": True})\n for coords_1 in feature_1[1]\n )\n\n return points_on_poly",
"def _intersects_1D(A, B):\n return False if (B[1] <= A[0]) or (B[0] >= A[1]) else True",
"def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise",
"def can_overlap(self):\n return False",
"def intersects(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False",
"def is_poly_in_multipoly(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n return any(is_poly_in_poly(feature_1, coords_2) for coords_2 in feature_2)",
"def intersects(self, other):\n if isinstance(self.crs, CartesianCRS):\n if not self._bbox_overlap(other):\n return False\n interx = _cintersection.all_intersections(self.vertices, other.vertices)\n return len(interx) != 0\n else:\n for a in self.segment_tuples:\n for b in other.segment_tuples:\n try:\n geodesy.intersection_spherical(a, b)\n except geodesy.NoIntersection:\n continue\n return True\n return False",
"def disjoint(a, b, **kwargs):\n return lib.disjoint(a, b, **kwargs)",
"def boundary_invariant(self):\n for cell in self.fire_boundary():\n if self.is_empty(cell[0], cell[1]):\n print \"Cell \" + str(cell) + \" in fire boundary is empty.\"\n return False\n return True",
"def isdisjoint(self, other: Union[Rangelike, Iterable[Rangelike]]) -> bool:\n # convert to RangeSet\n other = RangeSet._to_rangeset(other)\n # O(n^2) comparison\n # TODO improve efficiency by mergesort/short-circuiting\n return all(rng1.isdisjoint(rng2) for rng1 in self._ranges for rng2 in other._ranges)",
"def bfs_is_connected(self):\n q = Queue.Queue()\n origins = [self.vertices()[0]]\n traveled = set(origins)\n while origins:\n for o in origins:\n for child in self.out_vertices(o):\n if child not in traveled:\n q.put(child)\n traveled.add(child)\n\n origins = []\n while not q.empty():\n origins.append(q.get())\n if len(traveled) == self.order():\n return True\n return False",
"def is_new(self):\n c_up = self.upper_binary_tree().single_edge_cut_shapes()\n c_down = self.lower_binary_tree().single_edge_cut_shapes()\n return not any(x in c_up for x in c_down)",
"def intersects(a, b, c, d):\n return ccw(a, c, d) != ccw(b, c, d) and ccw(a, b, c) != ccw(a, b, d)",
"def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid",
"def is_line_in_multipoly(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n return any(is_line_in_poly(feature_1, coords_2) for coords_2 in feature_2)",
"def is_collision(dict_a, dict_b):\n\n intersection = set(dict_a.values()) & set(dict_b.values())\n if not intersection:\n # Empty\n return False\n else:\n # Not Empty\n return True",
"def is_complete_multipartite(self):\n if self._.d != 2:\n return False\n if not self._has(\"p\"):\n self.pTable()\n return any(self._.p[0, i, i] == self._.p[j, i, i]\n for i, j in [(1, 2), (2, 1)])",
"def collided(self, shape):\n\n d, _, _ = self.closest_point(shape, homogenous=False)\n\n if d is not None and d <= 0:\n return True\n else:\n return False",
"def isIsosceles(self):\n\t\treturn self.a == self.b or self.a == self.c or self.b == self.c",
"def __ne__(self, other):\n return np.all(self.grid != other.grid) or np.all(self.pos != other.pos)"
]
| [
"0.72080624",
"0.6969701",
"0.65656924",
"0.64380586",
"0.64328504",
"0.6396868",
"0.6366332",
"0.6359289",
"0.6352083",
"0.6330388",
"0.6246516",
"0.61746126",
"0.6119184",
"0.61187744",
"0.61034775",
"0.60886997",
"0.60143226",
"0.60018754",
"0.59931576",
"0.5951325",
"0.59484583",
"0.59447056",
"0.594171",
"0.5933341",
"0.59155715",
"0.5905165",
"0.58955276",
"0.5892302",
"0.5842503",
"0.58343244"
]
| 0.72793204 | 0 |
Returns True if geometries intersect, else False | def intersects(self, other): # -> bool:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def intersects(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False",
"def intersects(self, other):\n if isinstance(self.crs, CartesianCRS):\n if not self._bbox_overlap(other):\n return False\n interx = _cintersection.all_intersections(self.vertices, other.vertices)\n return len(interx) != 0\n else:\n for a in self.segment_tuples:\n for b in other.segment_tuples:\n try:\n geodesy.intersection_spherical(a, b)\n except geodesy.NoIntersection:\n continue\n return True\n return False",
"def intersects(self, other):\n return _binary_op(arctern.ST_Intersects, self, other).astype(bool, copy=False)",
"def intersects(self):\n match = False\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n bounds = self.__line_segment(p1, p2)\n if not bounds is None:\n xmin = bounds[0]\n ymin = bounds[1]\n xmax = bounds[0]\n ymax = bounds[1]\n for j in range(len(bounds)):\n if not (j % 2):\n if bounds[j] < xmin:\n xmin = bounds[j]\n elif bounds[j] > xmax:\n xmax = bounds[j]\n else:\n if bounds[j] < ymin:\n ymin = bounds[j]\n elif bounds[j] > ymax:\n ymax = bounds[j]\n x = self.x\n y = self.y\n # TODO: Determine direction, and check two leading edge points; ie. last vector ----> then points are x+width,y+width x+width,y-width\n if x > xmin and x < xmax and y > ymin and y < ymax:\n match = True\n break\n return match",
"def intersects(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoIntersects(self, right).to_expr()",
"def _intersects_1D(A, B):\n return False if (B[1] <= A[0]) or (B[0] >= A[1]) else True",
"def check_intersection(obj1, obj2):\n (x1, y1, w1, h1) = obj1.get_box()\n (x2, y2, w2, h2) = obj2.get_box()\n if x2 + w2 - 1 < x1 or x2 >= x1 + w1:\n return False\n if y2 + h2 - 1 < y1 or y2 >= y1 + h1:\n return False\n \n return True",
"def intersection(self, other): # -> BaseGeometry:\n ...",
"def intersects(a, b, c, d):\n return ccw(a, c, d) != ccw(b, c, d) and ccw(a, b, c) != ccw(a, b, d)",
"def intersect(self, rectangle):\n return self.contains(rectangle.corner) or rectangle.contains(self.corner)",
"def envelope_intersects(geometry, sr=None):\r\n return _filter(geometry, sr, 'esriSpatialRelEnvelopeIntersects')",
"def intersects(self, other):\n return (self.x1 <= other.x2 and self.x2 >= other.x1 and\n self.y1 <= other.y2 and self.y2 >= other.y1)",
"def doBoundingBoxesIntersect(self, other):\n if(self.upperLeft.x <= other.lowerRight.x and\n self.lowerRight.x >= other.upperLeft.x and\n self.upperLeft.y >= other.lowerRight.y and\n self.lowerRight.y <= other.upperLeft.y):\n return True\n return False",
"def intersects(self, other_line):\n intpt= self.intersection(other_line)\n return bool(intpt)",
"def hasIntersectedWith(self, f):\n try:\n return f in self.hasIntersected\n except AttributeError:\n return False",
"def is_intersecting(self, ray):\n\n intersecting_point = self._sympy_plane.intersection(ray.sympy_line)[0]\n\n if 'x' in self._name:\n\n if self._within_y_bounds(intersecting_point.y) and self._within_z_bounds(intersecting_point.z):\n return True, np.array(map(float, [intersecting_point.x, intersecting_point.y, intersecting_point.z]))\n\n\n\n elif 'y' in self._name:\n\n if self._within_x_bounds(intersecting_point.x) and self._within_z_bounds(intersecting_point.z):\n return True, np.array(map(float, [intersecting_point.x, intersecting_point.y, intersecting_point.z]))\n\n\n\n elif 'z' in self._name:\n\n if self._within_y_bounds(intersecting_point.y) and self._within_x_bounds(intersecting_point.x):\n return True, np.array(map(float, [intersecting_point.x, intersecting_point.y, intersecting_point.z]))\n\n return False, None",
"def are_polygons_intersecting(a: Vector, b: Vector,\n displacement_a: Vector, displacement_b: Vector) \\\n -> Tuple[bool, bool, Optional[np.ndarray]]:\n intersecting = will_intersect = True\n min_distance = np.inf\n translation, translation_axis = None, None\n for polygon in [a, b]:\n for p1, p2 in zip(polygon, polygon[1:]):\n normal = np.array([-p2[1] + p1[1], p2[0] - p1[0]])\n normal /= np.linalg.norm(normal)\n min_a, max_a = project_polygon(a, normal)\n min_b, max_b = project_polygon(b, normal)\n\n if interval_distance(min_a, max_a, min_b, max_b) > 0:\n intersecting = False\n\n velocity_projection = normal.dot(displacement_a - displacement_b)\n if velocity_projection < 0:\n min_a += velocity_projection\n else:\n max_a += velocity_projection\n\n distance = interval_distance(min_a, max_a, min_b, max_b)\n if distance > 0:\n will_intersect = False\n if not intersecting and not will_intersect:\n break\n if abs(distance) < min_distance:\n min_distance = abs(distance)\n d = a[:-1].mean(axis=0) - b[:-1].mean(axis=0) # center difference\n translation_axis = normal if d.dot(normal) > 0 else -normal\n\n if will_intersect:\n translation = min_distance * translation_axis\n return intersecting, will_intersect, translation",
"def _intersected(positions, radius):\n P1 = positions[0]\n P2 = positions[1]\n P3 = positions[2]\n temp1 = P2 - P1\n e_x = temp1 / np.linalg.norm(temp1)\n temp2 = P3 - P1\n i = np.dot(e_x, temp2)\n temp3 = temp2 - i * e_x\n e_y = temp3 / np.linalg.norm(temp3)\n e_z = np.cross(e_x, e_y)\n d = np.linalg.norm(P2 - P1)\n j = np.dot(e_y, temp2) \n x = d / 2\n y = (-2*i*x + i*i + j*j) / (2*j)\n temp4 = radius**2 - x*x - y*y\n if temp4 < 0:\n return False\n return True",
"def _intersects_3D(A, B):\n return all([_intersects_1D((A[i], A[i+3]), (B[i], B[i+3]))\n for i in range(3)])",
"def is_intersecting(self, node):\n return node.visited_left and node.visited_right",
"def has_intersection(self, obj):\r\n obj_x, obj_y = obj.get_location()\r\n x = self.__x\r\n y = self.__y\r\n # Distance formula\r\n distance = sqrt((obj_x - x) ** 2 + (obj_y - y) ** 2)\r\n if distance <= obj.get_radius() + self.__radius:\r\n return True\r\n return False",
"def intersects(a, b, **kwargs):\n return lib.intersects(a, b, **kwargs)",
"def items_intersect(self):\n for a, b in combinations(self.items, 2):\n if a.intersects_with(b):\n return True\n\n return False",
"def intersect_with(self, other):\n point = self._lines_intersection(other)\n\n if point is False:\n return False\n\n if point is True:\n return not(\n self.min_x() > other.max_x() or\n other.min_x() > self.max_x() or\n self.min_y() > other.max_y() or\n other.min_y() > self.max_y()\n )\n\n else:\n return (\n self.contains_point(point) and\n other.contains_point(point) and\n point\n )",
"def intersects(geometry, sr=None):\r\n\r\n return _filter(geometry, sr, 'esriSpatialRelIntersects')",
"def overlaps(self, other):\n retVal = False\n bounds = self.points\n\n if( isinstance(other,Feature) ):# A feature\n retVal = True\n for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.\n retVal = self._pointInsidePolygon(p,bounds)\n if( retVal ):\n break\n\n elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):\n retVal = self._pointInsidePolygon(other,bounds)\n\n elif( isinstance(other,tuple) and len(other)==3 and not isinstance(other[0],tuple)): # A circle\n #assume we are in x,y, r format\n retVal = False\n rr = other[2]*other[2]\n x = other[0]\n y = other[1]\n for p in bounds:\n test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))\n if( test < rr ):\n retVal = True\n break\n\n elif( isinstance(other,tuple) and len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):\n retVal = ( self.contains( (other[0],other[1] ) ) or # see if we contain any corner\n self.contains( (other[0]+other[2],other[1] ) ) or\n self.contains( (other[0],other[1]+other[3] ) ) or\n self.contains( (other[0]+other[2],other[1]+other[3] ) ) )\n elif(isinstance(other,list) and len(other) >= 3): # an arbitrary polygon\n #everything else ....\n retVal = False\n for p in other:\n test = self._pointInsidePolygon(p,bounds)\n if(test):\n retVal = True\n break\n else:\n logger.warning(\"SimpleCV did not recognize the input type to features.overlaps. This method only takes another blob, an (x,y) tuple, or a ndarray type.\")\n return False\n\n return retVal",
"def is_on_intersection(intersection, coord):\n return intersection.is_on_intersection(coord)",
"def is_poly_in_multipoly(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n return any(is_poly_in_poly(feature_1, coords_2) for coords_2 in feature_2)",
"def is_line_in_multipoly(feature_1: Sequence, feature_2: Sequence) -> bool:\n\n return any(is_line_in_poly(feature_1, coords_2) for coords_2 in feature_2)",
"def in_geofence(self, coordinates):\n\t\tcoords_transformed = ogr.Geometry(ogr.wkbPoint)\n\t\tcoords_transformed.AddPoint(*coordinates)\n\t\treturn self.polygon.Contains(coords_transformed)"
]
| [
"0.7437675",
"0.69443166",
"0.6796653",
"0.6768023",
"0.67520535",
"0.66880053",
"0.6674216",
"0.66541874",
"0.6616146",
"0.660466",
"0.660402",
"0.6571344",
"0.6570313",
"0.6554788",
"0.6529189",
"0.6515813",
"0.6445119",
"0.642645",
"0.63719386",
"0.6354527",
"0.63500655",
"0.63345855",
"0.6326569",
"0.63236845",
"0.6322411",
"0.6303503",
"0.6281039",
"0.6280667",
"0.62751555",
"0.6273482"
]
| 0.7402378 | 1 |
Returns True if geometries are equal to within a specified tolerance Refers to coordinate equality, which requires coordinates to be equal and in the same order for all components of a geometry | def equals_exact(self, other, tolerance): # -> bool:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance",
"def compare_geometrycollection(config, geometry_x, geometry_y):\n if config in BLIST:\n # print('arc distance: %s' % str(arc_distance(x, y)))\n return arc_distance(geometry_x, geometry_y) < EPOCH_CURVE_RELATIVE\n # return True\n # else:\n # print('arc distance: %s' % str(arc_distance(x, y)))\n # return False\n # else:\n\n if not config in BLIST:\n arct = wkt.loads(geometry_x)\n pgis = wkt.loads(geometry_y)\n result = arct.equals(pgis)\n return result\n\n return False",
"def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance",
"def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance",
"def _is_same_position(pos1, pos2, position_tolerance):\n return np.isclose(_pos_distance(pos1, pos2), 0, atol=position_tolerance)",
"def compare_geometry(config, geometry_x, geometry_y):\n if geometry_x.upper().endswith('EMPTY') and geometry_y.upper().endswith(\n 'EMPTY'):\n return True\n\n if config in BLIST:\n return arc_distance(geometry_x, geometry_y) < EPOCH_CURVE_RELATIVE\n # return True\n # else:\n # print('arc distance: %s' %\n # str(arc_distance(geometry_x, geometry_y)))\n # return False\n\n if not config in BLIST:\n arct = wkt.loads(geometry_x)\n pgis = wkt.loads(geometry_y)\n result = arct.equals_exact(pgis, EPOCH)\n return result\n\n return False",
"def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True",
"def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True",
"def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True",
"def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n return True",
"def compare_floats(config, geometry_x, geometry_y):\n value_x = float(geometry_x)\n value_y = float(geometry_y)\n if value_x == 0:\n return value_y == 0\n\n if config in ALIST:\n precision_error = EPOCH_CURVE_RELATIVE\n else:\n precision_error = EPOCH\n\n return abs((value_x - value_y)) <= precision_error",
"def all_close(goal, actual, tolerance):\n #all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True",
"def almost_equal(self, other, rtol=1e-05, atol=1e-08):\n\n # float attributes defining the instance\n fkeys = ['x0', 'y0', 'dx', 'dy']\n # unambiguous attributes\n ckeys = ['nx', 'ny', 'origin']\n\n ok = True\n for k in fkeys:\n ok = ok and np.isclose(getattr(self.corner_grid, k),\n getattr(other.corner_grid, k),\n rtol=rtol, atol=atol)\n for k in ckeys:\n _ok = getattr(self.corner_grid, k) == getattr(other.corner_grid, k)\n ok = ok and _ok\n p1 = self.corner_grid.proj\n p2 = other.corner_grid.proj\n return ok and proj_is_same(p1, p2)",
"def within_tolerance(x, y, tolerance): \r\n return abs(x) <= tolerance and abs(y) <= tolerance",
"def is_equivalent(self, other, name, logger, tolerance=0.):\n if not isinstance(other, Vector):\n logger.debug('other is not a Vector object.')\n return False\n for component in ('x', 'y', 'z', 'r', 't'):\n if not self._check_equivalent(other, name, component, logger,\n tolerance):\n return False\n return True",
"def equals(\n self, other, rtol=None, atol=None, verbose=None, ignore_type=False\n ):\n pp = super()._equals_preprocess(\n other, verbose=verbose, ignore_type=ignore_type\n )\n if pp is True or pp is False:\n return pp\n\n other = pp\n\n coords0 = self.coordinates()\n coords1 = other.coordinates()\n if len(coords0) != len(coords1):\n logger.info(\n f\"{self.__class__.__name__}: Different sized collections of \"\n f\"coordinates ({coords0}, {coords1})\"\n )\n\n return False\n\n if not self.coordinate_conversion.equals(\n other.coordinate_conversion,\n rtol=rtol,\n atol=atol,\n verbose=verbose,\n ignore_type=ignore_type,\n ):\n logger.info(\n f\"{self.__class__.__name__}: Different coordinate conversions\"\n )\n\n return False\n\n if not self.datum.equals(\n other.datum,\n rtol=rtol,\n atol=atol,\n verbose=verbose,\n ignore_type=ignore_type,\n ):\n logger.info(f\"{self.__class__.__name__}: Different datums\")\n\n return False\n\n # Still here? Then the two coordinate references are as equal\n # as can be ascertained in the absence of domains.\n return True",
"def is_almost_equal(self, x ,y ,epsilon=1*10**(-8)):\n \treturn abs(x-y) <= epsilon",
"def _are_equal(grid: List[List[str]], other: List[List[str]]) -> bool:\n for row in range(len(grid)):\n for col in range(len(grid[row])):\n if grid[row][col] != other[row][col]:\n return False\n return True",
"def __eq__(self, other):\n if isinstance(other, self.__class__):\n return math.isclose(self.x, other.x, rel_tol=1e-12, abs_tol=1e-12) and\\\n math.isclose(self.y, other.y, rel_tol=1e-12, abs_tol=1e-12)\n else:\n return False",
"def _check_equivalent(self, other, name, component, logger, tolerance):\n arr = getattr(self, component)\n other_arr = getattr(other, component)\n if arr is None:\n if other_arr is not None:\n logger.debug(\"%s has no %s component but 'other' does.\", name,\n component.upper())\n return False\n else:\n if tolerance > 0.:\n if not numpy.allclose(other_arr, arr, tolerance, tolerance):\n logger.debug(\"%s %s values are not 'close'.\", name,\n component.upper())\n return False\n else:\n if (other_arr != arr).any():\n logger.debug('%s %s values are not equal.', name,\n component.upper())\n return False\n return True",
"def equals_exact(a, b, tolerance=0.0, normalize=False, **kwargs):\n if normalize:\n a = lib.normalize(a)\n b = lib.normalize(b)\n\n return lib.equals_exact(a, b, tolerance, **kwargs)",
"def is_equal(image_a, image_b, tolerance=0.0):\n return image_diff_percent(image_a, image_b) <= tolerance",
"def is_equal_approx(x, y, epsilon=1e-6):\r\n # Check absolute precision.\r\n if -epsilon <= x - y <= epsilon:\r\n return True\r\n\r\n # Is x or y too close to zero?\r\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\r\n return False\r\n\r\n # Check relative precision.\r\n return (-epsilon <= (x - y) / x <= epsilon\r\n or -epsilon <= (x - y) / y <= epsilon)",
"def IsApproximatelyEqual(x, y, epsilon):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?0.\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon\n or -epsilon <= (x - y) / y <= epsilon)",
"def _allclose(x, y, rtol=1e-7, atol=1e-14):\n for a, b in zip(x, y):\n if np.abs(a - b) > (atol + rtol * np.abs(b)):\n return False\n return True",
"def IsApproximatelyEqual(x, y, epsilon = 1e-6):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon or -epsilon <= (x - y) / y <= epsilon)",
"def equals(a, b, tol=1e-10):\n return np.abs(a-b) <= tol",
"def __eq__(self, other):\n if self.rows != other.rows or self.cols != other.cols:\n return False\n for i in range(self.rows):\n for j in range(self.cols):\n # Need isclose (Python >= 3.5) for float precision\n if not math.isclose(self[i, j], other[i, j]):\n return False\n return True",
"def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)",
"def nearly_equal(a, b, sig_fig=5):\n return a == b or int(a*10**sig_fig) == int(b*10**sig_fig)"
]
| [
"0.65638185",
"0.649459",
"0.64573056",
"0.6321705",
"0.622471",
"0.61462414",
"0.613085",
"0.613085",
"0.613085",
"0.6104649",
"0.6088571",
"0.6067109",
"0.60385925",
"0.6033399",
"0.5987341",
"0.5935575",
"0.5908245",
"0.5903495",
"0.5901533",
"0.5879786",
"0.5853027",
"0.5851462",
"0.5822093",
"0.57694864",
"0.5756323",
"0.57423246",
"0.5729704",
"0.57092565",
"0.5706156",
"0.56628245"
]
| 0.71459395 | 0 |
Returns True if the DE9IM string code for the relationship between the geometries satisfies the pattern, else False | def relate_pattern(self, other, pattern): # -> bool:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def match(self):\r\n if len(self.string1) != len(self.string2):\r\n return False\r\n self._build_prefix()\r\n pattern = self.string2\r\n text = self.string11\r\n m = len(self.string2)\r\n n = len(self.string11)\r\n p = self._prefix\r\n k = 0\r\n for i in range(n):\r\n while k > 0 and text[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == text[i]:\r\n k = k+1\r\n if k == m:\r\n return True\r\n return False",
"def is_solved(self):\n marker = self._marker\n\n count = 0\n for row in marker:\n for piece in row:\n if piece == \"*\":\n count += 1\n if count == 1:\n return True\n else:\n return False",
"def is_ftf(self):\n g = self.get_gene().get_seq()\n if 'd' != g[1]:\n return False\n if not len(g) >= 4:\n return False\n for x in range(2, len(g)):\n dec = 'c' if x % 2 == 0 else 'd'\n if dec != g[x]:\n return False\n return True",
"def is_solved(self):\n i = 0\n for row in self._marker:\n for x in row:\n if x == \"*\":\n i += 1\n if i > 1:\n return False\n return True",
"def is_c4_arn(arn: str) -> bool:\n pattern = r\"(fourfront|cgap|[:/]c4-)\"\n return True if re.search(pattern, arn) else False",
"def can_generate_ransom_note(self):\n if self.ransom_text == '' or self.ransom_text == ' ':\n return True\n ransom_text_words = self.ransom_text.split(' ')\n magazine_text_words = self.magazine_text.split(' ')\n # counting the occurrences of words in the ransom and magazine texts.\n ransom_count = self._count_words_in_string(ransom_text_words)\n magazine_count = self._count_words_in_string(magazine_text_words)\n result = False\n for i in ransom_text_words:\n # if magazine_count hashmap doesn't have word\n if magazine_count.get(i) is None:\n result = False\n break\n # if ransom_count hashmap have less word occurances than magazine count.\n if ransom_count.get(i) <= magazine_count.get(i):\n result = True\n else:\n result = False\n break\n return result",
"def is_relation(s):\n return s[0] >= 'F' and s[0] <= 'T' and s.isalnum()",
"def is_canonical(hybrids):\n mrhyb = hybrids[2].upper().replace(\"U\", \"T\")\n mirhyb = hybrids[0].upper().replace(\"U\", \"T\")\n hybrid = hybrids[1]\n \"\"\"\n 2-8\n \"\"\"\n if hybrid[1:8] == \"|||||||\":\n guwoble = False\n for mirnuc, mrnuc in zip(mirhyb[1:8], mrhyb[1:8]):\n if (mirnuc == 'G' and mrnuc == 'T') or (mirnuc == 'T' and mrnuc == 'G'):\n guwoble = True\n if guwoble:\n return False, \"2-8-Gwoble\"\n else:\n return True, \"2-8\"\n elif (hybrid[1:7] == \"||||||\" and mrhyb[0] == 'A'):\n guwoble = False\n for mirnuc, mrnuc in zip(mirhyb[1:7], mrhyb[1:7]):\n if (mirnuc == 'G' and mrnuc == 'T') or (mirnuc == 'T' and mrnuc == 'G'):\n guwoble = True\n if guwoble:\n return False, \"2-7-A-Gwoble\"\n else:\n return True, \"2-7-A\"\n else:\n if hybrid[0:7] == \"|||||||\":\n return False, \"1-7-ElMMo\"\n elif hybrid[1:7] == \"||||||\":\n return False, \"6-mer\"\n if \"v\" in hybrid[0:8]:\n return False, \"mRNAbulge\"\n elif \"^\" in hybrid[0:8]:\n return False, \"miRNAbulge\"\n elif \"O\" in hybrid[0:8]:\n return False, \"symmetric_loop\"\n else:\n return False, \"unknown\"",
"def isValid(text):\n return bool(re.search(r'\\R2D2\\b', text, re.IGNORECASE))",
"def is_equivalence(self) -> bool:",
"def is_atom_convex(self) -> bool:\n return False",
"def is_atom_convex(self):\n return False",
"def is_atom_convex(self):\n return False",
"def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True",
"def match(self) -> bool:",
"def has_non_standard_residues(self):\n for frag in self.iter_non_standard_residues():\n return True\n return False",
"def looks_like_a_smiles(self):\n regexp = r\"^([^J][0-9BCOHNSOPIFKcons@+\\-\\[\\]\\(\\)\\\\\\/%=#$,.~&!|Si|Se|Br|Mg|Na|Cl|Al]{3,})$\"\n return re.search(regexp, self.dirty) is not None",
"def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True",
"def isFormula(string):\r\n string = string.replace(' ', '')\r\n if string == '':\r\n return True\r\n elif re.sub(r\"\\w|\\d|->|_|\\(|\\)|~\", '', string):\r\n return False\r\n elif re.findall(r\"(?<!\\w_)\\d+|(?<!\\w)\\d+|->->\", string):\r\n return False\r\n else:\r\n string1 = string.replace('~', '').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string1)\r\n for part in info:\r\n string1 = string1.replace(part, '(-1)')\r\n try:\r\n eval(string1)\r\n except:\r\n return False\r\n string2 = string.replace('~', '-').replace('->', '+')\r\n info = re.findall(r'\\w_\\d+|\\w\\d*', string2)\r\n for part in info:\r\n string2 = string2.replace(part, '(-1)')\r\n try:\r\n eval(string2)\r\n except:\r\n return False\r\n return True",
"def is_atom_convex(self):\n return True",
"def validate_abstract_pattern(self):\n valid = True\n if not self._validate_pattern_fields():\n valid = False\n if not self._validate_entities():\n valid = False\n return valid",
"def matches(self):\n return False",
"def are_same(string_window, pat_window):\n for i in range(256):\n if string_window[i] != pat_window[i]:\n return False\n return True",
"def is_dementia(code):\n assert isinstance(code, str)\n code_set = ('294.10', '294.11', '294.20', '294.21', '2941', '29411', '2942', '29421')\n code_set += ('290',)\n code_set += ('F01', 'F02', 'F03')\n return code.startswith(code_set)",
"def is_tft(self):\n g = self.get_gene().get_seq()\n if 'c' != g[1]:\n return False\n if not len(g) >= 4:\n return False\n for x in range(2, len(g)):\n dec = 'c' if x % 2 == 0 else 'd'\n if dec != g[x]:\n return False\n return True",
"def matches_output(self, text):\n if self.markers:\n for marker in self.markers:\n if marker in text:\n return True\n # -- OTHERWISE:\n return False",
"def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid",
"def is_organic(fragment):\n # TODO: Consider a different definition?\n # Could allow only H, C, N, O, S, P, F, Cl, Br, I\n for a in fragment.GetAtoms():\n if a.GetAtomicNum() == 6:\n return True\n return False",
"def is_atomic(self):\n \n symbols=set()\n for e in self.symbols:\n if not e=='':\n symbols.add(e)\n\n for s in symbols: #unicity first\n count=0\n for e in symbols:\n if s==e:\n count+=1\n if count!=1:\n return False\n else:\n continue \n temp=symbols.copy()\n for s in symbols:\n temp.remove(s)\n for e in temp:\n if s in e:\n return False\n else:\n continue\n temp=symbols.copy()\n\n return True",
"def istrue(self):\n return has_pos_and_neg(self.literals)"
]
| [
"0.5887881",
"0.57415825",
"0.5705854",
"0.56977844",
"0.56829077",
"0.5674485",
"0.56383944",
"0.5585288",
"0.5558476",
"0.55112875",
"0.5505687",
"0.5493004",
"0.5493004",
"0.54758126",
"0.542464",
"0.54054475",
"0.5403299",
"0.5375537",
"0.53690135",
"0.5365296",
"0.53553426",
"0.5354523",
"0.53496695",
"0.53458714",
"0.5334004",
"0.5328388",
"0.53258705",
"0.5319622",
"0.5309061",
"0.53043336"
]
| 0.5991061 | 0 |
Return a point at the specified distance along a linear geometry. Negative length values are taken as measured in the reverse direction from the end of the geometry. Out-of-range index values are handled by clamping them to the valid range of values. If the normalized arg is True, the distance will be interpreted as a fraction of the geometry's length. | def interpolate(self, distance, normalized=...): # -> BaseGeometry:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_norm_distance(length: int, distance: float) -> float:\n return distance/(length*2)",
"def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))",
"def closest_point(self, point, maxdist=0.0, return_param=False):\n return self.xyz",
"def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))",
"def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())",
"def distance(v1: Union[np.ndarray, np.iterable, int, float], v2: Union[np.ndarray, np.iterable, int, float],\n normalised=True) -> float:\n return 1 - np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)) if not normalised else 1 - np.dot(v1, v2)",
"def closest_point(self, l):\n cos = np.dot(self.direction, l.direction)\n n = 1 - cos ** 2\n if n < sys.float_info.epsilon:\n # Lines are parallel.\n return self.zero\n\n d0 = l.zero - self.zero\n a = np.dot(d0, self.direction)\n b = np.dot(d0, l.direction)\n return self.zero + self.direction * ( a - b * cos) / n",
"def distance_point_signed(self, point: array_like) -> np.float64:\n vector_to_point = Vector.from_points(self.point, point)\n\n return self.normal.scalar_projection(vector_to_point)",
"def dist(self, point: np.array):\n return np.linalg.norm(\n np.cross(point - self.r_start, self.direction), axis=1) / \\\n np.linalg.norm(self.direction)",
"def getOffsetLine(self, distance, side=c.INSIDE):\n StartA = np.array([self.start.x, self.start.y])\n EndA = np.array([self.end.x, self.end.y])\n r = StartA - EndA #The slope vector of self\n rn = np.array([-r[c.Y], r[c.X]]) #flip x and y and inverse y to get the normal vector of the slope\n rn = rn/np.linalg.norm(rn)*distance #normalize by dividing by its magnitude and multipy by distance to get the correct length\n \n if side == c.INSIDE:\n return self.translate(-rn[c.X], -rn[c.Y]) #the \"minus\" side line is the left side which is inside.\n \n return self.translate(rn[c.X], rn[c.Y]) #the \"Plus\" side of the line is the right side which is outside.",
"def get_nearest(self, vector, limit):\n raise NotImplementedError",
"def distanceToPoint(self, point):\n\n length = self.length\n if not length:\n raise ValueError('Cannot calculate point distance. Invalid line segment.')\n\n s = self.start\n e = self.end\n deltaX = e.x - s.x\n deltaY = e.y - s.y\n\n distance = abs(deltaY*point.x - deltaX*point.y - s.x*e.y + e.x*s.y)/length.raw\n\n B = deltaY*point.x - deltaX*point.y - s.x*e.y + e.x*s.y\n AbsB = abs(B)\n D = math.sqrt(deltaX*deltaX + deltaY*deltaY)\n DPrime = 1.0/math.pow(deltaX*deltaX + deltaY*deltaY, 3.0/2.0)\n bBD = B/(AbsB*D)\n\n pointXErr = point.xUnc*abs(deltaY*B/(AbsB*D))\n pointYErr = point.yUnc*abs(deltaX*B/(AbsB*D))\n startXErr = s.xUnc*abs(AbsB*DPrime + bBD*(point.y - e.y))\n startYErr = s.yUnc*abs(AbsB*DPrime + bBD*(e.x - point.x))\n endXErr = e.xUnc*abs(bBD*(s.y - point.y) - AbsB*DPrime)\n endYErr = e.yUnc*abs(bBD*(point.x - s.x) - AbsB*DPrime)\n error = pointXErr + pointYErr + startXErr + startYErr + endXErr + endYErr\n\n return NumericUtils.toValueUncertainty(distance, error)",
"def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray",
"def _normalize_point(self, p):\n segment_right_index = 1\n while p.x > self._hull_points[segment_right_index].x and segment_right_index < len(self._hull_points) - 1:\n segment_right_index += 1\n p1, p2 = self._hull_points[segment_right_index - 1], self._hull_points[segment_right_index]\n k = (p2.y - p1.y) / (p2.x - p1.x)\n b = p1.y - k * p1.x\n return Point(p.x, p.y / (k * p.x + b))",
"def norm(point):\n return np.sqrt(norm2(point))",
"def distance_point_line_3d(point: Vector, start: Vector, end: Vector) -> float:\n if start.isclose(end):\n raise ZeroDivisionError('Not a line.')\n v1 = point - start\n # point projected onto line start to end:\n v2 = (end - start).project(v1)\n # Pythagoras:\n return math.sqrt(v1.magnitude_square - v2.magnitude_square)",
"def closestIntersectionPoint(origin, direction, outline, maxDistance):\n testLine = LineString([origin, origin + direction * maxDistance])\n inter = testLine.intersection(outline)\n if inter.is_empty:\n if TABFAIL_VISUAL:\n import matplotlib.pyplot as plt\n\n plt.axis('equal')\n x, y = outline.coords.xy\n plt.plot(list(map(toMm, x)), list(map(toMm, y)))\n x, y = testLine.coords.xy\n plt.plot(list(map(toMm, x)), list(map(toMm, y)))\n plt.show()\n raise NoIntersectionError(f\"No intersection found within given distance\", origin)\n origin = Point(origin[0], origin[1])\n geoms = list()\n for geom in listGeometries(inter):\n if isinstance(geom, Point):\n geoms.append(geom)\n elif isinstance(geom, LineString):\n # When a linestring is an intersection, we know that the starting or\n # ending points are the nearest one\n geoms.extend([Point(geom.coords[0]), Point(geom.coords[-1])])\n else:\n raise TypeError(f\"intersection() returned an unsupported datatype: {geom.__class__.__name__}\")\n return min([(g, origin.distance(g)) for g in geoms], key=lambda t: t[1])[0]",
"def norm(a):\n return distance((0, 0), a)",
"def point_at_distance(self, distance, heading):\n distance_rad = distance / R_EARTH\n heading_rad = math.radians(heading)\n return self.__spherical_between(math.radians(self.lat), math.radians(self.long), distance_rad, heading_rad)",
"def _nearest_to_point(self, point):\n ptvertex = point.get_vertex(crs=self.crs)\n segments = zip(self.vertices.slice(0, -1), self.vertices.slice(1, 0))\n\n if isinstance(self.crs, CartesianCRS):\n func = _cvectorgeo.pt_nearest_planar\n def func(seg):\n return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],\n seg[0][0], seg[0][1], seg[1][0], seg[1][1])\n else:\n fwd = self.crs.forward\n inv = self.crs.inverse\n def func(seg):\n return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,\n seg[0], seg[1], tol=0.01)\n\n point_dist = map(func, segments)\n min_point = None\n min_dist = -1.0\n for i, (point, dist) in enumerate(point_dist):\n if dist < min_dist or (i == 0):\n min_point = point\n min_dist = dist\n\n return min_dist, min_point",
"def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la",
"def normal(self, point):\n point = self._center - np.array(point)\n # if abs(point.dot(point) - self._radius**2) > 1e-15:\n # raise RayTraceError(\n # 'Cannot compute normal. Point is too far from surface ({}).'.format(\n # (abs(point.dot(point) - self._radius**2))))\n return normalize(point / self._radius)",
"def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)",
"def random_uniform(self, n_samples=1, max_norm=1):\n point = ((np.random.rand(n_samples, self.dimension) - .5)\n * max_norm)\n point = self.intrinsic_to_extrinsic_coords(point)\n assert np.all(self.belongs(point))\n\n assert point.ndim == 2\n return point",
"def local_normal_at(self, shape_point: Point) -> Vector:\n raise NotImplementedError(f'Method \"{self.local_intersect.__name__}\" needs to be implemented')",
"def approximate_signed_distance(self, location):\n center = 0.5 * (self.lower + self.upper)\n extent = self.upper - self.lower\n distance = math.abs(location - center) - extent * 0.5\n distance = math.max(distance, 'vector')\n distance = math.min(distance, self.shape.instance) # union for instance dimensions\n return distance",
"def get_normal_dist(line, point):\n \n # Rotate: \n x_rot = np.cos(line[1])*point[0] + np.sin(line[1])*point[1]\n \n # Normal distance: x_rot - rho:\n return x_rot - line[0]",
"def get_linear_distance(p1,p2):\r\n if p1[0] < p2[0]:\r\n d = 0\r\n else:\r\n d = np.sqrt((p1[0]-p2[0])**2.0 + (p1[1]-p2[1])**2.0)\r\n return d",
"def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)",
"def get_distance_from_point(self, pstart, p_end):\n a = numpy.array((pstart.x, pstart.y, pstart.z))\n b = numpy.array((p_end.x, p_end.y, p_end.z))\n\n distance = numpy.linalg.norm(a - b)\n\n return distance"
]
| [
"0.56439435",
"0.56294566",
"0.55049217",
"0.5495214",
"0.54535615",
"0.53204644",
"0.52492005",
"0.5180053",
"0.5160316",
"0.51442784",
"0.51392424",
"0.5118768",
"0.51137716",
"0.51114225",
"0.5111166",
"0.50826037",
"0.5079231",
"0.50748944",
"0.50442165",
"0.5022545",
"0.50031406",
"0.5001377",
"0.4992883",
"0.49794585",
"0.49743372",
"0.49566793",
"0.49462804",
"0.49454376",
"0.4934333",
"0.49335873"
]
| 0.59387493 | 0 |
Provide the Numpy array protocol. | def __array_interface__(self):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __array__(self, *args, **kwargs):\n\n return self.data",
"def __array__(self):\n return np.asarray(self.data)",
"def array(self):",
"def __array__(self):\n return np.zeros(self.shape, self.dtype)",
"def __array__(self):\n return self.array",
"def tonumpy(self):\n import numpy\n from numpy import ma\n\n # initialize the return\n narray = None\n\n if None in self._data:\n\n # define a lambda function\n # to create the mask array\n make_mask = lambda x: x == None\n\n # create the numpy array,\n # making on the fly the mask\n narray = numpy.ma.array(self._data, mask=list(map(make_mask, self._data)))\n\n else:\n # convert the list to a numpy object\n narray = numpy.array(self._data)\n\n # return the numpy object\n return narray",
"def __array__(self):\n return self.to_array()",
"def to_numpy(self, **kwargs):\n pass",
"def basic_array_creation():\n print('From normal creation')\n arr: pa.Array = pa.array([1, 2, 3, 4, 5], type=pa.int8())\n print(arr)\n\n print('From pandas series')\n arr: pa.Array = pa.Array.from_pandas(pd.Series([1, 2, 3, 4, 5]))\n print(arr)",
"def __array__(self, dtype=None) -> np.ndarray:\n return self.values",
"def numpy(self):\n return self.data",
"def from_numpy(self, a):\n raise NotImplementedError(\"from_numpy\")",
"def asnumpy(self):\n return self.data.asnumpy()",
"def construct_array_type(cls, *args):\n if len(args) > 0:\n raise NotImplementedError(\n \"construct_array_type does not support arguments\")\n return XndframesArray",
"def get_array_module(arr):\n # TODO: also check for __array_interface__ attribute and not\n # __cuda_array_interface__?\n if have_cupy:\n return cupy.get_array_module(arr)\n else:\n return np",
"def asarray(self):\n from numpy import asarray\n return asarray(self)",
"def array(self):\n return np.asarray(self)",
"def __array__(self):\n return dict2rec(self)",
"def construct_array_type(cls, *args):\n if len(args) > 0:\n raise NotImplementedError(\"construct_array_type does not support arguments\")\n return FletcherArray",
"def get_array(self):\n return numpy.array(self._ar)",
"def __array_function__(self, func, types, args, kwargs):\n try:\n if not func.__module__.startswith(\"numpy\"):\n return NotImplemented\n except AttributeError:\n return NotImplemented\n _args = list(map(MetaTensor._convert, args))\n _kwargs = {k: MetaTensor._convert(v) for k, v in kwargs.items()}\n return func(*_args, **_kwargs)",
"def lookup_array(self, *args, **kwargs): # real signature unknown\n pass",
"def lookup_array(self, *args, **kwargs): # real signature unknown\n pass",
"def as_numpy(a):\n if isinstance(a, mx.nd.NDArray):\n a = a.asnumpy()\n return a",
"def numpy(self) -> np.ndarray:\n return self.tensor.numpy()",
"def to_numpy(array):\n if not CUPY_LOADED:\n return array\n else:\n return xp.asnumpy(array)",
"def numpy_vector(self):\n pass",
"def ndarray(dtype, shape):\n if isinstance(shape, numbers.Number):\n shape = (shape, )\n if dtype in all_types:\n return ScalarNdarray(dtype, shape)\n if isinstance(dtype, MatrixType):\n return MatrixNdarray(dtype.n, dtype.m, dtype.dtype, shape)\n\n raise TaichiRuntimeError(\n f'{dtype} is not supported as ndarray element type')",
"def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )",
"def a(*args, **kwargs):\n return np.array(*args, **kwargs)"
]
| [
"0.6984412",
"0.68687886",
"0.6676552",
"0.6667087",
"0.66398215",
"0.6637101",
"0.6610455",
"0.65973616",
"0.6503999",
"0.6497078",
"0.64307505",
"0.6386554",
"0.63856333",
"0.63472104",
"0.6298814",
"0.62879676",
"0.6276794",
"0.62321687",
"0.6229774",
"0.62225384",
"0.6192883",
"0.6171395",
"0.6171395",
"0.6141695",
"0.6139878",
"0.61258614",
"0.61165375",
"0.6107634",
"0.609104",
"0.6088997"
]
| 0.7552204 | 1 |
Executes the experiment with the given config. The experiment creates mean and sub neuron activities for each label for each given dataset. If xFoldCrossValidation is set, this will be repeated x times. | def execute(self):
for csnn_config in self.__config["csnnConfigs"]:
csnn_name = csnn_config["modelName"]
try:
for dataset_config in self.__config["datasetConfigs"]:
provider = getDatasetProvider(dataset_config)
if not dataset_config["nameOfDataset"] in csnn_config["batchSizes"].keys():
continue
for i in range(0, csnn_config["xFoldCrossValidation"]):
model_dir = "/" + csnn_name + "/" + dataset_config["nameOfDataset"] + "/xFoldCrossVal" + str(i)
self.__logger.info("Starting to train: " + model_dir, "CsnnVisualizationExperiment:execute")
if csnn_config["xFoldCrossValidation"] <= 1:
xseed = None
else:
xseed = 42 + i
dataset, dataset_generator = prepareDataset(provider, dataset_config, xfold_seed=xseed,
augment_data=csnn_config["augmentData"])
dataset_max_div, _ = prepareDataset(provider, dataset_config, xfold_seed=xseed,
augment_data=csnn_config["augmentData"],
normalize_data="maxDiv")
self.__logger.info("Starting to create dataset encoding with: " + model_dir,
"CsnnVisualizationExperiment:execute")
encoding_provider, encoding = prepareEncoding(csnn_config, dataset_generator, dataset,
dataset_config, csnn_name, self.__num_gpus,
model_dir + "/Csnn", zero_mean_unit_variance=
csnn_config["zeroMeanUnitVarianceEncoding"],
return_with_encoding=dataset_max_div)
self.__logger.info("Finished to create dataset encoding with: " + model_dir,
"CsnnVisualizationExperiment:execute")
self.__logger.info("Starting to create mean activities for: " + model_dir,
"CsnnVisualizationExperiment:execute")
enc_for_label = {}
for i in range(0, len(encoding["y_test"])):
enc_for_label.setdefault(np.argmax(encoding["y_test"][i]), []).append(encoding["x_test"][i])
mean_ecs_for_label = []
for key, value in sorted(enc_for_label.items()):
mean_ec_for_label = np.mean(np.mean(np.mean(value, axis=0), axis=0), axis=0)
grid = csnn_config["layers"][-1]["somGrid"]
mean_ec_for_label = np.reshape(mean_ec_for_label, [grid[0], grid[1], grid[2]])
mean_ecs_for_label.append(mean_ec_for_label)
# Dir to save and reload model.
save_path = os.path.dirname(
sys.modules['__main__'].__file__) + "/experimentResults" + model_dir
if not os.path.exists(save_path):
os.makedirs(save_path)
np.save(save_path + "/test_mean_acts", np.array(mean_ecs_for_label))
self.__logger.info("Finised to create mean activities for: " + model_dir,
"CsnnVisualizationExperiment:execute")
self.__logger.info("Starting to create mean sub activities for: " + model_dir,
"CsnnVisualizationExperiment:execute")
sub_encs = []
for enc in mean_ecs_for_label:
sub_encs_for_label = []
for kenc2 in mean_ecs_for_label:
sub_enc = enc - kenc2
sub_encs_for_label.append(sub_enc)
sub_encs.append(sub_encs_for_label)
np.save(save_path + "/test_sub_mean_acts", np.array(sub_encs))
self.__logger.info("Finished to create mean sub activities for: " + model_dir,
"CsnnVisualizationExperiment:execute")
except Exception:
print(traceback.format_exc()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self, config, **kwargs):\n config_parameters = utils.parse_config_or_kwargs(config, **kwargs)\n experiment_path = self.train(config, **kwargs)\n evaluation_logger = utils.getfile_outlogger(\n Path(experiment_path, 'evaluation.log'))\n for testdata, testlabel in zip(config_parameters['testdata'],\n config_parameters['testlabel']):\n evaluation_logger.info(\n f'Evaluting {testdata} with {testlabel} in {experiment_path}')\n # Scores for later evaluation\n scores_file = Path(experiment_path,\n 'scores_' + Path(testdata).stem + '.tsv')\n evaluation_result_file = Path(\n experiment_path) / 'evaluation_{}.txt'.format(\n Path(testdata).stem)\n self.score(experiment_path,\n result_file=scores_file,\n label=testlabel,\n data=testdata)\n self.evaluate_eer(scores_file,\n ground_truth_file=testlabel,\n evaluation_res_file=evaluation_result_file)",
"def train(self):\n acc_time = []\n data_test = self.val_data[0][0][0]\n labels_test = self.val_data[0][0][1]\n for i, train_batch in enumerate(self.dataset):\n \n writerDIM = SummaryWriter('runs/experiment_DIM'+str(i))\n data,labels, t = train_batch\n\n index_tr,index_cv,coreset = data_split(data.shape[0],777)\n\n # adding eventual replay patterns to the current batch\n if i == 0:\n ext_mem = [data[coreset], labels[coreset]]\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n else:\n dataP = ext_mem[0]\n labP = ext_mem[1]\n\n ext_mem = [\n np.concatenate((data[coreset], ext_mem[0])),\n np.concatenate((labels[coreset], ext_mem[1]))]\n if self.replay:\n dataC = np.concatenate((data[index_tr], data[index_cv],dataP),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv],labP),axis=0)\n else:\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n\n\n\n print(\"----------- batch {0} -------------\".format(i))\n print(\"Task Label: \", t)\n trC,cvC = data_split_Tr_CV(dataC.shape[0],777)\n\n train_set = LoadDataset(dataC,labC,transform=self.tr,indices=trC)\n val_set = LoadDataset(dataC,labC,transform=self.tr,indices=cvC)\n print('Training set: {0} \\nValidation Set {1}'.format(train_set.__len__(),val_set.__len__()))\n batch_size=32\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaders = {'train':train_loader,'val':valid_loader}\n \n ####### Set hyperparameters for the training\n if i ==0: \n prior = False\n ep=40\n dim_model = DIM_model(batch_s=32,num_classes =128,feature=True) \n dim_model.to(self.device)\n classifierM = _classifier(n_input=128,n_class=50,n_neurons=[256,256,128])\n classifierM = classifierM.to(self.device)\n writer = SummaryWriter('runs/experiment_C'+str(i))\n lr_new = 0.00001\n lrC=0.0001\n \n else:\n prior = True\n ep=6\n \n lr_new =0.000005\n lrC = 0.00005\n\n optimizer = torch.optim.Adam(dim_model.parameters(),lr=lr_new)\n scheduler = lr_scheduler.StepLR(optimizer,step_size=40,gamma=0.1) #there is also MultiStepLR\n\n tr_dict_enc = {'ep':ep,'writer':writerDIM,'best_loss':1e10,'t_board':True,\n 'gamma':.5,'beta':.5,'Prior_Flag':prior,'discriminator':classifierM} \n tr_dict_cl = {'ep':40,'writer':writer,'best_loss':1e10,'t_board':True,'gamma':1}#40\n\n if i==0 and self.load:\n print('Load DIM model weights first step')\n dim_model.load_state_dict(torch.load(self.path + 'weights/weightsDIM_T0.pt'))\n else:\n ############################## Train Encoder########################################\n dim_model,self.stats = trainEnc_MI(self.stats,dim_model, optimizer, scheduler,dataloaders,self.device,tr_dict_enc)\n ####################################################################################\n if i==0:\n torch.save(dim_model.state_dict(), self.path + 'weights/weightsDIM_T'+str(i)+'.pt')\n\n ####\n #Conversion of image into latent space representation for classifier training\n ####\n dim_model.requires_grad_(False)\n for phase in ['train','val']:\n dataF = None\n labF = None\n for inputs, labels in dataloaders[phase]:\n torch.cuda.empty_cache()\n if len(inputs.shape)==5:\n\n inputs = inputs[:,:,:,:,0].to(self.device)\n else:\n inputs = inputs.to(self.device)\n\n _,_,pred = dim_model(inputs)\n pred_l = pred.data.cpu().numpy()\n if dataF is None:\n dataF = pred_l\n labF = 
labels.data.cpu().numpy()\n else:\n dataF = np.concatenate((dataF,pred_l),axis=0)\n labF = np.concatenate((labF,labels.data.cpu().numpy()),axis=0)\n\n if phase == 'train':\n dataTr_f = dataF\n labTr_f = labF\n else:\n dataCv_f = dataF\n labCv_f = labF\n \n dim_model.requires_grad_(True)\n train_set = LoadFeat(dataTr_f,labTr_f)\n val_set = LoadFeat(dataCv_f,labCv_f)\n batch_size=32\n\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaderC = {'train':train_loader,'val':valid_loader}\n\n optimizerC = torch.optim.Adam(classifierM.parameters(),lr=lrC)\n schedulerC = lr_scheduler.StepLR(optimizerC,step_size=40,gamma=0.1)\n classifierM.requires_grad_(True)\n\n ############################## Train Classifier ########################################\n classifierM,self.stats = train_classifier(self.stats,classifierM, optimizerC, schedulerC,dataloaderC,self.device,tr_dict_cl) \n #################################### #################################### ##############\n\n torch.save(classifierM.state_dict(), self.path + 'weights/weightsC_T'+str(i)+'.pt')\n dim_model.eval()\n classifierM.eval()\n #### Cross_val Testing\n \n test_set = LoadDataset(data_test,labels_test,transform=self.trT)\n batch_size=32\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)\n score= []\n\n for inputs, labels in test_loader:\n torch.cuda.empty_cache()\n inputs = inputs.to(self.device)\n labels = labels.to(self.device) \n _,_,ww =dim_model(inputs)\n pred = classifierM(ww)\n pred_l = pred.data.cpu().numpy()\n score.append(np.sum(np.argmax(pred_l,axis=1)==labels.data.cpu().numpy())/pred_l.shape[0])\n print('TEST PERFORMANCES:', np.asarray(score).mean())\n acc_time.append(np.asarray(score).mean())\n del test_set,test_loader\n \n self.dim_model = dim_model\n self.classifierM = classifierM\n acc_time = np.asarray(acc_time)\n return self.stats,acc_time",
"def experiment(**config):\n from ..training.train import training\n \n training(config)",
"def perform_experiment(experiment_config):\n\n # Initializing seeds for the random numbers generators.\n if 'Numpy seed' in experiment_config:\n initial_seed = experiment_config['Numpy seed']\n else:\n initial_seed = 5\n\n np.random.seed(initial_seed) # Initializing numpy seed.\n\n if 'TensorFlow seed' in experiment_config:\n initial_seed = experiment_config['TensorFlow seed']\n else:\n initial_seed = 5\n\n set_random_seed(initial_seed) # Initializing TensorFlow seed.\n\n # Reading the config data from the file.\n optimizer = experiment_config['Optimizer']\n loss = experiment_config['Loss func']\n epochs = experiment_config['Epochs num']\n batch_size = experiment_config['Batch size']\n\n x_train, y_train, x_test, y_test = create_dataset(experiment_config['Dataset'])\n\n initial_weights_file = None\n if 'Initial weights file' in experiment_config:\n initial_weights_file = path.join(weights_files_path, experiment_config['Initial weights file'])\n sampled_metrics = experiment_config['Performance metrics']\n\n # Creating the model the experiment will be performed on.\n model = create_model(experiment_config['Model name'], initial_weights_file)\n model.summary()\n\n # Train the new model. Training time is measured.\n start_time = time.time()\n model.train(x_train, y_train, epochs, batch_size, optimizer, loss, sampled_metrics, x_test, y_test,\n log_training=True, log_tensorboard=True)\n learning_time = time.time() - start_time\n\n # Reading all the performance details of this model, including inner layers outputs.\n train_score = model.evaluate(x_train, y_train, verbose=0)\n test_score = model.evaluate(x_test, y_test, verbose=0)\n y_pred = model.predict(x_test, batch_size=batch_size, verbose=0)\n confusion_mat = confusion_matrix(y_test, y_pred)\n layers_training_output = model.get_layers_output(x_train, learning_phase='Testing')\n layers_testing_output = model.get_layers_output(x_test, learning_phase='Testing')\n\n # Saving all results to a single dictionary. Later it will be saved to external files.\n results = {\n 'Training Time [sec]': learning_time,\n 'Test Loss': test_score[0],\n 'Test Accuracy': test_score[1],\n 'Test Mean Precision': test_score[2],\n 'Test Mean Recall': test_score[3],\n 'Precision per class': np.array2string(precision_score(y_test, y_pred, average=None)),\n 'Recall per class': np.array2string(recall_score(y_test, y_pred, average=None)),\n 'Confusion Matrix': confusion_mat,\n 'Layers Training Output': layers_training_output,\n 'Layers Testing Output': layers_testing_output,\n 'Train Loss': train_score[0],\n 'Train Accuracy': train_score[1],\n 'Train Mean Precision': train_score[2],\n 'Train Mean Recall': train_score[3]\n }\n\n return results, model",
"def _run_single_config(self, train_ratio, config):\n X_train, X_test = self._generate_embeddings(config)\n model_str = config[\"model\"]\n model_tuples = [(model_str, MODELS[model_str])]\n is_multi = (self.class_label == \"Multiclass\")\n metrics_df = evaluate_classifiers(model_tuples, X_train, self.tr_label, X_test, self.te_label, multiclass=is_multi, show_confusion_matrix=True, verbose=False)\n metrics_df[\"class\"] = self.class_label\n #append parameter values to dataframe\n for key, value in config.items():\n metrics_df[key] = value\n return metrics_df",
"def run(self, *args, **kwargs) -> None:\n loop = tqdm(self.configs, desc='Configurations')\n for cfg in loop:\n loop.set_postfix_str(cfg.experiment_cfg['name'])\n for i in range(cfg.num_models):\n filename = None\n run_id = None\n if cfg.filenames is not None:\n if isinstance(cfg.filenames, str):\n filename = cfg.filenames\n else:\n filename = cfg.filenames[i]\n elif cfg.run_ids is not None:\n run_id = cfg.run_ids[i]\n\n run_cfg = modelgen_cfg_to_runner_cfg(cfg, run_id=run_id, filename=filename)\n runner = Runner(run_cfg, persist_metadata=cfg.experiment_cfg)\n runner.run()\n\n # clear up memory between runs\n torch.cuda.empty_cache()",
"def dummy(args):\n\n task_ids = {'1': LossTypes.mse, '2': LossTypes.mse, '3': LossTypes.cross_entropy}\n input_dimension = 5000 # Dimensionality of each training set\n num_inputs_train = 750\n num_inputs_validate = 100\n num_inputs_test = 150\n\n # Training set\n x_train = np.random.random((num_inputs_train, input_dimension))\n y_train = {}\n\n # Validation set\n x_validate = np.random.random((num_inputs_validate, input_dimension))\n y_validate = {}\n\n # Testing set\n x_test = np.random.random((num_inputs_test, input_dimension))\n y_test = {}\n\n for task_id, loss_type in task_ids.iteritems():\n if loss_type is LossTypes.mse:\n y_train[task_id] = np.random.random((num_inputs_train, 1))\n y_validate[task_id] = np.random.random((num_inputs_validate, 1))\n y_test[task_id] = np.random.random((num_inputs_test, 1))\n elif loss_type is LossTypes.cross_entropy:\n # Training labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_train).reshape(1, num_inputs_train)\n y_train[task_id] = convert_to_one_hot(labels)\n\n # Validation labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_validate).reshape(1, num_inputs_validate)\n y_validate[task_id] = convert_to_one_hot(labels)\n\n # Testing labels -- 2-dimensional one-hot vectors for each example.\n labels = np.random.binomial(1, 0.8, num_inputs_test).reshape(1, num_inputs_test)\n y_test[task_id] = convert_to_one_hot(labels)\n\n exp = Experiment(expt_name=\"synthetic\", task_ids=task_ids, x_train=x_train, x_validate=x_validate,\n x_test=x_test, y_train=y_train, y_validate=y_validate, y_test=y_test,\n model_class=LowLevelSharingModel, learning_rate=args.learning_rate,\n batch_size=args.batch_size, num_epochs=args.num_epochs)\n exp.initialize_network()\n exp.train()\n sys.stderr.write(\"Training complete. Logs, outputs, and model saved in \" + os.getcwd())",
"def train_and_eval(config, babas_data):\n\n if config.resume_from_checkpoint is not None:\n try:\n if config.augment_background == 'background':\n bg = config.augment_background\n else:\n bg = None\n rfc = config.resume_from_checkpoint\n ic = config.include_validation\n print 'Loading saved config: %s' % config.saved_config\n config = np.load(config.saved_config).item()\n config.resume_from_checkpoint = rfc\n config.include_validation = ic\n if not hasattr(config, 'augment_background'):\n config.augment_background = 'constant'\n if not hasattr(config, 'background_folder'):\n config.background_folder = 'backgrounds'\n if bg is not None:\n print 'Overriding saved config to add kinect backgrounds to training.'\n config.augment_background = bg\n results_dir = rfc\n except:\n print 'Relying on default config file.'\n\n if babas_data: # Shitty naive training method\n config.tfrecord_dir = '/media/data_cifs/monkey_tracking/data_for_babas/tfrecords_from_babas'\n config.babas_tfrecord_dir = config.tfrecord_dir\n config.steps_before_validation = 20\n config.epochs = 2000\n config.convert_labels_to_pixel_space = False\n config.augment_background = 'constant'\n\n # Import your model\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n model_file = import_cnn(config.model_type)\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = '%s_%s' % (config.model_type, dt_stamp)\n if config.selected_joints is not None:\n dt_dataset = '_%s' % (config.selected_joints) + dt_dataset\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, dt_dataset)\n results_dir = os.path.join(config.npy_dir, dt_dataset)\n print 'Saving Dmurphy\\'s online updates to: %s' % results_dir\n dir_list = [config.train_checkpoint, config.summary_dir, results_dir]\n [tf_fun.make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, config.train_tfrecords)\n if config.babas_tfrecord_dir is not None:\n train_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.train_tfrecords)\n if config.include_validation or config.include_validation is None:\n val_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.val_tfrecords)\n else:\n train_babas_tfrecord_dir = None\n val_babas_tfrecord_dir = None\n\n if isinstance(config.include_validation, basestring):\n validation_data = config.include_validation\n elif config.include_validation == True:\n validation_data = os.path.join(\n config.tfrecord_dir,\n config.val_tfrecords)\n else:\n validation_data = None\n\n print 'Using training set: %s' % train_data\n print 'Using validation set: %s' % validation_data\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_data_dict = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n 
num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background=config.augment_background,\n background_folder=config.background_folder,\n randomize_background=config.randomize_background,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=train_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n train_data_dict['deconv_label_size'] = len(config.labels)\n\n val_data_dict = inputs(\n tfrecord_file=validation_data,\n batch_size=config.validation_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background='none',\n background_folder=config.background_folder,\n randomize_background=None,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=val_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n val_data_dict['deconv_label_size'] = len(config.labels)\n\n # Check output_shape\n if config.selected_joints is not None:\n print 'Targeting joint: %s' % config.selected_joints\n joint_shape = len(config.selected_joints) * config.keep_dims\n if (config.num_classes // config.keep_dims) > (joint_shape):\n print 'New target size: %s' % joint_shape\n config.num_classes = joint_shape\n\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n print 'Creating training graph:'\n model = model_file.model_struct(\n weight_npy_path=config.weight_npy_path)\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n rgb=train_data_dict['image'],\n target_variables=train_data_dict,\n train_mode=train_mode,\n batchnorm=config.batch_norm)\n train_mu, train_var = tf.nn.moments(train_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"train image mean\", train_mu)\n tf.summary.histogram(\"train image std\", tf.sqrt(train_var))\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv train', model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(model.deconv, axis=3), tf.float32), 3))\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n print 'Creating validation graph:'\n val_model = model_file.model_struct()\n val_model.build(\n rgb=val_data_dict['image'],\n target_variables=val_data_dict)\n\n # Calculate validation accuracy\n val_mu, val_var = tf.nn.moments(val_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"validation image mean\", val_mu)\n tf.summary.histogram(\"validation image std\", tf.sqrt(val_var))\n if 'label' in val_data_dict.keys():\n # val_score = tf.reduce_mean(\n # tf_fun.l2_loss(\n # val_model.output, val_data_dict['label']))\n if config.keep_dims 
== 3:\n z_mask = tf.expand_dims(tf.tile([1, 1, 0], [int(val_data_dict['label'].get_shape()[-1]) // 3]), axis=0)\n z_mask = tf.cast(z_mask, tf.float32)\n val_model.output = val_model.output * z_mask\n val_data_dict['label'] = val_data_dict['label'] * z_mask \n val_score = tf.reduce_mean(tf.nn.l2_loss(val_model.output - val_data_dict['label']))\n tf.summary.scalar(\"validation mse\", val_score)\n if 'fc' in config.aux_losses:\n tf.summary.image('FC val activations', val_model.final_fc)\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv val', val_model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(val_model.deconv, axis=3),\n tf.float32), 3))\n tf.summary.image(\n 'validation images',\n tf.cast(val_data_dict['image'], tf.float32))\n\n # Prepare the loss functions:::\n loss_list, loss_label = [], []\n if 'label' in train_data_dict.keys():\n # 1. Joint localization loss\n if config.calculate_per_joint_loss == 'thomas':\n label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton and joint':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n loss_label += ['skeleton loss']\n delta = model['output'] - train_data_dict['label']\n proc_weights = np.asarray(\n config.dim_weight)[None,:].repeat(\n len(config.joint_names), axis=0).reshape(1, -1)\n delta *= proc_weights\n # label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n # model=model,\n # train_data_dict=train_data_dict,\n # config=config,\n # y_key='label',\n # yhat_key='output')\n # loss_list += [label_loss]\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n else:\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n loss_label += ['combined head']\n for al in loss_helper.potential_aux_losses():\n loss_list, loss_label = loss_helper.get_aux_losses(\n loss_list=loss_list,\n loss_label=loss_label,\n train_data_dict=train_data_dict,\n model=model,\n aux_loss_dict=al,\n domain_adaptation=train_babas_tfrecord_dir)\n loss = tf.add_n(loss_list)\n\n # Add wd if necessary\n if config.wd_penalty is not None:\n _, l2_wd_layers = tf_fun.fine_tune_prepare_layers(\n tf.trainable_variables(), config.wd_layers)\n l2_wd_layers = [\n x for x in l2_wd_layers if 'biases' not in x.name]\n if config.wd_type == 'l1':\n loss += (config.wd_penalty * tf.add_n(\n [tf.reduce_sum(tf.abs(x)) for x in l2_wd_layers]))\n elif config.wd_type == 'l2':\n loss += (config.wd_penalty * tf.add_n(\n [tf.nn.l2_loss(x) for x in l2_wd_layers]))\n\n optimizer = loss_helper.return_optimizer(config.optimizer)\n optimizer = optimizer(config.lr)\n\n if hasattr(config, 'fine_tune_layers') and config.fine_tune_layers is not None:\n print 'Finetuning learning for: %s' % config.fine_tune_layers\n train_op, grads = tf_fun.finetune_learning(\n loss,\n trainables=tf.trainable_variables(),\n fine_tune_layers=config.fine_tune_layers,\n config=config\n )\n else:\n # Op to calculate every variable gradient\n 
grads = optimizer.compute_gradients(\n loss, tf.trainable_variables())\n # Op to update all variables according to their gradient\n train_op = optimizer.apply_gradients(\n grads_and_vars=grads)\n\n # Summarize all gradients and weights\n [tf.summary.histogram(\n var.name + '/gradient', grad)\n for grad, var in grads if grad is not None]\n # train_op = optimizer.minimize(loss)\n\n # Summarize losses\n [tf.summary.scalar(lab, il) for lab, il in zip(\n loss_label, loss_list)]\n\n # Summarize images and l1 weights\n tf.summary.image(\n 'train images',\n tf.cast(train_data_dict['image'], tf.float32))\n tf_fun.add_filter_summary(\n trainables=tf.trainable_variables(),\n target_layer='conv1_1_filters')\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n tf.add_to_collection('output', model.output)\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Create list of variables to run through training model\n train_session_vars = {\n 'train_op': train_op,\n 'loss_value': loss,\n 'im': train_data_dict['image'],\n 'yhat': model.output,\n 'ytrue': train_data_dict['label']\n }\n if hasattr(model, 'deconv'):\n train_session_vars['deconv'] = model.deconv\n if hasattr(model, 'final_fc'):\n train_session_vars['fc'] = model.final_fc\n\n # Create list of variables to run through validation model\n val_session_vars = {\n 'val_acc': val_score,\n 'val_pred': val_model.output,\n 'val_ims': val_data_dict['image'],\n 'val_true': val_data_dict['label'],\n }\n\n # Create list of variables to save to numpys\n save_training_vars = [\n 'im',\n 'yhat',\n 'ytrue',\n 'yhat'\n ]\n\n for al in loss_helper.potential_aux_losses():\n if al.keys()[0] in train_data_dict.keys():\n y_key = '%s' % al.keys()[0]\n train_session_vars[y_key] = train_data_dict[al.values()[0]['y_name']]\n save_training_vars += [y_key]\n\n yhat_key = '%s_hat' % al.keys()[0]\n train_session_vars[yhat_key] = model[al.values()[0]['model_name']]\n save_training_vars += [yhat_key]\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, losses = 0, []\n num_joints = int(\n train_data_dict['label'].get_shape()[-1]) // config.keep_dims\n normalize_vec = tf_fun.get_normalization_vec(config, num_joints)\n if config.resume_from_checkpoint is not None:\n if '.ckpt' in config.resume_from_checkpoint:\n ckpt = config.resume_from_checkpoint\n 'Restoring specified checkpoint: %s' % config.resume_from_checkpoint\n else:\n ckpt = tf.train.latest_checkpoint(config.resume_from_checkpoint)\n print 'Evaluating checkpoint: %s' % ckpt\n saver.restore(sess, ckpt)\n try:\n while not coord.should_stop():\n start_time = time.time()\n train_out_dict = sess.run(train_session_vars.values())\n train_out_dict = {k: v for k, v in zip(\n train_session_vars.keys(), train_out_dict)}\n losses.append(train_out_dict['loss_value'])\n duration = time.time() - start_time\n assert not np.isnan(\n train_out_dict['loss_value']), 'Model diverged with loss = NaN'\n if step % config.steps_before_validation == 0:\n if validation_data is not False:\n val_out_dict = 
sess.run(\n val_session_vars.values())\n val_out_dict = {k: v for k, v in zip(\n val_session_vars.keys(), val_out_dict)}\n # if config.normalize_labels:\n # val_out_dict['val_pred'] *= normalize_vec\n # val_out_dict['val_true'] *= normalize_vec\n np.savez(\n os.path.join(\n results_dir, '%s_val_coors' % step),\n val_pred=val_out_dict['val_pred'],\n val_ims=val_out_dict['val_ims'],\n val_true=val_out_dict['val_true'],\n normalize_vec=normalize_vec)\n with open(\n os.path.join(\n results_dir, '%s_config.p' % step), 'wb') as fp:\n pickle.dump(config, fp)\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy attach 9177\n format_str = (\n '%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch) | '\n 'Validation l2 loss = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, train_out_dict['loss_value'],\n config.train_batch / duration, float(duration),\n val_out_dict['val_acc'],\n config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if config.normalize_labels:\n train_out_dict['yhat'] *= normalize_vec\n train_out_dict['ytrue'] *= normalize_vec\n [save_training_data(\n output_dir=results_dir,\n data=train_out_dict[k],\n name='%s_%s' % (k, step)) for k in save_training_vars]\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch)')\n print (format_str % (\n datetime.now(),\n step,\n train_out_dict['loss_value'],\n config.train_batch / duration,\n float(duration)))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%s_training_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()",
"def define_datasets(cfg, to_run=None):\n\n # Create the training dataset with info from the cfg file.\n train_dataset = dp.AlexNetDataset(\n file_names=cfg.train_tfrecord_filepaths,\n batch_size=cfg.batch_size,\n num_epochs=cfg.num_epochs,\n shuffle_buffer_size=cfg.shuffle_buffer_size,\n image_size=cfg.image_size,\n rgb_mean=cfg.rgb_mean,\n keep_prob=cfg.keep_prob,\n num_threads=cfg.num_dataset_threads,\n to_run=to_run\n )\n\n # Add random crop, random flip and pixel distortions to the images.\n train_dataset.random_crop(cfg.crop_image_size)\n if not cfg.flip_constrain_fc6:\n train_dataset.random_flip()\n if cfg.rgb_distort:\n train_dataset.rgb_distort(\n rgb_eigenvectors=cfg.rgb_eigenvectors,\n rgb_eigenvalues=cfg.rgb_eigenvalues,\n stddev=cfg.rgb_stddev\n )\n\n # Create the training eval dataset with info from the cfg file.\n train_eval_dataset = dp.AlexNetDataset(\n file_names=cfg.train_tfrecord_filepaths,\n batch_size=cfg.batch_size,\n max_iterations=cfg.train_eval_max_iterations,\n shuffle_buffer_size=cfg.shuffle_buffer_size,\n image_size=cfg.image_size,\n rgb_mean=cfg.rgb_mean,\n keep_prob=1.0,\n num_threads=cfg.num_dataset_threads,\n to_run=to_run\n )\n\n # Center crop the images\n train_eval_dataset.center_crop(cfg.crop_image_size)\n\n # Create the training dataset with info from the cfg file.\n val_eval_dataset = dp.AlexNetDataset(\n file_names=cfg.validation_tfrecord_filepaths,\n batch_size=cfg.batch_size,\n num_epochs=1,\n shuffle_buffer_size=cfg.shuffle_buffer_size,\n image_size=cfg.image_size,\n rgb_mean=cfg.rgb_mean,\n keep_prob=1.0,\n num_threads=cfg.num_dataset_threads,\n to_run=to_run\n )\n\n # Center crop the images\n val_eval_dataset.center_crop(cfg.crop_image_size)\n\n # Create the iterator shared by all three dataset\n data_iter = train_dataset.reinitializable_iterator()\n\n # Create the initializers of the datasets\n train_initializer = train_dataset.iterator_initializer(data_iter)\n train_eval_initializer = train_eval_dataset.iterator_initializer(data_iter)\n val_eval_initializer = val_eval_dataset.iterator_initializer(data_iter)\n\n return data_iter, train_dataset, train_initializer, train_eval_initializer, val_eval_initializer",
"def main(config_file):\n \n # Load the configuration from json file\n assert os.path.isfile(\n config_file), \"No json configuration file found at {}\".format(config_file)\n config = utils.LoadConfig(config_file)\n\n # use GPU if available\n config.cuda = torch.cuda.is_available()\n\n # Set the random seed for reproducible experiments\n torch.manual_seed(config.general['seed'])\n if config.cuda:\n torch.cuda.manual_seed(config.general['seed'])\n \n #Generate output path if it does not exist\n out_dir = config.general['out_dir']\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n #Save config file\n config.save(os.path.join(out_dir, 'experiment_config.json'))\n\n # Set the logger\n utils.set_logger(os.path.join(out_dir, 'train.log'))\n\n # Create the input data pipeline\n logging.info(\"Loading the datasets...\")\n\n # Load data\n train, test = read_and_format_full_dataset()\n train_kaggle, test_kaggle = read_and_format_kaggle_dataset()\n \n #Using kaggle's training data for training\n train, val = split_train_val_partition(train_kaggle, config.data['split_train_percentage'],config.general['seed'])\n \n #Adding data augmentation to training\n # train = MNISTDatasetLabels(train,\n # transform=transforms.Compose([\n # Normalization(),\n # transforms.RandomHorizontalFlip(0.5),\n # transforms.RandomVerticalFlip(0.5),\n # transforms.RandomPerspective(),\n # transforms.RandomRotation(30)])) \n \n train = MNISTDatasetLabels(train,\n transform=transforms.Compose([\n Normalization(),\n transforms.RandomRotation(15)])) \n \n val = MNISTDatasetLabels(val,\n transform=transforms.Compose([Normalization()])) \n \n test = MNISTDatasetLabels(test,\n transform=transforms.Compose([Normalization()])) \n \n test_kaggle = MNISTDatasetNoLabels(test_kaggle,\n transform=transforms.Compose([Normalization()])) \n \n train_dataloader = DataLoader(train, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n val_dataloader = DataLoader(val, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n test_dataloader = DataLoader(test, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n test_kaggle_dataloader = DataLoader(test_kaggle, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n\n logging.info(\"- done.\")\n \n # Train the model\n logging.info(\"Starting training for {} epoch(s)\".format(config.CNN_train['num_epochs']))\n train_wraper(train_dataloader, val_dataloader, config)\n logging.info(\"- done.\")\n \n #Evaluate the model test set \n # Using Kaggle's test set unknown labels (can have true labels or not (Kaggle's case))\n logging.info(\"Starting the model evaluation on Kaggle's test data\")\n eval_out_kaggle = evaluate_return_labels(test_kaggle_dataloader, config)\n #Save the results\n eval_out_kaggle.to_csv(os.path.join(out_dir, 'test_result_kaggle.csv'),index=False)\n logging.info(\"- done.\")\n \n # Using test set with known labels\n logging.info(\"Starting the model evaluation on test data\")\n eval_out = evaluate_return_labels(test_dataloader, config)\n #Save the results\n eval_out.to_csv(os.path.join(out_dir, 'test_result.csv'),index=False)\n logging.info(\"- done.\")\n \n # Compute metrics\n if 'TrueLabel' in eval_out:\n #Evaluate the model with test set (known labels)\n logging.info(\"Calculating final metrics\")\n # Get unique true labels in dataset\n classes = eval_out.TrueLabel.unique()\n # 
Sort them\n classes.sort()\n # Calculate accuracy\n accuracy_total = accuracy(eval_out)\n # Calculate error rate\n error_rate_total = error_rate(eval_out)\n # Confussion matrix\n c_matrix = confusion_matrix(eval_out, classes)\n plot_confusion_matrix(c_matrix, classes, 'CNN', out_dir)\n # Overall metrics\n metrics_per_class, metrics_overall = confusion_matrix_metrics(c_matrix)\n metrics_overall['accuracy_percent'] = accuracy_total\n metrics_overall['error_rate_percent'] = error_rate_total\n \n metrics_per_class.to_csv(os.path.join(out_dir, 'CNN_results_per_class.csv'))\n metrics_overall.to_csv(os.path.join(out_dir, 'CNN_results_overall.csv'))\n \n logging.info(\"- done.\")",
"def train(self, config, **kwargs):\n\n config_parameters = utils.parse_config_or_kwargs(config, **kwargs)\n outputdir = Path(\n config_parameters['outputpath'], config_parameters['model'],\n \"{}_{}\".format(\n datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%m'),\n uuid.uuid1().hex[:8]))\n # Early init because of creating dir\n checkpoint_handler = ModelCheckpoint(\n outputdir,\n 'run',\n n_saved=1,\n require_empty=False,\n create_dir=True,\n score_function=lambda engine: -engine.state.metrics['Loss'],\n save_as_state_dict=False,\n score_name='loss')\n logger = utils.getfile_outlogger(Path(outputdir, 'train.log'))\n logger.info(\"Storing files in {}\".format(outputdir))\n # utils.pprint_dict\n utils.pprint_dict(config_parameters, logger.info)\n logger.info(\"Running on device {}\".format(DEVICE))\n labels_df = pd.read_csv(config_parameters['trainlabel'], sep=' ')\n labels_df['encoded'], encoder = utils.encode_labels(\n labels=labels_df['bintype'])\n train_df, cv_df = utils.split_train_cv(labels_df)\n\n transform = utils.parse_transforms(config_parameters['transforms'])\n utils.pprint_dict({'Classes': encoder.classes_},\n logger.info,\n formatter='pretty')\n utils.pprint_dict(transform, logger.info, formatter='pretty')\n if 'sampler' in config_parameters and config_parameters[\n 'sampler'] == 'MinimumOccupancySampler':\n # Asserts that each \"batch\" contains at least one instance\n train_sampler = dataset.MinimumOccupancySampler(\n np.stack(train_df['encoded'].values))\n\n sampling_kwargs = {\"sampler\": train_sampler, \"shuffle\": False}\n elif 'shuffle' in config_parameters and config_parameters['shuffle']:\n sampling_kwargs = {\"shuffle\": True}\n else:\n sampling_kwargs = {\"shuffle\": False}\n\n logger.info(\"Using Sampler {}\".format(sampling_kwargs))\n\n colname = config_parameters.get('colname', ('filename', 'encoded')) #\n trainloader = dataset.getdataloader(\n train_df,\n config_parameters['traindata'],\n transform=transform,\n batch_size=config_parameters['batch_size'],\n colname=colname, # For other datasets with different key names\n num_workers=config_parameters['num_workers'],\n **sampling_kwargs)\n cvdataloader = dataset.getdataloader(\n cv_df,\n config_parameters['traindata'],\n transform=None,\n shuffle=False,\n colname=colname, # For other datasets with different key names\n batch_size=config_parameters['batch_size'],\n num_workers=config_parameters['num_workers'])\n if 'pretrained' in config_parameters and config_parameters[\n 'pretrained'] is not None:\n model = models.load_pretrained(config_parameters['pretrained'],\n outputdim=len(encoder.classes_))\n else:\n model = getattr(models, config_parameters['model'],\n 'LightCNN')(inputdim=trainloader.dataset.datadim,\n outputdim=len(encoder.classes_),\n **config_parameters['model_args'])\n\n if config_parameters['optimizer'] == 'AdaBound':\n try:\n import adabound\n optimizer = adabound.AdaBound(\n model.parameters(), **config_parameters['optimizer_args'])\n except ImportError:\n logger.info(\n \"Adabound package not found, install via pip install adabound. 
Using Adam instead\"\n )\n config_parameters['optimizer'] = 'Adam'\n config_parameters['optimizer_args'] = {\n } # Default adam is adabount not found\n else:\n optimizer = getattr(\n torch.optim,\n config_parameters['optimizer'],\n )(model.parameters(), **config_parameters['optimizer_args'])\n\n utils.pprint_dict(optimizer, logger.info, formatter='pretty')\n utils.pprint_dict(model, logger.info, formatter='pretty')\n if DEVICE.type != 'cpu' and torch.cuda.device_count() > 1:\n logger.info(\"Using {} GPUs!\".format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n criterion = torch.nn.CrossEntropyLoss().to(DEVICE)\n model = model.to(DEVICE)\n\n precision = Precision()\n recall = Recall()\n f1_score = (precision * recall * 2 / (precision + recall)).mean()\n metrics = {\n 'Loss': Loss(criterion),\n 'Precision': precision.mean(),\n 'Recall': recall.mean(),\n 'Accuracy': Accuracy(),\n 'F1': f1_score,\n }\n\n # batch contains 3 elements, X,Y and filename. Filename is only used\n # during evaluation\n def _prep_batch(batch, device=DEVICE, non_blocking=False):\n x, y, _ = batch\n return (convert_tensor(x, device=device,\n non_blocking=non_blocking),\n convert_tensor(y, device=device,\n non_blocking=non_blocking))\n\n train_engine = create_supervised_trainer(model,\n optimizer=optimizer,\n loss_fn=criterion,\n prepare_batch=_prep_batch,\n device=DEVICE)\n inference_engine = create_supervised_evaluator(\n model, metrics=metrics, prepare_batch=_prep_batch, device=DEVICE)\n\n RunningAverage(output_transform=lambda x: x).attach(\n train_engine, 'run_loss') # Showing progressbar during training\n pbar = ProgressBar(persist=False)\n pbar.attach(train_engine, ['run_loss'])\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n patience=3,\n factor=0.1)\n\n @inference_engine.on(Events.COMPLETED)\n def update_reduce_on_plateau(engine):\n val_loss = engine.state.metrics['Loss']\n if 'ReduceLROnPlateau' == scheduler.__class__.__name__:\n scheduler.step(val_loss)\n else:\n scheduler.step()\n\n early_stop_handler = EarlyStopping(\n patience=5,\n score_function=lambda engine: -engine.state.metrics['Loss'],\n trainer=train_engine)\n inference_engine.add_event_handler(Events.EPOCH_COMPLETED,\n early_stop_handler)\n inference_engine.add_event_handler(Events.EPOCH_COMPLETED,\n checkpoint_handler, {\n 'model': model,\n 'encoder': encoder,\n 'config': config_parameters,\n })\n\n @train_engine.on(Events.EPOCH_COMPLETED)\n def compute_validation_metrics(engine):\n inference_engine.run(cvdataloader)\n results = inference_engine.state.metrics\n output_str_list = [\n \"Validation Results - Epoch : {:<5}\".format(engine.state.epoch)\n ]\n for metric in metrics:\n output_str_list.append(\"{} {:<5.3f}\".format(\n metric, results[metric]))\n logger.info(\" \".join(output_str_list))\n pbar.n = pbar.last_print_n = 0\n\n train_engine.run(trainloader, max_epochs=config_parameters['epochs'])\n return outputdir",
"def train_on_tasks(config):\n seed = config['seed']\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n tasks = config.pop('tasks')\n\n task_vis_params = config.pop('vis_params')\n\n # all_stats = []\n transfer_matrix = defaultdict(list)\n total_steps = 0\n\n if 'learner' in config:\n learner = config.pop('learner')\n else:\n learner_path = config.pop('learner_path')\n learner = torch.load(learner_path)\n task_level_tuning = config.pop('task_level_tuning')\n if task_level_tuning:\n ray_params = config.pop('ray_params')\n local_mode = config.pop('local_mode')\n redis_address = config.pop('redis_address')\n all_analysis = []\n selected_tags = []\n for t_id, (task, vis_p) in enumerate(zip(tasks, task_vis_params)):\n #todo sync transfer matrix\n static_params = dict(\n t_id=t_id, task=task, tasks=tasks, vis_p=vis_p,\n transfer_matrix=transfer_matrix, total_steps=total_steps\n )\n\n if task_level_tuning:\n if not ray.is_initialized():\n if local_mode:\n ray.init(local_mode=local_mode)\n else:\n ray.init(redis_address,\n log_to_driver=False,\n logging_level=logging.ERROR)\n\n config['static_params'] = static_params\n config['learner_path'] = learner_path\n config['seed'] += t_id\n\n # reporter = CLIReporter(max_progress_rows=10)\n # print(reporter._metric_columns)\n # print(reporter.DEFAULT_COLUMNS)\n # reporter.add_metric_column('avg_acc_val')\n # reporter.add_metric_column('total_params')\n # reporter.add_metric_column('fw_t')\n # reporter.add_metric_column('data_t')\n # reporter.add_metric_column('eval_t')\n # reporter.add_metric_column('epoch_t')\n # reporter.add_metric_column('total_t')\n # ray_params['progress_reporter'] = reporter\n analysis = tune.run(train_t, config=config, **ray_params)\n\n all_analysis.append(analysis)\n\n def get_key(trial):\n # return trial.last_result['avg_acc_val_so_far']\n return trial.last_result['best_val']\n best_trial = max(analysis.trials, key=get_key)\n for trial in analysis.trials:\n if trial != best_trial:\n trial_path = trial.logdir\n shutil.rmtree(trial_path)\n # am = np.argmax(list(map(get_key, analysis.trials)))\n # print(\"BEST IS {}: {}\".format(am, best_trial.last_result['avg_acc_val']))\n\n # t = best_trial.last_result['duration_iterations']\n total_steps = best_trial.last_result['total_steps']\n selected_tags.append(best_trial.experiment_tag)\n best_learner_path = os.path.join(best_trial.logdir, 'learner.pth')\n learner = torch.load(best_learner_path, map_location='cpu')\n shutil.rmtree(best_trial.logdir)\n\n #todo UPDATE LEARNER AND SAVE\n torch.save(learner, learner_path)\n else:\n rescaled, t, metrics, b_state_dict, \\\n stats = train_single_task(config=deepcopy(config), learner=learner,\n **static_params)\n\n # all_stats.append(stats)\n # update_rescaled(list(rescaled.values()), list(rescaled.keys()), tag,\n # g_task_vis, False)\n\n if task_level_tuning:\n return all_analysis, selected_tags\n else:\n save_path = path.join(tune.get_trial_dir(), 'learner.pth')\n logger.info('Saving {} to {}'.format(learner, save_path))\n torch.save(learner, save_path)",
"def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%",
"def main(config):\n config = load_config_from_yaml(args.config_filepath)\n\n cv_folds_dir = config['input']['cv_folds']\n image_dir = config['input']['image_dir']\n image_name = image_dir.strip('/').split('/')[-1]\n output_dir = os.path.join(cv_folds_dir, image_name)\n if not os.path.exists(output_dir):\n print(f'Generating output directory {output_dir}')\n os.makedirs(output_dir)\n\n stratify = config['data'].get('stratify')\n min_samples = config['data'].get('min_samples', 50)\n df_mela = data_utils.load_data(config['input']['train'],\n duplicate_path=config['input'].get('duplicates'),\n cv_folds_dir=config['input'].get('cv_folds'),\n external_filepaths=config['input'].get('external_filepaths'),\n keep_prob=config['default'].get('keep_prob', 1))\n df_mela['image_dir'] = df_mela['source'].map(config['input']['image_map'])\n df_mela['meta_dir'] = df_mela['image_dir'].apply(lambda x: os.path.join(x, 'metadata'))\n\n fold_ids = config['default'].get('fold_ids', list(set(df_mela['fold'].tolist())))\n for fold in fold_ids:\n fold_dir = os.path.join(output_dir, f'fold_{fold}')\n if not os.path.exists(fold_dir):\n os.makedirs(fold_dir)\n df_train = df_mela.loc[(df_mela['fold'] != fold) &\n (df_mela['fold'] != 'test')].reset_index(drop=True)\n\n fold_stats = {}\n if stratify is not None:\n groups = list(product(*[list(df_train[col].unique()) for col in stratify]))\n for group in groups:\n n = len(group)\n for i in range(n):\n col = stratify[i]\n v = group[i]\n if col not in fold_stats.keys():\n fold_stats[col] = {}\n if v not in fold_stats[col].keys():\n df_train = data_utils.fill_na(df_train, col, how='unknown')\n if v in ['missing', 'unknown']:\n # use all samples to compute image statistics for missing values\n df_value = df_train\n else:\n df_value = df_train.loc[df_train[col] == v]\n if len(df_value) < min_samples:\n df_value = df_train\n fold_stats[col][v] = generate_img_stats(df_value,\n name=f'fold {fold} = ({col} == {v})')\n for j in range(i+1, n):\n # subset the train data to samples with multiple conditions\n # need to handle when a value is missing/unknown\n sub_cols = stratify[i:j+1]\n sub_group = group[i:j+1]\n conditions = lambda df: [\n df[sub_cols[i]] == sub_group[i]\n for i\n in range(len(sub_cols))\n if sub_group[i] not in ['missing', 'unknown']\n ]\n df_sub = utils.subset_df(df_train, conditions)\n if len(df_sub) < min_samples:\n df_sub = df_train\n # add the image stats for the subset to our dict\n sub_dict = utils.deep_get(fold_stats, *[v for l in zip(sub_cols[:j], sub_group[:j]) for v in l])\n if sub_cols[j] not in sub_dict.keys():\n sub_dict[sub_cols[j]] = {}\n name = f'fold {fold} - ' + ' & '.join([f'({sub_cols[i]} == {sub_group[i]})' for i in range(len(sub_cols))])\n sub_dict[sub_cols[j]][sub_group[j]] = generate_img_stats(\n df_sub,\n name=name\n )\n\n fold_stats_full = generate_img_stats(df_train,\n name=f'fold {fold}')\n fold_stats = dict(fold_stats, **fold_stats_full)\n\n with open(os.path.join(fold_dir, 'img_stats.json'), 'w') as f:\n json.dump(str(fold_stats), f)\n print(f'Saved to fold {fold} img stats to {fold_dir}')",
"def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, val_acc",
"def run(self, df: pd.DataFrame, model: Any):\n print(f'Running cross validation with the following model:\\n{model}')\n\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n\n date_1 = datetime.datetime(year=2016, month=1, day=1)\n date_2 = datetime.datetime(year=2016, month=4, day=1)\n date_3 = datetime.datetime(year=2016, month=7, day=1)\n date_4 = datetime.datetime(year=2016, month=10, day=1)\n date_5 = datetime.datetime(year=2017, month=1, day=1)\n\n summaries: List[FoldSummary] = []\n\n for train_start, train_end, test_start, test_end in [\n (date_1, date_2, date_2, date_3),\n # (date_1, date_3, date_3, date_4),\n # (date_1, date_4, date_4, date_5)\n ]:\n print('Calculating train and test datasets')\n train_df = df[(df['timestamp'] >= train_start) & (df['timestamp'] < train_end)]\n test_df = df[(df['timestamp'] >= test_start) & (df['timestamp'] < test_end)]\n\n columns = list(train_df.columns)\n columns.remove('timestamp')\n columns.remove('meter_reading')\n\n print(columns)\n\n train_data = train_df[columns]\n test_data = test_df[columns]\n\n print(f'Fitting the model on train dataset of size {len(train_data)}')\n model.fit(train_data, train_df['meter_reading'])\n print(f'Predicting for test dataset of size {len(test_data)}')\n predictions = model.predict(test_data)\n\n score = self._calculate_score(predictions, test_df['meter_reading'])\n print(f'Score: {score}')\n\n summaries.append(FoldSummary(\n train_start=train_start,\n train_end=train_end,\n test_start=test_start,\n test_end=test_end,\n score=score\n ))\n\n filename = f'../resources/runs/{time.time()}.txt'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'w+') as f:\n f.write(f'{model.__class__.__name__}\\n')\n f.write(f'{str(model.get_params())}\\n')\n for summary in summaries:\n f.write(f'Summary (\\n'\n f'\\ttrain start: {summary.train_start}\\n'\n f'\\ttrain end: {summary.train_end}\\n'\n f'\\ttest start: {summary.test_start}\\n'\n f'\\ttest end: {summary.test_end}\\n'\n f'\\tscore: {summary.score}\\n'\n f')\\n')\n\n print(summaries)\n\n return model",
"def train(path_to_config, verbose=False):\n config, paths, session_id = setup(path_to_config)\n assert isinstance(config, ExperimentConfig)\n logger = logging.getLogger(\"%s.train\" % config.name)\n\n results = []\n\n # Average results over `config.num_runs` runs\n for i in range(config.num_runs):\n logger.info(\"*\" * 80)\n logger.info(\"* %d. run for experiment %s\", (i + 1), config.name)\n logger.info(\"*\" * 80)\n network = Network(config, paths, session_id, i)\n\n network.build()\n num_actual_epochs, stopped_early = network.train(verbose=verbose, log_results_on_dev=True)\n run_results = network.evaluate(data_type=DATA_TYPE_DEV)\n results.append(run_results)\n\n for task_name, result_list in list(run_results.items()):\n assert isinstance(task_name, str)\n assert isinstance(result_list, ResultList)\n # Write a CSV file per task because each task may have different evaluation metrics\n csv_out_path = os.path.join(paths[\"session_out\"], \"session_results.task_%s.csv\" % task_name)\n network.log_result_list_csv(task_name, result_list, csv_out_path, {\n \"# planned epochs\": config.epochs,\n \"# actual epochs\": num_actual_epochs,\n \"stopped early?\": stopped_early,\n \"run\": i + 1\n })\n\n logger.info(\"*\" * 80)\n logger.info(\"\")\n\n # Reset tensorflow variables\n tf.compat.v1.reset_default_graph()\n\n logger.info(\"\")\n logger.info(\"Results after %d runs:\", config.num_runs)\n\n timestamp = time.strftime(\"%Y-%m-%d %H:%M\")\n\n for task in config.tasks:\n task_name = task.name\n task_results = [result[task_name] for result in results]\n # Write a CSV file per task because each task may have different evaluation metrics\n csv_file_path = os.path.join(paths[\"experiment_out\"], \"results.task_%s.csv\" % task_name)\n logger.info(\" - Task %s\", task_name)\n\n headers = [\"timestamp\", \"session_id\", \"num_runs\", \"task_name\"]\n values = [timestamp, session_id, config.num_runs, task_name]\n\n for metric in set(config.eval_metrics + task.eval_metrics):\n metric_values_sum = 0\n\n for result in task_results:\n metric_values_sum += result.compute_metric_by_name(metric)\n\n logger.info(\n \" - Average %s at task %s is %.3f\",\n metric.title(),\n task_name,\n metric_values_sum / float(config.num_runs)\n )\n\n headers += [\"AVG:%s\" % metric.title()]\n values += [metric_values_sum / float(config.num_runs)]\n\n append_to_csv(csv_file_path, headers=headers, values=values)",
"def run(num_epochs, encoded_dim):\n # for patient_ in get_patient_ids():\n for patient_ in ['16']:\n print(\"Starting on index: \" + str(patient_))\n training_ae(num_epochs, encoded_dim, patient_, True)\n print(\"Completed \" + str(patient_) + \" reconstruction and encoding, saved test data to assess performance\")",
"def run_training_and_tests(test_name, dataset, models, n_images = 1000, training_split = 0.7, \n n_training_images = None, n_test_images = None, \n n_iterations = 1, dimensions = (50, 50)):\n\n aggregate_metrics = {}\n\n # Run specified number of iterations\n for i in range(n_iterations):\n print(\"\\nTest iteration\", i+1)\n\n # Handle if specific training and test set size isn't given\n if (n_training_images is None):\n n_training_images = n_images * training_split\n if (n_test_images is None):\n n_test_images = n_images * (1-training_split)\n\n # Load training and test sets from single dataset\n train_data, train_labels, test_data, test_labels = image_utils.read_dataset(\n n_training_images, \n n_test_images, \n './datasets/' + dataset, \n dimensions[0], \n dimensions[1]\n )\n\n # Train and run tests for each model\n for model in models:\n print(\"Working with model '\" + model.label + \"'\")\n\n # Train model\n start = time.time()\n model.train(copy.deepcopy(train_data), train_labels)\n end = time.time()\n training_time = round(end - start, 3)\n\n # Run predictions on test set\n start = time.time()\n predicted = model.run(copy.deepcopy(test_data))\n end = time.time()\n test_time = round(end - start, 3)\n\n # Calculate metrics and store for aggregate calculations\n metrics = Metrics(test_labels, predicted, training_time, test_time)\n if model.label in aggregate_metrics:\n aggregate_metrics[model.label].append(metrics)\n else:\n aggregate_metrics[model.label] = [metrics]\n\n # Print results\n print(\"Results\\n\" + \"------\")\n print(str(metrics))\n\n # Save model\n filepath = \"./test/\" + test_name + \"/\" + model.label + \"/iteration\" + str(i+1) + \"/\"\n print(\"Saving model to '\" + filepath + model.label + \".joblib'\")\n os.makedirs(os.path.dirname(filepath), exist_ok = True)\n with open(filepath + model.label + '.joblib', 'wb') as file:\n dump(model, file)\n\n # Save results\n print(\"Saving results to '\" + filepath + \"results.txt'\\n\")\n with open(filepath + \"results.txt\", 'w') as file:\n file.write(str(metrics))\n\n # Calculate, print and write aggregate metrics\n print(\n 'Aggregate Results' + '\\n' +\n '-----------------'\n )\n for model in models:\n aggregate = combine_metrics(aggregate_metrics[model.label])\n print(model.label)\n print(aggregate)\n filepath = \"./test/\" + test_name + \"/\" + model.label + \"/\"\n print(\"Saving results to '\" + filepath + \"aggregate_results.txt'\" + \"\\n -- -\\n\")\n with open(filepath + \"aggregate_results.txt\", 'w') as file:\n file.write(str(aggregate))",
"def test_veco_xnli(self):\n\n from datasets import load_dataset\n langs = ['en']\n langs_eval = ['en']\n train_datasets = []\n from datasets import DownloadConfig\n dc = DownloadConfig()\n dc.local_files_only = False\n for lang in langs:\n train_datasets.append(\n load_dataset('xnli', lang, split='train', download_config=dc))\n eval_datasets = []\n for lang in langs_eval:\n eval_datasets.append(\n load_dataset(\n 'xnli', lang, split='validation', download_config=dc))\n train_len = sum([len(dataset) for dataset in train_datasets])\n labels = ['0', '1', '2']\n\n def cfg_modify_fn(cfg):\n cfg.task = 'nli'\n cfg['preprocessor'] = {'type': 'nli-tokenizer'}\n cfg['dataset'] = {\n 'train': {\n 'first_sequence': 'premise',\n 'second_sequence': 'hypothesis',\n 'labels': labels,\n 'label': 'label',\n }\n }\n cfg['train'] = {\n 'work_dir':\n '/tmp',\n 'max_epochs':\n 2,\n 'dataloader': {\n 'batch_size_per_gpu': 16,\n 'workers_per_gpu': 0\n },\n 'optimizer': {\n 'type': 'AdamW',\n 'lr': 2e-5,\n 'options': {\n 'cumulative_iters': 8,\n }\n },\n 'lr_scheduler': {\n 'type': 'LinearLR',\n 'start_factor': 1.0,\n 'end_factor': 0.0,\n 'total_iters': int(train_len / 16) * 2,\n 'options': {\n 'by_epoch': False\n }\n },\n 'hooks': [{\n 'type': 'CheckpointHook',\n 'interval': 1,\n }, {\n 'type': 'TextLoggerHook',\n 'interval': 1\n }, {\n 'type': 'IterTimerHook'\n }, {\n 'type': 'EvaluationHook',\n 'by_epoch': False,\n 'interval': 500\n }]\n }\n cfg['evaluation'] = {\n 'dataloader': {\n 'batch_size_per_gpu': 128,\n 'workers_per_gpu': 0,\n 'shuffle': False\n }\n }\n return cfg\n\n self.finetune(\n 'damo/nlp_veco_fill-mask-large',\n train_datasets,\n eval_datasets,\n name=Trainers.nlp_veco_trainer,\n cfg_modify_fn=cfg_modify_fn)",
"def run_experiment ( X, y, model_call, param_grid = None, scoring_func = accuracy,cv = KFoldStratifiedCV ( number_of_folds = 5 ),):\n\n scores = []\n iteration = 0\n # Iterate through the split\n for train, test in cv.split ( y ):\n # If first iteration and k values are passed, get the best one\n if iteration == 0 and param_grid:\n k = choose_k (\n X [ train ], y [ train ], model_call, param_grid, scoring_func, cv = cv )\n logger.info ( f\"Choosing k= { k } \" )\n else:\n # Defaults to 1 for condensed.\n k = 1\n\n iteration += 1\n\n # Instantiate the model with the value of k\n model = model_call ( k = k )\n\n # Standardize the data\n standardizer = Standardizer ( mean = True, std = True )\n\n # Fit the model\n model.fit ( X = standardizer.fit_transform ( X [ train ] ), y = y [ train ] )\n\n # make test set predictions\n y_pred = model.predict ( X = standardizer.transform ( X [ test ] ) )\n\n # Append the score\n scores.append ( scoring_func ( y [ test ], y_pred ) )\n \n logger.info ( f\"Avg Score: { np.mean ( scores ) } \" )\n \n return model\n # End run_experiment()",
"def train(self, config: ConfigurationNode = None):\n if config is None:\n config = self.config\n # Create writable timestamp for easier record keeping\n timestamp = datetime.now().isoformat(sep=\"T\", timespec=\"auto\")\n name_timestamp = timestamp.replace(\":\", \"_\")\n\n # Start the mlflow run:\n mlflow.start_run(run_name=name_timestamp)\n\n # Check valid output path, set path from the path_cfg_override modules respectively\n assert config.OUTPUT_PATH != ''\n path_output = config.OUTPUT_PATH # output folder\n path_train = config.DATASET.TRAIN_DATA_PATH # training data folder\n path_val = config.DATASET.VAL_DATA_PATH # validation data folder\n\n # Make output dir and its parents if not exist.\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Make result folders if they do not exist.\n self.results_dir = (Path(path_output) / name_timestamp)\n if not os.path.exists(self.results_dir):\n os.makedirs(self.results_dir)\n\n # Make backup folders if they do not exist.\n self.backup_dir = os.path.join(self.results_dir, 'model_backups')\n if not os.path.exists(self.backup_dir):\n os.makedirs(self.backup_dir)\n\n writer_tensorboard = SummaryWriter(log_dir=Path(self.results_dir / \"logs_tensorflow\"))\n\n # Now that CFG has been properly merged with new data along the way, time to dump a version of it into a string for trackability purposes.\n config.dump(stream=open(os.path.join(self.results_dir, f'config{name_timestamp}.yaml'), 'w'))\n\n # file path to store the state of the model.\n state_fpath = os.path.join(self.results_dir, f'model{name_timestamp}.pt')\n\n # ????\n perf_path = os.path.join(self.results_dir, f'trace{name_timestamp}.p')\n perf_trace = []\n\n # Load data, create the data loader objects from them.\n data_train = pickle.load(open(path_train, 'rb'))\n data_val = pickle.load(open(path_val, 'rb'))\n self.loader_train = build_data_loader(data_train, config.DATASET, True)\n self.loader_val = build_data_loader(data_val, config.DATASET, False)\n\n # Build the model using configue dict node\n self.model = build_model(config.MODEL)\n\n # Enable parallel multi GPU mode if the config specify it.\n if config.MODEL.PARALLEL:\n print(\"Utilized parallel processing\")\n self.model = torch.nn.DataParallel(self.model)\n\n current_epoch = 0\n\n # For resuming training (i.e. load checkpoint)\n if config.RESUME_PATH != \"\":\n checkpoint = torch.load(config.RESUME_PATH, map_location='cpu')\n current_epoch = checkpoint['epoch']\n self.model.load_state_dict(checkpoint[\"model_state\"])\n _ = self.model.cuda()\n\n # SOLVER EVALUATOR\n cfg_solver = config.MODEL.SOLVER\n\n # Build optimizer (between train/validation, using the solver portion of the configuration.\n optimizer = build_optimizer(self.model, cfg_solver)\n\n # Build evaluator (between train/validation, using the solver portion of the configuration.\n evaluator = build_evaluator(cfg_solver)\n\n evaluator.float().cuda()\n total_epochs = cfg_solver.TOTAL_EPOCHS\n\n\n # Main training epoch loop starts here.\n for epoch in range(current_epoch, total_epochs):\n\n # Train a single epoch\n self.train_epoch(epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard)\n\n mlflow.end_run()",
"def main(_):\n utility.set_up_logging()\n if not FLAGS.config:\n raise KeyError('You must specify a configuration.')\n logdir = FLAGS.logdir and os.path.expanduser(os.path.join(\n FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))\n try:\n config = utility.load_config(logdir)\n except IOError:\n config = tools.AttrDict(getattr(configs, FLAGS.config)())\n config = utility.save_config(config, logdir)\n for score in train(config, FLAGS.env_processes):\n tf.logging.info('Score {}.'.format(score))",
"def split_data_metrics_learning(cfg):\n actual_pose = cfg['actual_pose']\n target = cfg['target']\n person_ids = cfg['person_ids']\n \n # Split train and val data based on the person ids.\n all_ids = np.arange(1, 21)\n val_ids = cfg['val_ids']\n train_ids = set(all_ids).symmetric_difference(val_ids)\n \n anchor_gallery_split_size = cfg['anchor_gallery_split_size']\n window_width = cfg['window_width']\n overlap = cfg['overlap']\n random_state = cfg['random_state']\n \n # Get only the training set data and the label.\n X_train, y_train = get_req_ids(actual_pose, target, train_ids, person_ids)\n \n # Select the evaluation data that measures the performance of the model on the training set.\n train_accuracy_ids = random.sample(train_ids, len(val_ids))\n X_train_acc, y_train_acc = get_req_ids(actual_pose, target, train_accuracy_ids, person_ids)\n \n # Anchor/Gallery set split for the training set.\n X_train_gal, X_train_anchor, y_train_gal, y_train_anchor = train_test(X_train = X_train_acc, y_train = y_train_acc, \n test_size=anchor_gallery_split_size, \n random_state=random_state, stratify=y_train_acc)\n \n # Subsample the gait sequences of the anchor/gallery set of the training set based on the window width and the overlap.\n X_train_gal, y_train_gal = subsample(cfg, X_train_gal, y_train_gal, window_width=window_width, overlap=overlap)\n X_train_anchor, y_train_anchor = subsample(cfg, X_train_anchor, y_train_anchor, window_width=window_width, overlap=overlap)\n \n # Get only the validation set data and the label.\n X_val, y_val = get_req_ids(actual_pose, target, val_ids, person_ids)\n \n # Anchor/Gallery set split for the validation set.\n X_val_gal, X_val_anchor, y_val_gal, y_val_anchor = train_test(X_train = X_val, \n y_train = y_val, \n test_size=anchor_gallery_split_size, \n random_state=random_state, \n stratify=y_val)\n \n \n # If data augmentation parameter is set to True in the configuration dictionary, data augmentation is done for the training set.\n if cfg['augment_data']:\n X_train, y_train = augment_data(X_train, y_train)\n \n # Subsample the gait sequences of the whole training set based on the window width and the overlap.\n X_train, y_train = subsample(cfg, X_train, y_train, window_width=window_width, overlap=overlap)\n \n # Subsample the gait sequences of the anchor/gallery set of the validation set based on the window width and the overlap.\n X_val_gal, y_val_gal = subsample(cfg, X_val_gal, y_val_gal, window_width=window_width, overlap=overlap)\n X_val_anchor, y_val_anchor = subsample(cfg, X_val_anchor, y_val_anchor, window_width=window_width, overlap=overlap)\n \n # Concatenate the gallery and anchor set of the validation data and label as a whole. This is just to maintain the train-val uniformity and \n # is not used anywhere in the project.\n X_val, y_val = np.concatenate((X_val_gal, X_val_anchor)), np.concatenate((y_val_gal, y_val_anchor))\n \n return X_train, X_val, X_train_gal, X_train_anchor, X_val_gal, X_val_anchor, y_train, y_val, y_train_gal, y_train_anchor, y_val_gal, y_val_anchor",
"def main(config_path):\n # load the config file\n cfg = AttrDict.from_json_path(config_path)\n\n # Make Outputs directories\n out_path = os.path.join(cfg.path.output, cfg.exp_name)\n out_path_selfsup = os.path.join(out_path, 'classification_pretrain/')\n out_path_sup = os.path.join(out_path, 'supervised_train/')\n os.makedirs(out_path_selfsup, exist_ok=True)\n for k in range(cfg.Sup.split.n_fold):\n os.makedirs(os.path.join(out_path_sup, f'Fold_{k+1}/pred/'), exist_ok=True)\n\n # Initialize random seed\n if cfg.seed != -1:\n random.seed(cfg.seed)\n np.random.seed(cfg.seed)\n torch.manual_seed(cfg.seed)\n torch.cuda.manual_seed(cfg.seed)\n torch.cuda.manual_seed_all(cfg.seed)\n torch.backends.cudnn.deterministic = True\n\n # Set number of thread\n if cfg.n_thread > 0: torch.set_num_threads(cfg.n_thread)\n # set device\n if cfg.device:\n cfg.device = torch.device(cfg.device)\n else:\n cfg.device = get_available_device()\n\n ###################################################\n # Self-supervised training on Context Restoration #\n ###################################################\n # Initialize Logger\n logger = initialize_logger(os.path.join(out_path_selfsup, 'log.txt'))\n if os.path.exists(os.path.join(out_path_selfsup, f'checkpoint.pt')):\n logger.info('\\n' + '#'*30 + f'\\n Recovering Session \\n' + '#'*30)\n logger.info(f\"Experiment : {cfg.exp_name}\")\n\n # Load RSNA data csv\n df_rsna = pd.read_csv(os.path.join(cfg.path.data.SSL, 'slice_info.csv'), index_col=0)\n\n # Keep only fractions sample\n if cfg.SSL.dataset.n_data_0 >= 0:\n df_rsna_noICH = df_rsna[df_rsna.Hemorrhage == 0].sample(n=cfg.SSL.dataset.n_data_0, random_state=cfg.seed)\n else:\n df_rsna_noICH = df_rsna[df_rsna.Hemorrhage == 0]\n if cfg.SSL.dataset.n_data_1 >= 0:\n df_rsna_ICH = df_rsna[df_rsna.Hemorrhage == 1].sample(n=cfg.SSL.dataset.n_data_1, random_state=cfg.seed)\n else:\n df_rsna_ICH = df_rsna[df_rsna.Hemorrhage == 1]\n df_rsna = pd.concat([df_rsna_ICH, df_rsna_noICH], axis=0)\n\n # Split data to keep few for evaluation in a strafied way\n train_df, test_df = train_test_split(df_rsna, test_size=cfg.SSL.dataset.frac_eval, stratify=df_rsna.Hemorrhage, random_state=cfg.seed)\n logger.info('\\n' + str(get_split_summary_table(df_rsna, train_df, test_df)))\n\n # Make dataset : Train --> BinaryClassification, Test --> BinaryClassification\n train_RSNA_dataset = RSNA_dataset(train_df, cfg.path.data.SSL,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.SSL.dataset.augmentation.train.items()],\n window=(cfg.data.win_center, cfg.data.win_width), output_size=cfg.data.size,\n mode='binary_classification')\n test_RSNA_dataset = RSNA_dataset(test_df, cfg.path.data.SSL,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.SSL.dataset.augmentation.eval.items()],\n window=(cfg.data.win_center, cfg.data.win_width), output_size=cfg.data.size,\n mode='binary_classification')\n\n logger.info(f\"Data will be loaded from {cfg.path.data.SSL}.\")\n logger.info(f\"CT scans will be windowed on [{cfg.data.win_center-cfg.data.win_width/2} ; {cfg.data.win_center + cfg.data.win_width/2}]\")\n logger.info(f\"CT scans will be resized to {cfg.data.size}x{cfg.data.size}\")\n logger.info(f\"Training online data transformation: \\n\\n {str(train_RSNA_dataset.transform)}\\n\")\n logger.info(f\"Evaluation online data transformation: \\n\\n {str(test_RSNA_dataset.transform)}\\n\")\n\n # Make U-Net-Encoder architecture\n net_ssl = UNet_Encoder(**cfg.SSL.net).to(cfg.device)\n 
net_params = [f\"--> {k} : {v}\" for k, v in cfg.SSL.net.items()]\n logger.info(\"UNet like Binary Classifier \\n\\t\" + \"\\n\\t\".join(net_params))\n logger.info(f\"The Binary Classifier has {sum(p.numel() for p in net_ssl.parameters())} parameters.\")\n\n # Make Model\n cfg.SSL.train.model_param.lr_scheduler = getattr(torch.optim.lr_scheduler, cfg.SSL.train.model_param.lr_scheduler) # convert scheduler name to scheduler class object\n cfg.SSL.train.model_param.loss_fn = getattr(torch.nn, cfg.SSL.train.model_param.loss_fn) # convert loss_fn name to nn.Module class object\n w_ICH = train_df.Hemorrhage.sum() / len(train_df) # define CE weighting from train dataset\n cfg.SSL.train.model_param.loss_fn_kwargs['weight'] = torch.tensor([1 - w_ICH, w_ICH], device=cfg.device).float() # add weighting to CE kwargs\n\n classifier = BinaryClassifier(net_ssl, device=cfg.device, print_progress=cfg.print_progress, **cfg.SSL.train.model_param)\n\n train_params = [f\"--> {k} : {v}\" for k, v in cfg.SSL.train.model_param.items()]\n logger.info(\"Classifer Training Parameters \\n\\t\" + \"\\n\\t\".join(train_params))\n\n # Load weights if specified\n if cfg.SSL.train.model_path_to_load:\n model_path = cfg.SSL.train.model_path_to_load\n classifier.load_model(model_path, map_location=cfg.device)\n logger.info(f\"Classifer Model succesfully loaded from {cfg.SSL.train.model_path_to_load}\")\n\n # train if needed\n if cfg.SSL.train.model_param.n_epoch > 0:\n classifier.train(train_RSNA_dataset, valid_dataset=test_RSNA_dataset,\n checkpoint_path=os.path.join(out_path_selfsup, f'checkpoint.pt'))\n\n # evaluate\n auc, acc, recall, precision, f1 = classifier.evaluate(test_RSNA_dataset, save_tsne=False, return_scores=True)\n logger.info(f\"Classifier Test AUC : {auc:.2%}\")\n logger.info(f\"Classifier Test Accuracy : {acc:.2%}\")\n logger.info(f\"Classifier Test Recall : {recall:.2%}\")\n logger.info(f\"Classifier Test Precision : {precision:.2%}\")\n logger.info(f\"Classifier Test F1-score : {f1:.2%}\")\n\n # save model, outputs\n classifier.save_model(os.path.join(out_path_selfsup, 'pretrained_unet_enc.pt'))\n logger.info(\"Pre-trained U-Net encoder on binary classification saved at \" + os.path.join(out_path_selfsup, 'pretrained_unet_enc.pt'))\n classifier.save_outputs(os.path.join(out_path_selfsup, 'outputs.json'))\n logger.info(\"Classifier outputs saved at \" + os.path.join(out_path_selfsup, 'outputs.json'))\n test_df.reset_index(drop=True).to_csv(os.path.join(out_path_selfsup, 'eval_data_info.csv'))\n logger.info(\"Evaluation data info saved at \" + os.path.join(out_path_selfsup, 'eval_data_info.csv'))\n\n # delete any checkpoints\n if os.path.exists(os.path.join(out_path_selfsup, f'checkpoint.pt')):\n os.remove(os.path.join(out_path_selfsup, f'checkpoint.pt'))\n logger.info('Checkpoint deleted.')\n\n # get weights state dictionnary\n pretrained_unet_weights = classifier.get_state_dict()\n\n ###################################################################\n # Supervised fine-training of U-Net with K-Fold Cross-Validation #\n ###################################################################\n # load annotated data csv\n data_info_df = pd.read_csv(os.path.join(cfg.path.data.Sup, 'ct_info.csv'), index_col=0)\n patient_df = pd.read_csv(os.path.join(cfg.path.data.Sup, 'patient_info.csv'), index_col=0)\n\n # Make K-Fold spolit at patient level\n skf = StratifiedKFold(n_splits=cfg.Sup.split.n_fold, shuffle=cfg.Sup.split.shuffle, random_state=cfg.seed)\n\n # define scheduler and loss_fn as object\n 
cfg.Sup.train.model_param.lr_scheduler = getattr(torch.optim.lr_scheduler, cfg.Sup.train.model_param.lr_scheduler) # convert scheduler name to scheduler class object\n cfg.Sup.train.model_param.loss_fn = getattr(src.models.optim.LossFunctions, cfg.Sup.train.model_param.loss_fn) # convert loss_fn name to nn.Module class object\n\n # iterate over folds\n for k, (train_idx, test_idx) in enumerate(skf.split(patient_df.PatientNumber, patient_df.Hemorrhage)):\n # check if fold's results already exists\n if not os.path.exists(os.path.join(out_path_sup, f'Fold_{k+1}/outputs.json')):\n # initialize logger\n logger = initialize_logger(os.path.join(out_path_sup, f'Fold_{k+1}/log.txt'))\n if os.path.exists(os.path.join(out_path_sup, f'Fold_{k+1}/checkpoint.pt')):\n logger.info('\\n' + '#'*30 + f'\\n Recovering Session \\n' + '#'*30)\n logger.info(f\"Experiment : {cfg['exp_name']}\")\n logger.info(f\"Cross-Validation fold {k+1:02}/{cfg['Sup']['split']['n_fold']:02}\")\n\n # extract train/test slice dataframe\n train_df = data_info_df[data_info_df.PatientNumber.isin(patient_df.loc[train_idx,'PatientNumber'].values)]\n test_df = data_info_df[data_info_df.PatientNumber.isin(patient_df.loc[test_idx,'PatientNumber'].values)]\n # samples train dataframe to adjuste negative/positive fractions\n n_remove = int(max(0, len(train_df[train_df.Hemorrhage == 0]) - cfg.Sup.dataset.frac_negative * len(train_df[train_df.Hemorrhage == 1])))\n df_remove = train_df[train_df.Hemorrhage == 0].sample(n=n_remove, random_state=cfg.seed)\n train_df = train_df[~train_df.index.isin(df_remove.index)]\n logger.info('\\n' + str(get_split_summary_table(data_info_df, train_df, test_df)))\n\n # Make datasets\n train_dataset = public_SegICH_Dataset2D(train_df, cfg.path.data.Sup,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.Sup.dataset.augmentation.train.items()],\n window=(cfg.data.win_center, cfg.data.win_width), output_size=cfg.data.size)\n test_dataset = public_SegICH_Dataset2D(test_df, cfg.path.data.Sup,\n augmentation_transform=[getattr(tf, tf_name)(**tf_kwargs) for tf_name, tf_kwargs in cfg.Sup.dataset.augmentation.eval.items()],\n window=(cfg.data.win_center, cfg.data.win_width), output_size=cfg.data.size)\n logger.info(f\"Data will be loaded from {cfg.path.data.Sup}.\")\n logger.info(f\"CT scans will be windowed on [{cfg.data.win_center-cfg.data.win_width/2} ; {cfg.data.win_center + cfg.data.win_width/2}]\")\n logger.info(f\"CT scans will be resized to {cfg.data.size}x{cfg.data.size}\")\n logger.info(f\"Training online data transformation: \\n\\n {str(train_dataset.transform)}\\n\")\n logger.info(f\"Evaluation online data transformation: \\n\\n {str(test_dataset.transform)}\\n\")\n\n # Make U-Net architecture\n unet_sup = UNet(**cfg.Sup.net).to(cfg.device)\n net_params = [f\"--> {k} : {v}\" for k, v in cfg.Sup.net.items()]\n logger.info(\"UNet-2D params \\n\\t\" + \"\\n\\t\".join(net_params))\n logger.info(f\"The U-Net2D has {sum(p.numel() for p in unet_sup.parameters())} parameters.\")\n\n # Make Model\n unet2D = UNet2D(unet_sup, device=cfg.device, print_progress=cfg.print_progress, **cfg.Sup.train.model_param)\n\n train_params = [f\"--> {k} : {v}\" for k, v in cfg.Sup.train.model_param.items()]\n logger.info(\"UNet-2D Training Parameters \\n\\t\" + \"\\n\\t\".join(train_params))\n\n # ????? 
load model if specified ?????\n\n # transfer weights learn with context restoration\n logger.info('Initialize U-Net2D with weights learned with context_restoration on RSNA.')\n unet2D.transfer_weights(pretrained_unet_weights, verbose=True)\n\n # Train U-net\n eval_dataset = test_dataset if cfg.Sup.train.validate_epoch else None\n unet2D.train(train_dataset, valid_dataset=eval_dataset, checkpoint_path=os.path.join(out_path_sup, f'Fold_{k+1}/checkpoint.pt'))\n\n # Evaluate U-Net\n unet2D.evaluate(test_dataset, save_path=os.path.join(out_path_sup, f'Fold_{k+1}/pred/'))\n\n # Save models and outputs\n unet2D.save_model(os.path.join(out_path_sup, f'Fold_{k+1}/trained_unet.pt'))\n logger.info(\"Trained U-Net saved at \" + os.path.join(out_path_sup, f'Fold_{k+1}/trained_unet.pt'))\n unet2D.save_outputs(os.path.join(out_path_sup, f'Fold_{k+1}/outputs.json'))\n logger.info(\"Trained statistics saved at \" + os.path.join(out_path_sup, f'Fold_{k+1}/outputs.json'))\n\n # delete checkpoint if exists\n if os.path.exists(os.path.join(out_path_sup, f'Fold_{k+1}/checkpoint.pt')):\n os.remove(os.path.join(out_path_sup, f'Fold_{k+1}/checkpoint.pt'))\n logger.info('Checkpoint deleted.')\n\n # save mean +/- 1.96 std Dice over Folds\n save_mean_fold_dice(out_path_sup, cfg.Sup.split.n_fold)\n logger.info('Average Scores saved at ' + os.path.join(out_path_sup, 'average_scores.txt'))\n\n # Save all volumes prediction csv\n df_list = [pd.read_csv(os.path.join(out_path_sup, f'Fold_{i+1}/pred/volume_prediction_scores.csv')) for i in range(cfg.Sup.split.n_fold)]\n all_df = pd.concat(df_list, axis=0).reset_index(drop=True)\n all_df.to_csv(os.path.join(out_path_sup, 'all_volume_prediction.csv'))\n logger.info('CSV of all volumes prediction saved at ' + os.path.join(out_path_sup, 'all_volume_prediction.csv'))\n\n # Save config file\n cfg.device = str(cfg.device)\n cfg.SSL.train.model_param.lr_scheduler = str(cfg.SSL.train.model_param.lr_scheduler)\n cfg.Sup.train.model_param.lr_scheduler = str(cfg.Sup.train.model_param.lr_scheduler)\n cfg.SSL.train.model_param.loss_fn = str(cfg.SSL.train.model_param.loss_fn)\n cfg.Sup.train.model_param.loss_fn = str(cfg.Sup.train.model_param.loss_fn)\n cfg.SSL.train.model_param.loss_fn_kwargs.weight = cfg.SSL.train.model_param.loss_fn_kwargs.weight.cpu().data.tolist()\n with open(os.path.join(out_path, 'config.json'), 'w') as fp:\n json.dump(cfg, fp)\n logger.info('Config file saved at ' + os.path.join(out_path, 'config.json'))\n\n # Analyse results\n analyse_supervised_exp(out_path_sup, cfg.path.data.Sup, n_fold=cfg.Sup.split.n_fold,\n config_folder=out_path, save_fn=os.path.join(out_path, 'results_supervised_overview.pdf'))\n logger.info('Results overview figure saved at ' + os.path.join(out_path, 'results_supervised_overview.pdf'))\n analyse_representation_exp(out_path_selfsup, save_fn=os.path.join(out_path, 'results_self-supervised_overview.pdf'))\n logger.info('Results overview figure saved at ' + os.path.join(out_path, 'results_self-supervised_overview.pdf'))",
"def test(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n \r\n dataset.set_split('test')\r\n batch_generator = generate_nmt_batches(dataset, \r\n batch_size=len(dataset), \r\n device=args.device)\r\n\r\n acc_sum = 0.0\r\n model.eval()\r\n \r\n for batch_index, batch_dict in enumerate(batch_generator):\r\n # step 1. compute the output\r\n if isinstance(model,NMTModelWithMLTM):\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_mltm_vector'],\r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n else:\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n\r\n acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)\r\n acc_sum += acc_t\r\n \r\n return acc_sum / (batch_index+1)",
"def test_run(self):\n sut = ExperimentEmail()\n train = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n val = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n outdir = tempfile.mkdtemp()\n\n # Act\n sut.run(train, val, outdir, batch_size=32, epochs=2)",
"def train(self, training_data, cfg, **kwargs):\n pass",
"def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. 
'\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))",
"def fewshot_eval_model(experiment_name, task_name, mt, eval_data, batch_size, \n k=0, random_seed=0, n=None, prompt_data=None, \n instructions=None, answers=None, template_id=0, cot_reasons=None,\n max_decode_steps=128, extract_answers=None,\n trigger_phrase=None,\n print_examples=0, print_all_wrong=False):\n # argument checks\n if k > 0 and prompt_data is None: \n assert len(prompt_data) >= 1, f\"need to provide prompt data of at least len {k}\"\n # define stats\n n_correct = 0\n n_str_em = 0\n n_datapoints = 0\n all_preds = []\n all_labels = []\n # task specific info\n task_name_to_hendrycks_em_group_by = {\n 'commonsense': 1,\n 'deontology': 4,\n 'justice': 4,\n 'utilitarianism': 1,\n 'virtue': 1, # we treat as multiple choice\n 'trolley' : 1,\n 'factual' : 1,\n 'counterfact' : 1,\n }\n if 'virtue' in task_name:\n assert answers is None, \"do not use answers with virtue subset\"\n if answers and not extract_answers:\n extract_answers = answers\n # subsample eval data if requested\n if n is not None:\n eval_data_loop = eval_data.sample(n=n, random_state=random_seed, replace=False)\n else:\n eval_data_loop = eval_data\n # begin eval loop\n # calculate query batch size based on if len(inputs) * len(answers) can fit in BATCH_SIZE query to model\n effective_batch_size = batch_size if not answers else batch_size // len(extract_answers)\n n_chunks = np.ceil(len(eval_data_loop) / effective_batch_size)\n for batch_num, batch in enumerate(np.array_split(eval_data_loop, n_chunks)):\n if batch_num > 0:\n running_acc = n_correct / n_datapoints \n check_answers = extract_answers if answers is None else answers\n prop_invalid_preds = compute_prop_invalid_preds(all_preds, check_answers)\n start = '\\r' # '\\n' if batch_num < 3 else \n print(f\"{start}Batch {batch_num-1} | Acc: {100*running_acc:.2f} | Invalid: {100*prop_invalid_preds:.2f}\", end=\"\")\n # make inputs and labels:\n query_inputs = []\n for test_input in batch.input:\n query_input = format_prompt_from_df(prompt_data, test_input, answers=answers, instructions=instructions, cot_reasons=cot_reasons, separator='\\n', template_id=template_id)\n query_inputs.append(query_input)\n labels = batch.label_str\n # make multiple choice answers for virtue\n if 'virtue' in task_name:\n answers = []\n for answer_list in batch.answers:\n answers.append(answer_list.split(','))\n answers = np.array(answers)\n # query model. 
query inputs may be editing when doing chain_of_thought multiple choice\n with torch.no_grad():\n preds, scores, query_inputs = predict_model(mt, \n query_inputs, \n answers, \n trigger_phrase=trigger_phrase, \n max_decode_steps=max_decode_steps)\n # record stats\n # first case is when we are generating predictions and extracting answers from them\n if answers is None and extract_answers is not None:\n batch_n_correct, correct_vec = first_appearance_fewshot_accuracy_sum(preds, labels, \n extract_answers=extract_answers, \n trigger_phrase=trigger_phrase,\n return_vec=True)\n else:\n batch_n_correct, correct_vec = fewshot_accuracy_sum(preds, labels, return_vec=True)\n n_correct += batch_n_correct\n n_str_em += em_accuracy_sum(preds, labels)\n n_datapoints += len(batch)\n all_preds.extend(list(preds))\n all_labels.extend(list(labels))\n if (print_examples>0 and batch_num == 0):\n print_idx = np.arange(min(print_examples, len(batch)))\n elif print_all_wrong:\n print_idx = np.argwhere(1-correct_vec).reshape(-1)\n else:\n print_idx = np.array([])\n if len(print_idx) > 0:\n print(f\"\\nExamples from batch {batch_num}...\")\n print(\"--------\")\n for i in print_idx:\n print(f\"Example {i}\")\n print(f\"point: \\n{batch.input.iloc[i]}\")\n print(f\"prompt: \\n{query_inputs[i]}\")\n print(\"pred: \", preds[i])\n print(\"label: \", labels.iloc[i])\n if isinstance(answers, np.ndarray):\n print(\"anwers: \", answers[i])\n print(\"exact scores: \", scores[i])\n print(\"correct: \", correct_vec[i])\n if 'completion' in batch.columns:\n print(\"gpt completion: \", batch.completion.iloc[i])\n print(\"--------\")\n print(f\"Examples acc: {correct_vec[print_idx].mean():.2f}\")\n print(\"--------\\n\")\n del batch, preds, labels, scores\n # calculate EM from Hendrycks et al paper\n group_by = task_name_to_hendrycks_em_group_by[task_name]\n hendrycks_em = get_hendrycks_em(all_preds, all_labels, answers, group_by)\n # make df with results\n results_dict = {\n 'exp_name' : experiment_name,\n 'task_name' : task_name,\n 'k' : k,\n 'n' : n,\n 'seed' : random_seed,\n 'acc' : n_correct / n_datapoints,\n 'acc_em' : n_str_em / n_datapoints,\n 'hendrycks_em': hendrycks_em,\n 'prop_invalid': compute_prop_invalid_preds(all_preds, answers)\n }\n results = pd.DataFrame.from_dict({k : [v] for k,v in results_dict.items()})\n print(\"\\nRESULTS:\")\n for k,v in results_dict.items():\n if any([x in k for x in ['acc', 'em', 'prop']]):\n v = f\"{100*v:.2f}\"\n print(f\" {k}: {str(v):10s}\")\n return results"
]
| [
"0.7001025",
"0.6034314",
"0.6033947",
"0.5972015",
"0.5899449",
"0.58167803",
"0.5807292",
"0.57877874",
"0.576847",
"0.57546633",
"0.5752527",
"0.57516176",
"0.5747905",
"0.57385457",
"0.57250816",
"0.57234114",
"0.5702539",
"0.5689327",
"0.5673698",
"0.5648171",
"0.56365484",
"0.56198615",
"0.5616211",
"0.56073177",
"0.56049967",
"0.5602834",
"0.5602525",
"0.5596302",
"0.55843216",
"0.55613554"
]
| 0.6170723 | 1 |
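
The `fewshot_eval_model` code in the record above sizes its query batches so that `len(inputs) * len(answers)` scored sequences still fit in one model call (`effective_batch_size = batch_size // len(extract_answers)` when candidate answers are scored). Below is a minimal, self-contained sketch of that chunking arithmetic; `make_chunks` is a hypothetical helper written only for illustration and is not part of the original code.

```python
import numpy as np

# Hypothetical helper mirroring the batching arithmetic in fewshot_eval_model above:
# when each input is scored against every candidate answer, the model sees
# len(inputs) * len(answers) sequences per call, so the per-chunk row count shrinks.
def make_chunks(rows, batch_size, answers=None):
    effective = batch_size if not answers else batch_size // len(answers)
    n_chunks = int(np.ceil(len(rows) / effective))
    return np.array_split(np.arange(len(rows)), n_chunks)

chunks = make_chunks(list(range(10)), batch_size=8, answers=["yes", "no"])
print([len(c) for c in chunks])  # 8 // 2 = 4 rows per chunk -> chunk sizes [4, 3, 3]
```
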
Get Sign Documents. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. | def app_get_sign_documents_post(self, request, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.app_get_sign_documents_post_with_http_info(request, **kwargs)
else:
(data) = self.app_get_sign_documents_post_with_http_info(request, **kwargs)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def app_get_sign_documents_post_with_http_info(self, request, **kwargs):\n\n all_params = ['request']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method app_get_sign_documents_post\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'request' is set\n if ('request' not in params) or (params['request'] is None):\n raise ValueError(\"Missing the required parameter `request` when calling `app_get_sign_documents_post`\")\n\n\n collection_formats = {}\n\n resource_path = '/App/GetSignDocuments'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'request' in params:\n body_params = params['request']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/xml'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='GetSignDocumentsResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def app_sign_documents_post_with_http_info(self, request, **kwargs):\n\n all_params = ['request']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method app_sign_documents_post\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'request' is set\n if ('request' not in params) or (params['request'] is None):\n raise ValueError(\"Missing the required parameter `request` when calling `app_sign_documents_post`\")\n\n\n collection_formats = {}\n\n resource_path = '/App/SignDocuments'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'request' in params:\n body_params = params['request']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/xml'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='SignDocumentsResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def app_sign_documents_post(self, request, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.app_sign_documents_post_with_http_info(request, **kwargs)\n else:\n (data) = self.app_sign_documents_post_with_http_info(request, **kwargs)\n return data",
"def get_documents(self):\n documents = self.tree.execute(\"$.documents\")\n for doc in documents:\n sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}\n self.document_dict[doc['@id']] = {'sentences': sentences,\n 'location': doc['location']}\n return",
"def Documents(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('documents', default)\n return [HEP.DocumentObject(i) for i in tmp]",
"def getDocument(cred, documentPath):\n url = cred.base_url + \"documents/\" + documentPath\n\n return makeRequest(cred, url, 'GET')",
"def get_documents(self, project_id, param=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' \n response = zoho_http_client.get(url, self.details, param)\n return parser.get_documents(response)",
"def retrieve_document(self, format):\n self._validate_extension(format.upper(), self.ALLOWED_DOCUMENT_EXT)\n return self.client.service.RetrieveDocument(format=format.upper()).decode('base64')",
"def get_all_signing_certs(self, marker=None, max_items=None,\r\n user_name=None):\r\n params = {}\r\n if marker:\r\n params['Marker'] = marker\r\n if max_items:\r\n params['MaxItems'] = max_items\r\n if user_name:\r\n params['UserName'] = user_name\r\n return self.get_response('ListSigningCertificates',\r\n params, list_marker='Certificates')",
"def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))",
"def getDocuments(self):\n return self.objectValues('Multifile')",
"def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]",
"def retrieveDocuments(self):\n documents = {}\n for reuter in self.REUTERS_FILES:\n print(reuter)\n reuter_stream = open(self.BASEPATH + reuter, encoding=\"latin-1\")\n reuter_content = reuter_stream.read()\n soup = BeautifulSoup(reuter_content, \"html.parser\")\n articles = soup.find_all('reuters')\n for article in articles:\n body = \"\"\n title = \"\"\n words = \"\"\n newid = article['newid']\n if not article.title is None:\n title = article.title.string\n if not article.body is None:\n body = article.body.string\n words = title + \" \" + body\n documents[newid] = words\n print(f\"Retrieval Complete! - Total Documents: {len(documents)}\")\n return documents",
"def get_document_list(\n self,\n project_id: int,\n url_parameters: dict = {}\n ) -> requests.models.Response:\n return self.get(\n 'v1/projects/{project_id}/docs{url_parameters}'.format(\n project_id=project_id,\n url_parameters=self.build_url_parameter(url_parameters)\n )\n )",
"def get_document(self, *args, **kwargs):\n return self._documents_manager.get_document(*args, **kwargs)",
"def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p",
"def getDocumentAll(self, query = {}, keys = []):\n query = query or {}\n if \"include_docs\" not in query:\n query[\"include_docs\"] = True\n\n if not keys:\n return self.client.get(self.name +\"/_all_docs\", query)\n else:\n return self.client.post(self.name +\"/_all_docs\", query,\n {\"keys\": keys}).getBodyData()",
"def gist_documents(self, username, max_docs=None):\n r = self.requests_session.get(self.gist_path.format(username=username))\n if r.status_code != 200:\n self.log(f\"Couldn't get gists for {username}\", \"ERROR\")\n return\n\n docs_fetched = 0\n for d in r.json():\n\n docs_fetched += 1\n yield d\n \n if docs_fetched >= self.items_per_page:\n # this will only print once \n # TODO pagination\n msg = (f\"TODO pagination not enabled so gists by user:{username} might have be \"\n f\"skipped as they have written more than {self.items_per_page} gists.\"\n )\n self.log(msg, \"WARNING\")\n\n if max_docs is not None and docs_fetched > max_docs:\n return",
"def get_documents(doc_type):\n doc_type = 1 if doc_type == 'registration' else 2\n return Documents.query.filter_by(type=doc_type).all()",
"def get_documents(self, engine_name, document_ids):\n endpoint = \"engines/{}/documents\".format(engine_name)\n data = json.dumps(document_ids)\n return self.swiftype_session.request('get', endpoint, data=data)",
"def authentication_document(self):\n return Response(\n self.manager.authentication_for_opds_document,\n 200,\n {\n \"Content-Type\": AuthenticationForOPDSDocument.MEDIA_TYPE\n }\n )",
"def build_documents(self, *args, **kwargs):\n docs_manager = self._documents_manager\n resources_tree = self._resources_manager.resources_tree\n return docs_manager.build_documents(resources_tree, *args, **kwargs)",
"def getMyDocuments( self, REQUEST=None ):\n membership = getToolByName( self, 'portal_membership', None )\n if membership is None:\n return\n\n user = membership.getAuthenticatedMember()\n uname = user.getUserName()\n\n total_objects, documents = self.searchResults( type='documents', with_limit=1, REQUEST=REQUEST, \\\n creator=uname, implements='isHTMLDocument', sort_on='created', sort_order='reverse', \\\n sort_limit=50 )\n\n return ( total_objects, documents, )",
"def get_signing_redirect(template_num):\n\n # Create the signer recipient model\n signer = Signer( # The signer\n email = signer_email, name = signer_name, recipient_id = \"1\",\n client_user_id = client_user_id, role_name = \"doctor\"# Setting the client_user_id marks the signer as embedded\n )\n\n # Create the server and inline templates for the envelope\n\n server_template1 = ServerTemplate(\n sequence = \"1\",\n template_id = template_ids[0]\n )\n\n server_template2 = ServerTemplate(\n sequence = \"2\",\n template_id = template_ids[1]\n )\n\n inline_template = InlineTemplate(\n sequence = \"3\",\n recipients = Recipients(signers = [signer])\n )\n\n # Create the composite template\n composite_template1 = CompositeTemplate(\n inline_templates = [inline_template],\n server_templates = [server_template1]\n )\n\n composite_template2 = CompositeTemplate(\n inline_templates = [inline_template],\n server_templates = [server_template2]\n )\n\n composite_templates = [composite_template1] if template_num == 0 else [composite_template2] if template_num == 1 else [composite_template1, composite_template2]\n\n # Next, create the top level envelope definition and populate it.\n envelope_definition = EnvelopeDefinition(\n email_subject = \"Please sign this document sent from the Python SDK\",\n composite_templates = composite_templates,\n status = \"sent\" # requests that the envelope be created and sent.\n )\n\n #\n # Step 2. Create/send the envelope.\n #\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(\"Authorization\", \"Bearer \" + access_token)\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.create_envelope(account_id, envelope_definition=envelope_definition)\n\n #\n # Step 3. The envelope has been created.\n # Request a Recipient View URL (the Signing Ceremony URL)\n #\n redirect_url = base_url + '/signing_complete' if template_num == 0 else base_url + '/report_complete'\n envelope_id = results.envelope_id\n recipient_view_request = RecipientViewRequest(\n authentication_method = authentication_method, client_user_id = client_user_id,\n recipient_id = '1', return_url = redirect_url,\n user_name = signer_name, email = signer_email\n )\n\n results = envelope_api.create_recipient_view(account_id, envelope_id,\n recipient_view_request = recipient_view_request)\n\n #\n # Step 4. The Recipient View URL (the Signing Ceremony URL) has been received.\n # Redirect the user's browser to it.\n #\n return results.url",
"def get_documents(self, index, **kwargs):\n return self._build_search(index, **kwargs).params(request_timeout=2000).scan()",
"def getDocument(self, *args):\n return _libsbml.SBMLValidator_getDocument(self, *args)",
"def documents(self):\r\n return doc.Documents(self)",
"async def get_document(self):\n try:\n document = await self.request.json()\n except json.JSONDecodeError:\n document = {}\n\n return document",
"def getDocument(self, *args):\n return _libsbml.SBMLConverter_getDocument(self, *args)",
"def documents(self) -> list[str]:\n return self._documents"
]
| [
"0.681581",
"0.6148953",
"0.5785778",
"0.54764587",
"0.5414924",
"0.534441",
"0.5274601",
"0.5208355",
"0.5207844",
"0.5171612",
"0.51489496",
"0.50897735",
"0.5016967",
"0.49052817",
"0.489447",
"0.488639",
"0.48830014",
"0.48812646",
"0.48810694",
"0.48215222",
"0.48051473",
"0.47768903",
"0.47729856",
"0.4739622",
"0.47355708",
"0.47177836",
"0.46817932",
"0.46770525",
"0.4665682",
"0.46423653"
]
| 0.73364204 | 0 |
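
The wrapper in the record above dispatches on whether a `callback` keyword was supplied: without one it blocks and returns the deserialized response, with one it delegates to the `_with_http_info` variant so the result is delivered to the callback instead. The following is a self-contained mock of that dispatch pattern, assuming nothing about the real SDK beyond what the snippet shows; `FakeSignApi` and its canned response are placeholders, and the real method performs an HTTP POST rather than returning local data.

```python
# Minimal stand-in for the generated client, illustrating the sync-vs-callback contract.
class FakeSignApi:
    def get_sign_documents_with_http_info(self, request, **kwargs):
        data = {"documents": ["contract.pdf"], "echo": request}  # pretend HTTP response body
        callback = kwargs.get("callback")
        if callback:
            callback(data)      # asynchronous style: result handed to the caller's callback
            return None
        return data             # synchronous style: result returned directly

    def get_sign_documents(self, request, **kwargs):
        kwargs["_return_http_data_only"] = True
        if kwargs.get("callback"):
            return self.get_sign_documents_with_http_info(request, **kwargs)
        data = self.get_sign_documents_with_http_info(request, **kwargs)
        return data

api = FakeSignApi()
print(api.get_sign_documents({"envelope_id": "123"}))           # blocking call, returns data
api.get_sign_documents({"envelope_id": "123"}, callback=print)  # callback receives the data
```
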
Get Sign Documents. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. | def app_get_sign_documents_post_with_http_info(self, request, **kwargs):
all_params = ['request']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method app_get_sign_documents_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'request' is set
if ('request' not in params) or (params['request'] is None):
raise ValueError("Missing the required parameter `request` when calling `app_get_sign_documents_post`")
collection_formats = {}
resource_path = '/App/GetSignDocuments'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request' in params:
body_params = params['request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/xml'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetSignDocumentsResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def app_get_sign_documents_post(self, request, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.app_get_sign_documents_post_with_http_info(request, **kwargs)\n else:\n (data) = self.app_get_sign_documents_post_with_http_info(request, **kwargs)\n return data",
"def app_sign_documents_post_with_http_info(self, request, **kwargs):\n\n all_params = ['request']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method app_sign_documents_post\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'request' is set\n if ('request' not in params) or (params['request'] is None):\n raise ValueError(\"Missing the required parameter `request` when calling `app_sign_documents_post`\")\n\n\n collection_formats = {}\n\n resource_path = '/App/SignDocuments'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'request' in params:\n body_params = params['request']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/xml'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='SignDocumentsResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def app_sign_documents_post(self, request, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.app_sign_documents_post_with_http_info(request, **kwargs)\n else:\n (data) = self.app_sign_documents_post_with_http_info(request, **kwargs)\n return data",
"def get_documents(self):\n documents = self.tree.execute(\"$.documents\")\n for doc in documents:\n sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}\n self.document_dict[doc['@id']] = {'sentences': sentences,\n 'location': doc['location']}\n return",
"def Documents(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('documents', default)\n return [HEP.DocumentObject(i) for i in tmp]",
"def getDocument(cred, documentPath):\n url = cred.base_url + \"documents/\" + documentPath\n\n return makeRequest(cred, url, 'GET')",
"def get_documents(self, project_id, param=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' \n response = zoho_http_client.get(url, self.details, param)\n return parser.get_documents(response)",
"def retrieve_document(self, format):\n self._validate_extension(format.upper(), self.ALLOWED_DOCUMENT_EXT)\n return self.client.service.RetrieveDocument(format=format.upper()).decode('base64')",
"def get_all_signing_certs(self, marker=None, max_items=None,\r\n user_name=None):\r\n params = {}\r\n if marker:\r\n params['Marker'] = marker\r\n if max_items:\r\n params['MaxItems'] = max_items\r\n if user_name:\r\n params['UserName'] = user_name\r\n return self.get_response('ListSigningCertificates',\r\n params, list_marker='Certificates')",
"def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))",
"def getDocuments(self):\n return self.objectValues('Multifile')",
"def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]",
"def retrieveDocuments(self):\n documents = {}\n for reuter in self.REUTERS_FILES:\n print(reuter)\n reuter_stream = open(self.BASEPATH + reuter, encoding=\"latin-1\")\n reuter_content = reuter_stream.read()\n soup = BeautifulSoup(reuter_content, \"html.parser\")\n articles = soup.find_all('reuters')\n for article in articles:\n body = \"\"\n title = \"\"\n words = \"\"\n newid = article['newid']\n if not article.title is None:\n title = article.title.string\n if not article.body is None:\n body = article.body.string\n words = title + \" \" + body\n documents[newid] = words\n print(f\"Retrieval Complete! - Total Documents: {len(documents)}\")\n return documents",
"def get_document_list(\n self,\n project_id: int,\n url_parameters: dict = {}\n ) -> requests.models.Response:\n return self.get(\n 'v1/projects/{project_id}/docs{url_parameters}'.format(\n project_id=project_id,\n url_parameters=self.build_url_parameter(url_parameters)\n )\n )",
"def get_document(self, *args, **kwargs):\n return self._documents_manager.get_document(*args, **kwargs)",
"def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p",
"def getDocumentAll(self, query = {}, keys = []):\n query = query or {}\n if \"include_docs\" not in query:\n query[\"include_docs\"] = True\n\n if not keys:\n return self.client.get(self.name +\"/_all_docs\", query)\n else:\n return self.client.post(self.name +\"/_all_docs\", query,\n {\"keys\": keys}).getBodyData()",
"def get_documents(doc_type):\n doc_type = 1 if doc_type == 'registration' else 2\n return Documents.query.filter_by(type=doc_type).all()",
"def gist_documents(self, username, max_docs=None):\n r = self.requests_session.get(self.gist_path.format(username=username))\n if r.status_code != 200:\n self.log(f\"Couldn't get gists for {username}\", \"ERROR\")\n return\n\n docs_fetched = 0\n for d in r.json():\n\n docs_fetched += 1\n yield d\n \n if docs_fetched >= self.items_per_page:\n # this will only print once \n # TODO pagination\n msg = (f\"TODO pagination not enabled so gists by user:{username} might have be \"\n f\"skipped as they have written more than {self.items_per_page} gists.\"\n )\n self.log(msg, \"WARNING\")\n\n if max_docs is not None and docs_fetched > max_docs:\n return",
"def get_documents(self, engine_name, document_ids):\n endpoint = \"engines/{}/documents\".format(engine_name)\n data = json.dumps(document_ids)\n return self.swiftype_session.request('get', endpoint, data=data)",
"def authentication_document(self):\n return Response(\n self.manager.authentication_for_opds_document,\n 200,\n {\n \"Content-Type\": AuthenticationForOPDSDocument.MEDIA_TYPE\n }\n )",
"def build_documents(self, *args, **kwargs):\n docs_manager = self._documents_manager\n resources_tree = self._resources_manager.resources_tree\n return docs_manager.build_documents(resources_tree, *args, **kwargs)",
"def getMyDocuments( self, REQUEST=None ):\n membership = getToolByName( self, 'portal_membership', None )\n if membership is None:\n return\n\n user = membership.getAuthenticatedMember()\n uname = user.getUserName()\n\n total_objects, documents = self.searchResults( type='documents', with_limit=1, REQUEST=REQUEST, \\\n creator=uname, implements='isHTMLDocument', sort_on='created', sort_order='reverse', \\\n sort_limit=50 )\n\n return ( total_objects, documents, )",
"def get_signing_redirect(template_num):\n\n # Create the signer recipient model\n signer = Signer( # The signer\n email = signer_email, name = signer_name, recipient_id = \"1\",\n client_user_id = client_user_id, role_name = \"doctor\"# Setting the client_user_id marks the signer as embedded\n )\n\n # Create the server and inline templates for the envelope\n\n server_template1 = ServerTemplate(\n sequence = \"1\",\n template_id = template_ids[0]\n )\n\n server_template2 = ServerTemplate(\n sequence = \"2\",\n template_id = template_ids[1]\n )\n\n inline_template = InlineTemplate(\n sequence = \"3\",\n recipients = Recipients(signers = [signer])\n )\n\n # Create the composite template\n composite_template1 = CompositeTemplate(\n inline_templates = [inline_template],\n server_templates = [server_template1]\n )\n\n composite_template2 = CompositeTemplate(\n inline_templates = [inline_template],\n server_templates = [server_template2]\n )\n\n composite_templates = [composite_template1] if template_num == 0 else [composite_template2] if template_num == 1 else [composite_template1, composite_template2]\n\n # Next, create the top level envelope definition and populate it.\n envelope_definition = EnvelopeDefinition(\n email_subject = \"Please sign this document sent from the Python SDK\",\n composite_templates = composite_templates,\n status = \"sent\" # requests that the envelope be created and sent.\n )\n\n #\n # Step 2. Create/send the envelope.\n #\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(\"Authorization\", \"Bearer \" + access_token)\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.create_envelope(account_id, envelope_definition=envelope_definition)\n\n #\n # Step 3. The envelope has been created.\n # Request a Recipient View URL (the Signing Ceremony URL)\n #\n redirect_url = base_url + '/signing_complete' if template_num == 0 else base_url + '/report_complete'\n envelope_id = results.envelope_id\n recipient_view_request = RecipientViewRequest(\n authentication_method = authentication_method, client_user_id = client_user_id,\n recipient_id = '1', return_url = redirect_url,\n user_name = signer_name, email = signer_email\n )\n\n results = envelope_api.create_recipient_view(account_id, envelope_id,\n recipient_view_request = recipient_view_request)\n\n #\n # Step 4. The Recipient View URL (the Signing Ceremony URL) has been received.\n # Redirect the user's browser to it.\n #\n return results.url",
"def get_documents(self, index, **kwargs):\n return self._build_search(index, **kwargs).params(request_timeout=2000).scan()",
"def getDocument(self, *args):\n return _libsbml.SBMLValidator_getDocument(self, *args)",
"def documents(self):\r\n return doc.Documents(self)",
"async def get_document(self):\n try:\n document = await self.request.json()\n except json.JSONDecodeError:\n document = {}\n\n return document",
"def getDocument(self, *args):\n return _libsbml.SBMLConverter_getDocument(self, *args)",
"def documents(self) -> list[str]:\n return self._documents"
]
| [
"0.7337725",
"0.615188",
"0.5788486",
"0.5480119",
"0.54177696",
"0.5345478",
"0.52754647",
"0.52073777",
"0.5207363",
"0.51747423",
"0.5151259",
"0.5091837",
"0.5017626",
"0.49071118",
"0.4895874",
"0.48886582",
"0.48839498",
"0.4883136",
"0.48830694",
"0.48236075",
"0.4807431",
"0.4779615",
"0.47757605",
"0.47402617",
"0.47384867",
"0.47190282",
"0.4685125",
"0.46781805",
"0.46672288",
"0.46460032"
]
| 0.6817782 | 1 |
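
Stripped of the generated-client plumbing, the `_with_http_info` builder in the record above amounts to a JSON POST against the `/App/GetSignDocuments` resource with matching `Accept` and `Content-Type` headers. The sketch below shows a roughly equivalent call using the `requests` library; the base URL and request payload are invented placeholders, and no authentication is added because the generated code declares an empty `auth_settings` list.

```python
import requests

BASE_URL = "https://api.example.com"  # placeholder host; the real one comes from the SDK config

def get_sign_documents_raw(request_body: dict) -> dict:
    """Plain-HTTP approximation of the generated POST /App/GetSignDocuments call."""
    response = requests.post(
        BASE_URL + "/App/GetSignDocuments",
        json=request_body,                    # body_params = the request model
        headers={
            "Accept": "application/json",     # select_header_accept([...]) picks JSON here
            "Content-Type": "application/json",
        },
        timeout=30,
    )
    response.raise_for_status()
    return response.json()  # the SDK would deserialize this into GetSignDocumentsResponse
```
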
Sign Documents. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. | def app_sign_documents_post(self, request, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.app_sign_documents_post_with_http_info(request, **kwargs)
else:
(data) = self.app_sign_documents_post_with_http_info(request, **kwargs)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def app_get_sign_documents_post(self, request, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.app_get_sign_documents_post_with_http_info(request, **kwargs)\n else:\n (data) = self.app_get_sign_documents_post_with_http_info(request, **kwargs)\n return data",
"def app_sign_documents_post_with_http_info(self, request, **kwargs):\n\n all_params = ['request']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method app_sign_documents_post\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'request' is set\n if ('request' not in params) or (params['request'] is None):\n raise ValueError(\"Missing the required parameter `request` when calling `app_sign_documents_post`\")\n\n\n collection_formats = {}\n\n resource_path = '/App/SignDocuments'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'request' in params:\n body_params = params['request']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/xml'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='SignDocumentsResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def app_get_sign_documents_post_with_http_info(self, request, **kwargs):\n\n all_params = ['request']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method app_get_sign_documents_post\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'request' is set\n if ('request' not in params) or (params['request'] is None):\n raise ValueError(\"Missing the required parameter `request` when calling `app_get_sign_documents_post`\")\n\n\n collection_formats = {}\n\n resource_path = '/App/GetSignDocuments'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'request' in params:\n body_params = params['request']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/xml'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='GetSignDocumentsResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def _sign_document(self):\n return False",
"def embedded_signing_ceremony():\n \n if ds_token_ok(3):\n account_id = session['ds_account_id']\n base_path = session['ds_base_path']\n access_token = session['ds_access_token']\n\n with open(os.path.join(APP_PATH, file_name_path), \"rb\") as file:\n content_bytes = file.read()\n base64_file_content = base64.b64encode(content_bytes).decode('ascii')\n\n # Create the document model\n document = Document( # create the DocuSign document object\n document_base64=base64_file_content,\n name='Example document', # can be different from actual file name\n file_extension='pdf', # many different document types are accepted\n document_id=1 # a label used to reference the doc\n )\n\n # Create the signer recipient model\n signer = Signer( # The signer\n email=signer_email, name=signer_name, recipient_id=\"1\", routing_order=\"1\",\n client_user_id=client_user_id, # Setting the client_user_id marks the signer as embedded\n )\n\n # Create a sign_here tab (field on the document)\n sign_here = SignHere( # DocuSign SignHere field/tab\n document_id='1', page_number='2', recipient_id='1', tab_label='SignHereTab',\n x_position='100', y_position='250')\n\n # Add the tabs model (including the sign_here tab) to the signer\n signer.tabs = Tabs(sign_here_tabs=[sign_here]) # The Tabs object wants arrays of the different field/tab types\n\n # Next, create the top level envelope definition and populate it.\n envelope_definition = EnvelopeDefinition(\n email_subject=\"Please sign this document sent from the Python SDK\",\n documents=[document], # The order in the docs array determines the order in the envelope\n recipients=Recipients(signers=[signer]), # The Recipients object wants arrays for each recipient type\n status=\"sent\" # requests that the envelope be created and sent.\n )\n\n #\n # Step 2. Create/send the envelope.\n #\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(\"Authorization\", \"Bearer \" + access_token)\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.create_envelope(account_id, envelope_definition=envelope_definition)\n\n #\n # Step 3. The envelope has been created.\n # Request a Recipient View URL (the Signing Ceremony URL)\n #\n envelope_id = results.envelope_id\n recipient_view_request = RecipientViewRequest(\n authentication_method=authentication_method, client_user_id=client_user_id,\n recipient_id='1', return_url=base_url + '/dsreturn',\n user_name=signer_name, email=signer_email\n )\n\n results = envelope_api.create_recipient_view(account_id, envelope_id,\n recipient_view_request=recipient_view_request)\n\n #\n # Step 4. The Recipient View URL (the Signing Ceremony URL) has been received.\n # Redirect the user's browser to it.\n #\n return redirect(results.url, code=302)\n else:\n session['eg'] = url_for('resign')\n return redirect(url_for('ds_login'))",
"def sign(self, body, external_aad, private_key):",
"def createMultipleDocuments(cred, payload):\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n del pathData['updateMask']\n data['writes'].append(pathData)\n\n makeRequest(cred, url, 'POST', data)",
"def sign(self, *signers: Keypair) -> None:\n underlying_signers = [signer.to_solders() for signer in signers]\n self._solders.sign(underlying_signers, self._solders.message.recent_blockhash)",
"def add_sign(self):\n if self.is_signed():\n self.remove_sign()\n \n data = self._document.read()\n encrypted = self._encryptor.encrypt_cbc(data, self._init_vector)\n hash_value = encrypted[-16:]\n self._document.write(self._seperator.encode() + hash_value + self._seperator.encode())\n print(\"The document is signed!\")",
"def sign(self, payload):\n raise NotImplementedError",
"def get_signing_redirect(template_num):\n\n # Create the signer recipient model\n signer = Signer( # The signer\n email = signer_email, name = signer_name, recipient_id = \"1\",\n client_user_id = client_user_id, role_name = \"doctor\"# Setting the client_user_id marks the signer as embedded\n )\n\n # Create the server and inline templates for the envelope\n\n server_template1 = ServerTemplate(\n sequence = \"1\",\n template_id = template_ids[0]\n )\n\n server_template2 = ServerTemplate(\n sequence = \"2\",\n template_id = template_ids[1]\n )\n\n inline_template = InlineTemplate(\n sequence = \"3\",\n recipients = Recipients(signers = [signer])\n )\n\n # Create the composite template\n composite_template1 = CompositeTemplate(\n inline_templates = [inline_template],\n server_templates = [server_template1]\n )\n\n composite_template2 = CompositeTemplate(\n inline_templates = [inline_template],\n server_templates = [server_template2]\n )\n\n composite_templates = [composite_template1] if template_num == 0 else [composite_template2] if template_num == 1 else [composite_template1, composite_template2]\n\n # Next, create the top level envelope definition and populate it.\n envelope_definition = EnvelopeDefinition(\n email_subject = \"Please sign this document sent from the Python SDK\",\n composite_templates = composite_templates,\n status = \"sent\" # requests that the envelope be created and sent.\n )\n\n #\n # Step 2. Create/send the envelope.\n #\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(\"Authorization\", \"Bearer \" + access_token)\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.create_envelope(account_id, envelope_definition=envelope_definition)\n\n #\n # Step 3. The envelope has been created.\n # Request a Recipient View URL (the Signing Ceremony URL)\n #\n redirect_url = base_url + '/signing_complete' if template_num == 0 else base_url + '/report_complete'\n envelope_id = results.envelope_id\n recipient_view_request = RecipientViewRequest(\n authentication_method = authentication_method, client_user_id = client_user_id,\n recipient_id = '1', return_url = redirect_url,\n user_name = signer_name, email = signer_email\n )\n\n results = envelope_api.create_recipient_view(account_id, envelope_id,\n recipient_view_request = recipient_view_request)\n\n #\n # Step 4. The Recipient View URL (the Signing Ceremony URL) has been received.\n # Redirect the user's browser to it.\n #\n return results.url",
"def sign(self, ser, pubs=None, verfers=None, indexed=True, indices=None):\n signers = []\n\n if pubs is None and verfers is None:\n raise ValueError(\"pubs or verfers required\")\n\n if pubs:\n for pub in pubs:\n verfer = coring.Verfer(qb64=pub) # needed to know if nontrans\n raw = self.keeper.getPri(key=pub)\n if raw is None:\n raise ValueError(\"Missing prikey in db for pubkey={}\".format(pub))\n signer = coring.Signer(qb64b=bytes(raw),\n transferable=verfer.transferable)\n signers.append(signer)\n\n else:\n for verfer in verfers:\n pub = verfer.qb64\n raw = self.keeper.getPri(key=pub)\n if raw is None:\n raise ValueError(\"Missing prikey in db for pubkey={}\".format(pub))\n signer = coring.Signer(qb64b=bytes(raw),\n transferable=verfer.transferable)\n signers.append(signer)\n\n if indices and len(indices) != len(signers):\n raise ValueError(\"Mismatch length indices={} and resultant signers \"\n \"list={}\".format(len(indices), len(signers)))\n\n if indexed or indices:\n sigers = []\n for i, signer in enumerate(signers):\n if indices:\n i = indices[i] # get index from indices\n sigers.append(signer.sign(ser, index=i)) # assigns .verfer to siger\n return sigers\n else:\n cigars = []\n for signer in signers:\n cigars.append(signer.sign(ser)) # assigns .verfer to cigar\n return cigars",
"def sign(cls, upload, location=None):\n path = \"uploader/sign/%s\" % upload[\"id\"]\n kwargs = {\"md5\": upload[\"md5\"], \"location\": location}\n try:\n return Backend.put(path, kwargs, headers=Backend.headers())\n except requests.HTTPError as err:\n if err.response.status_code == 410:\n LOGGER.warning(\"Cannot Touch file %s. Already finished \\\n (not active) (410)\", upload[\"id\"])\n raise err\n except:\n raise",
"def upload(self, documents: List[Document], vectorise_func) -> None:\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n # Update document class conveniently\n if issubclass(type(documents[0]), ChunkedDocument):\n self._doc_class = ChunkedDocument\n\n for batch in batches:\n vectorise_func(batch, self)\n self.documents += batch",
"def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")",
"def client_upload_to_doc_service(self, doc_to_upload):\n return self.doc_service_client.upload(file_name, create_file(doc_to_upload), 'application/yaml',\n headers={'X-Client-Request-ID': doc_to_upload[\"meta\"][\"id\"]})",
"def sign_partial(self, *partial_signers: Keypair) -> None:\n underlying_signers = [signer.to_solders() for signer in partial_signers]\n self._solders.partial_sign(underlying_signers, self._solders.message.recent_blockhash)",
"def sign_blob(blob, deadline=None):\n # app_identity.sign_blob is producing RSA+SHA256 signature. Sadly, it isn't\n # documented anywhere. But it should be relatively stable since this API is\n # used by OAuth2 libraries (and so changing signature method may break a lot\n # of stuff).\n return app_identity.sign_blob(blob, deadline)",
"def sign_command(args):\n if args.files:\n die(\"Unexpected positional arguments\")\n\n # Load certificate request\n if not args.request:\n die(\"Need --request\")\n subject_csr = load_req(args.request)\n\n reset_info = None\n if args.reset:\n reset_info = info_from_args(args)\n\n # Load CA info\n if not args.ca_info:\n die(\"Need --ca-info\")\n if args.ca_info.endswith('.csr'):\n issuer_obj = load_req(args.ca_info)\n else:\n issuer_obj = load_cert(args.ca_info)\n\n # Load CA private key\n issuer_key = load_key(args.ca_key, load_password(args.password_file))\n if not same_pubkey(issuer_key, issuer_obj):\n die(\"--ca-private-key does not match --ca-info data\")\n\n # Certificate generation\n cert = do_sign(subject_csr, issuer_obj, issuer_key, args.days, args.path_length, args.request, reset_info=reset_info)\n\n # Write certificate\n do_output(cert_to_pem(cert), args, 'x509')",
"def document_upload():\n form = SourceTextForm()\n if form.validate_on_submit():\n user = current_user\n\n doc = {}\n doc[\"file\"] = form.filename.data\n doc[\"author\"] = form.author.data\n doc[\"title\"] = form.title.data\n doc[\"language\"] = form.language.data\n\n params = {}\n params[\"email\"] = user.email\n params[\"new_page\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"PAGE_LIMIT\"]\n params[\"line_size\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"LINE_SIZE\"]\n params[\"early_cutoff\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"EARLY_CUTOFF\"]\n params[\"batch_size\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"BATCH_SIZE\"]\n params[\"tokenizer\"] = current_app.config[\"TOKENIZER\"].select(doc[\"language\"])\n params[\"resource\"] = create_book\n doc_uploader = DocumentUploader(params)\n \n could_upload = True\n try:\n doc_uploader.upload(doc)\n except Exception as e:\n traceback.print_exc()\n could_upload = False\n error_msg = \"Error uploading document. Please try again.\"\n flash(error_msg)\n\n if could_upload:\n success_msg = \"Document successfully uploaded.\"\n flash(success_msg)\n\n return render_template('content_management/document_upload.html', form=form)",
"def embedded_signing_ceremony():\n\n #\n # Step 1. The envelope definition is created.\n # One signHere tab is added.\n # The document path supplied is relative to the working directory\n #\n with open(os.path.join(APP_PATH, file_name_path), \"rb\") as file:\n content_bytes = file.read()\n base64_file_content = base64.b64encode(content_bytes).decode('ascii')\n\n # Create the document model\n document = Document( # create the DocuSign document object\n document_base64 = base64_file_content,\n name = 'Example document', # can be different from actual file name\n file_extension = 'pdf', # many different document types are accepted\n document_id = 1 # a label used to reference the doc\n )\n\n # Create the signer recipient model\n signer = Signer( # The signer\n email = signer_email, name = signer_name, recipient_id = \"1\", routing_order = \"1\",\n client_user_id = client_user_id, # Setting the client_user_id marks the signer as embedded\n )\n\n # Create a sign_here tab (field on the document)\n sign_here = SignHere( # DocuSign SignHere field/tab\n document_id = '1', page_number = '1', recipient_id = '1', tab_label = 'SignHereTab',\n x_position = '195', y_position = '147')\n\n # Add the tabs model (including the sign_here tab) to the signer\n signer.tabs = Tabs(sign_here_tabs = [sign_here]) # The Tabs object wants arrays of the different field/tab types\n\n # Next, create the top level envelope definition and populate it.\n envelope_definition = EnvelopeDefinition(\n email_subject = \"Please sign this document sent from the Python SDK\",\n documents = [document], # The order in the docs array determines the order in the envelope\n recipients = Recipients(signers = [signer]), # The Recipients object wants arrays for each recipient type\n status = \"sent\" # requests that the envelope be created and sent.\n )\n\n #\n # Step 2. Create/send the envelope.\n #\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(\"Authorization\", \"Bearer \" + access_token)\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.create_envelope(account_id, envelope_definition=envelope_definition)\n\n #\n # Step 3. The envelope has been created.\n # Request a Recipient View URL (the Signing Ceremony URL)\n #\n envelope_id = results.envelope_id\n recipient_view_request = RecipientViewRequest(\n authentication_method = authentication_method, client_user_id = client_user_id,\n recipient_id = '1', return_url = base_url + '/dsreturn',\n user_name = signer_name, email = signer_email\n )\n\n results = envelope_api.create_recipient_view(account_id, envelope_id,\n recipient_view_request = recipient_view_request)\n\n #\n # Step 4. The Recipient View URL (the Signing Ceremony URL) has been received.\n # Redirect the user's browser to it.\n #\n return results.url",
"def sign(self):\n daskD.wait(self.client.map(_call_sign, self.vecDask, pure=False))\n return self",
"def setSignPDF(self, keystoreURL, keyAlias, keystorePassword, keystoreType, signingMode):\n self.PDFreactorConfiguration.in1[\"signPdfKeystoreURL\"] = keystoreURL\n self.PDFreactorConfiguration.in1[\"signPdfKeyAlias\"] = keyAlias\n self.PDFreactorConfiguration.in1[\"signPdfKeystorePassword\"] = keystorePassword\n self.PDFreactorConfiguration.in1[\"signPdfKeystoreType\"] = keystoreType\n self.PDFreactorConfiguration.in1[\"signPdfSigningMode\"] = signingMode",
"def store_documents(self, documents: list):\n results = app.Results()\n entries = [\n { \n 'Id': str(uuid1()),\n 'MessageBody': json.dumps(doc)\n }\n for doc in documents\n ]\n ids = [ e['Id'] for e in entries ]\n self.Logger.info(f'Store {ids} in sqs')\n self.Logger.debug(f'Saving {entries} in sqs {self.sqs_queue_url}')\n self.sqs_client.send_message_batch(\n QueueUrl=self.sqs_queue_url,\n Entries=entries\n )\n results.ActionStatus = 0\n results.Results = ids\n return results",
"def sign_dylibs(self, signer, path):\n for dylib_path in glob.glob(join(path, '*.dylib')):\n dylib = signable.Dylib(self, dylib_path, signer)\n dylib.sign(self, signer)",
"def sign(self, bytes):\r\n if not self.hasPrivateKey():\r\n raise AssertionError()\r\n paddedBytes = self._addPKCS1Padding(bytes, 1)\r\n m = bytesToNumber(paddedBytes)\r\n if m >= self.n:\r\n raise ValueError()\r\n c = self._rawPrivateKeyOp(m)\r\n sigBytes = numberToByteArray(c, numBytes(self.n))\r\n return sigBytes",
"def sign_data(data):\n\n rv = \"\"\n\n for i in signing_keys:\n sk = ecdsa.SigningKey.from_der(i)\n\n if sk is not None and sk.verifying_key is not None:\n sig = sk.sign(data)\n rv += encode_line(\"signature\", sk.verifying_key.to_der(), sig)\n\n return rv",
"def remove_sign(self):\n if self.is_signed():\n file_size = os.stat(self._file_name).st_size\n self._document.truncate(file_size - self._append_size)\n print(\"Sign removed from the document!\")\n else:\n print(\"The document is not signed!\")",
"def get_signed(self, **payload):\n param = ''\n for k in payload:\n param += '&' + k + '=' + str(payload[k])\n param = param.lstrip('&')\n signature = hmac.new(self.secret, param, digestmod=hashlib.sha256).hexdigest()\n\n return signature",
"def signed(self, encoded):\n signature = self.sign(encoded)\n return encoded + signature"
]
| [
"0.72554654",
"0.6946287",
"0.674169",
"0.58869725",
"0.54398656",
"0.5326935",
"0.52467406",
"0.5143745",
"0.50698924",
"0.4994678",
"0.49314767",
"0.49239925",
"0.48445404",
"0.4831794",
"0.48213002",
"0.48093095",
"0.48047242",
"0.47751486",
"0.47599822",
"0.47585905",
"0.47372657",
"0.46898705",
"0.4684847",
"0.46281606",
"0.45278162",
"0.45166957",
"0.45105597",
"0.45083398",
"0.45009503",
"0.44927135"
]
| 0.7130099 | 1 |
Pretty-print an 18x18 covariance matrix(). | def print_covariance(P):
def b(string):
"""
Turns a given string blue.
"""
return "\033[94m{0}\033[0m".format(string)
out = " "
# Print out header with state variables
for var in STATE_VARS:
out += "{0:9s} ".format(var)
# Print out correlation / covariance matrix
for row in range(18):
out += "\n{0:3s} ".format(STATE_VARS[row])
for col in range(18):
# Print correlations on lower diagnal
if col < row:
out += "{0: 2.2f}, ".format(float(P[row,col]/(sqrt(P[row,row]) * sqrt(P[col,col]) )))
# Highlight variances in blue
elif row == col:
out += "{0: 2.2e}, ".format(float(P[row,col]))
else:
out += "{0: 2.2e}, ".format(float(P[row,col]))
out += "\n"
    print(out) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))",
"def information_matrix(self):\n return self._cov.inv()",
"def pretty_print(self):\r\n out = \"\"\r\n\r\n rows,cols = self.matrix.shape\r\n\r\n for row in xrange(0,rows):\r\n out += \"[\"\r\n\r\n for col in xrange(0,cols):\r\n out += \"%+0.2f \"%self.matrix[row][col]\r\n out += \"]\\n\"\r\n\r\n return out",
"def print_latex_cov(P):\n out = ''\n # Print out header with state variables\n for var in STATE_VARS:\n out += \" & ${0:9s}$ \".format(var)\n\n out += '\\\\\\\\ \\n'\n\n\n # Print out correlation / covariance matrix \n for row in range(18):\n out += \"${0:3s}$ \".format(STATE_VARS[row])\n for col in range(18):\n # Print correlations on lower diagnal\n if col < row:\n out += \" & {0: 2.2f} \".format(float(P[row,col]/(sqrt(P[row,row]) * sqrt(P[col,col]) )))\n # Highlight variances in blue\n elif row == col:\n out += \" & {0: 2.2e} \".format(float(P[row,col]))\n else:\n out += \"& {0: 2.2e} \".format(float(P[row,col]))\n out += '\\\\\\\\ \\n'\n\n print out",
"def __repr__(self):\n return repr(self.matrix)",
"def _print_matrix(self):\n print(self.matrix)",
"def getCovarianceNoiseMatrix(self):\n return np.dot ( self.getB().T, self.getB() )",
"def covariance_matrix(self):\n\n self._order_observations()\n self.cov_matrix = self._compute_covariance_matrix(\n self.list_observations, self.list_observations)\n\n self.cov_matrix += np.diag(np.array([self.noise] * self.n_observation))\n\n return self.cov_matrix",
"def __repr__(self) -> str:\n\t\treturn \",\".join(\"\".join(str(n) for n in m) for m in self.matrix)",
"def __str__(self) -> str:\n\t\treturn f\"dim {self.dimM},{self.dimN}\" +\"\\n\" \\\n\t\t\t+ \"\\n\".join(\"\".join(str(n) for n in m) for m in self.matrix)",
"def print_matrix(M):\n print(\"printing Matrix\")\n for row in M:\n for val in row:\n print'{:3}'.format(val),\n print",
"def _matrixToStr(self, name, mat):\n r = []\n r.append(\"\\n\" + name)\n for i in range(len(self.answer['a priori state vector'])):\n r.append(\", \".join([\"{0:=+10.4g}\".format(float(v)) \n for v in mat[:, i]]))\n return \"\\n\".join(r)",
"def matrixToString(matrix):\n nRows = len(matrix)\n if nRows == 0:\n return '[0,0](())'\n nCols = len(matrix[0])\n string = '[%d,%d](' % (nRows, nCols)\n for r in range(nRows):\n string += '('\n for c in range(nCols):\n string += str(float(matrix[r][c]))\n if c != nCols - 1:\n string += ','\n string += ')'\n if r != nRows - 1:\n string += ','\n string += ')'\n return string",
"def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data",
"def print_matrix(matrix):\n\n print(result_is)\n max_len = max((len(str(round(n))) for row in matrix for n in row))\n cell_pattern = \"{{:{pos}.{part}f}}\"\\\n .format(pos=max_len + max_decimals + 2, part=max_decimals)\n for row in matrix:\n row_gen = (cell_pattern.format(cell) for cell in row)\n print(*row_gen)",
"def FormCovarianceMatrix(mat):\n nPts = mat.shape[0]\n sumVect = sum(mat)\n sumVect /= float(nPts)\n for row in mat:\n row -= sumVect\n return numpy.dot(numpy.transpose(mat),mat)/(nPts-1)",
"def FormCovarianceMatrix(mat):\n nPts = mat.shape[0]\n sumVect = sum(mat)\n sumVect /= float(nPts)\n for row in mat:\n row -= sumVect\n return numpy.dot(numpy.transpose(mat), mat) / (nPts - 1)",
"def correlation_matrix(self):\n correlation_matrix = self.model.covariance.copy()\n sigmaD = np.sqrt(np.diag(correlation_matrix))\n for ii in range(correlation_matrix.shape[0]):\n for jj in range(correlation_matrix.shape[1]):\n correlation_matrix[ii, jj] /= sigmaD[ii] * sigmaD[jj]\n return correlation_matrix",
"def _get_variance_covariance_table(self):\n\n # variance-covariance matrix\n res = self._model.fit()\n X = self._model.exog\n x_prime_x_inverse = np.linalg.inv(np.matmul(X.transpose(), X))\n var_cov_matrix = res.mse_resid * x_prime_x_inverse\n var_cov_table = SimpleTable(data=var_cov_matrix,\n headers=self._model.exog_names,\n stubs=self._model.exog_names,\n title='Variance-covariance matrix')\n\n return var_cov_table",
"def __str__(self):\n return str(self.asMatrix())",
"def __repr__(self):\n return self.matrix and '\\n'.join([\"|%s|\" % s for s in [' '.join([\"%-6.3f\" % e for e in w]) for w in self.matrix]]) or \"<pusta macierz>\"",
"def print_matrix(matrix):\n [print(*line) for line in matrix]",
"def print_matrix(matrix):\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n print(matrix[i][j], end='\\t')\n print('')",
"def getCovarianceMatrix(self):\n #ypost = np.dot ( self.getA().T, self.priorX )\n\n theta = np.mat ( self.getA() )\n Xm = np.mat ( self.priorX )\n\n ypost = Xm * theta\n yprior = self.priorY\n error = ypost - yprior\n #error = error - np.mean ( error, axis = 0 )\n return np.dot ( error.T, error )",
"def print_matrix(M, decimals=3):\n for row in M:\n print([round(x,decimals)+0 for x in row])",
"def PrintMatrix(self):\n # loop through the rows\n for i in range(self.rows):\n # intialise the matrix\n mat = []\n # loop through the column\n for j in range(self.cols):\n # append matrix element\n mat.append(self.matrix[i][j])\n # print the matrix\n print(mat)",
"def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov",
"def _pretty_print_2d_array(rows):\n s = [[str(e) for e in row] for row in rows]\n lens = [max(map(len, col)) for col in zip(*s)]\n fmt = \"\\t\".join(\"{{:{}}}\".format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n return \"\\n\" + \"\\n\".join(table)",
"def print_matrix(\n mat, # input matrix\n var_str):\n #---------------------------------------------------------------------#\n # Looping over columns and rows #\n #---------------------------------------------------------------------#\n if mat.shape[1] is not False:\n out = '' # initialize string\n for I in range(0, mat.shape[0]):\n for J in range(0, mat.shape[1]):\n out += '%12.5f' %(mat[I,J])\n out += '\\n'\n if mat.shape[1] is False:\n for I in range(0, mat.shape[0]):\n out += '%12.5f' %(mat[I])\n out += '\\n'\n print(var_str)\n print(out)",
"def format_matrix(x):\n return ' '.join([format_vector(y) for y in x])"
]
| [
"0.6498037",
"0.64375573",
"0.64108604",
"0.64056873",
"0.6357736",
"0.6317185",
"0.6234524",
"0.6215412",
"0.62113374",
"0.6162891",
"0.61263704",
"0.6123183",
"0.60731006",
"0.6032474",
"0.59799695",
"0.5935359",
"0.59276396",
"0.5920301",
"0.5918287",
"0.5914103",
"0.59120667",
"0.5858037",
"0.58471805",
"0.5820046",
"0.57913935",
"0.57750785",
"0.577216",
"0.5758615",
"0.57187027",
"0.5718092"
]
| 0.6838222 | 0 |
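
`print_covariance` in the record above prints correlations below the diagonal, computed as P[i,j] / (sqrt(P[i,i]) * sqrt(P[j,j])), and raw (co)variance entries on and above it. Here is a small self-contained sketch of the same lower-triangle convention; the 3x3 matrix and labels are toy placeholders rather than the 18 `STATE_VARS` of the original filter.

```python
import numpy as np

# Toy version of the lower-triangle layout used by print_covariance above:
# correlations P[i,j] / (sqrt(P[i,i]) * sqrt(P[j,j])) below the diagonal,
# raw (co)variance entries on and above it.
def print_cov_summary(P, labels):
    print("     " + " ".join(f"{name:>9s}" for name in labels))
    for r, name in enumerate(labels):
        cells = []
        for c in range(len(labels)):
            if c < r:
                rho = P[r, c] / (np.sqrt(P[r, r]) * np.sqrt(P[c, c]))
                cells.append(f"{rho:>9.2f}")      # correlation below the diagonal
            else:
                cells.append(f"{P[r, c]:>9.2e}")  # variance / covariance elsewhere
        print(f"{name:>4s} " + " ".join(cells))

P = np.array([[4.0, 0.8, 0.2],
              [0.8, 1.0, -0.1],
              [0.2, -0.1, 9.0]])
print_cov_summary(P, ["x", "y", "z"])
```
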
Compute the Fisher information. | def _compute_fisher_information(result: 'MaximumLikelihoodAmplitudeEstimationResult',
                                num_sum_terms: Optional[int] = None,
                                observed: bool = False) -> float:
    a = result.estimation
    # Corresponding angle to the value a (only use real part of 'a')
    theta_a = np.arcsin(np.sqrt(np.real(a)))
    # Get the number of hits (shots_k) and one-hits (h_k)
    one_hits = result.good_counts
    all_hits = [result.shots] * len(one_hits)
    # Include all sum terms or just up to a certain term?
    evaluation_schedule = result.evaluation_schedule
    if num_sum_terms is not None:
        evaluation_schedule = evaluation_schedule[:num_sum_terms]
        # not necessary since zip goes as far as shortest list:
        # all_hits = all_hits[:num_sum_terms]
        # one_hits = one_hits[:num_sum_terms]
    # Compute the Fisher information
    if observed:
        # Note, that the observed Fisher information is very unreliable in this algorithm!
        d_loglik = 0
        for shots_k, h_k, m_k in zip(all_hits, one_hits, evaluation_schedule):
            tan = np.tan((2 * m_k + 1) * theta_a)
            d_loglik += (2 * m_k + 1) * (h_k / tan + (shots_k - h_k) * tan)
        d_loglik /= np.sqrt(a * (1 - a))
        fisher_information = d_loglik ** 2 / len(all_hits)
    else:
        fisher_information = sum(shots_k * (2 * m_k + 1)**2
                                 for shots_k, m_k in zip(all_hits, evaluation_schedule))
        fisher_information /= a * (1 - a)
    return fisher_information | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _getFisher(self):\n Ctot = self.vd.getGP().getCovar()\n Ki = sp.linalg.inv(Ctot.K())\n n_scales = self.vd.getNumberScales()\n out = sp.zeros((n_scales, n_scales))\n for m in range(n_scales):\n out[m, m] = 0.5 * sp.trace(\n sp.dot(Ki,\n sp.dot(\n Ctot.Kgrad_param(m), sp.dot(Ki, Ctot.Kgrad_param(\n m)))))\n for n in range(m):\n out[m, n] = 0.5 * sp.trace(\n sp.dot(Ki,\n sp.dot(\n Ctot.Kgrad_param(m),\n sp.dot(Ki, Ctot.Kgrad_param(n)))))\n out[n, m] = out[m, n]\n return out",
"def calculate_fisher_information(self,\n x_filename,\n n_events=1):\n\n if self.model is None:\n raise ValueError('No model -- train or load model before evaluating it!')\n\n # Load training data\n logging.info('Loading evaluation data')\n xs = load_and_check(x_filename)\n n_samples = xs.shape[0]\n\n # Estimate scores\n if self.method in ['sally', 'sallino']:\n logging.info('Starting score evaluation')\n\n t_hats = evaluate_local_score_model(\n model=self.model,\n xs=xs\n )\n else:\n raise NotImplementedError('Fisher information calculation only implemented for SALLY estimators')\n\n # Calculate Fisher information\n n_parameters = t_hats.shape[1]\n fisher_information = np.zeros((n_parameters, n_parameters))\n for t_hat in t_hats:\n fisher_information += np.outer(t_hat, t_hat)\n fisher_information = float(n_events) / float(n_samples) * fisher_information\n\n # Calculate expected score\n expected_score = np.mean(t_hats, axis=0)\n logging.info('Expected score (should be close to zero): %s', expected_score)\n\n return fisher_information",
"def test_fisher(self):\r\n self.assertFloatEqual(fisher([0.073, 0.086, 0.10, 0.080, 0.060]),\r\n 0.0045957946540917905)",
"def fisherExact(table):\n a,b = table[0]\n c,d = table[1]\n return binomialCoefficient(a+b,a)*binomialCoefficient(c+d,c)/binomialCoefficient(a+b+c+d,a+c)",
"def fisher(_x,_y,P):\n \n x, y = np.meshgrid(np.linspace(-1,1,100),np.linspace(-1,1,100))\n x = x.flatten()\n y = y.flatten()\n\n A, sig, x0, y0, B = P\n r = (x - x0)**2 + (y - y0)**2\n\n f = np.exp(-0.5*r/sig**2)\n d0 = f\n d1 = r/sig**3 * f\n d2 = A * (x - x0)/sig**2 * f \n d3 = A * (y - y0)/sig**2 * f\n d4 = np.ones(f.size)\n derivs = [d0, d1, d2,d3, d4]\n F = np.zeros((len(derivs), len(derivs)))\n for i in range(len(derivs)):\n for j in range(len(derivs)):\n F[i,j] = np.sum(derivs[i]*derivs[j])\n return F",
"def test_fisher_alpha(self):\n c = array([4,3,4,0,1,0,2])\n obs = fisher_alpha(c)\n self.assertFloatEqual(obs, 2.7823795367398798)",
"def fishers(x):\n return sts.combine_pvalues(x.astype(np.float), method='fisher')",
"def get_fisher_set(self,include_priors=True):\n result = np.zeros(3,dtype=object)\n if include_priors:\n result[0] = self.get_fisher(f_spec_g,f_return_sw_par)\n result[1] = self.get_fisher(f_spec_no_mit,f_return_sw_par)\n if self.do_mit:\n result[2] = self.get_fisher(f_spec_mit,f_return_sw_par)\n else:\n result[2] = result[1]\n else:\n result[0] = self.get_fisher(f_spec_g_noprior,f_return_sw_par)\n result[1] = self.get_fisher(f_spec_no_mit_noprior,f_return_sw_par)\n if self.do_mit:\n result[2] = self.get_fisher(f_spec_mit_noprior,f_return_sw_par)\n else:\n result[2] = result[1]\n return result",
"def log_fisher(self):\n raise NotImplementedError(\"the log_fisher property should \"\n \"be defined in the Estimator sub-class\")",
"def information(self, fdist):\n freq = fdist.get(self.string)\n if not freq:\n freq = 0\n return 1 - (log(freq + 1) / log(fdist.N() + 1))",
"def fisher(params, log_prob_func=None, jitter=None, normalizing_const=1., softabs_const=1e6, metric=Metric.HESSIAN):\n\n log_prob = log_prob_func(params)\n if util.has_nan_or_inf(log_prob):\n print('Invalid log_prob: {}, params: {}'.format(log_prob, params))\n raise util.LogProbError()\n if metric == Metric.JACOBIAN_DIAG:\n # raise NotImplementedError()\n # import pdb; pdb.set_trace()\n jac = util.jacobian(log_prob, params, create_graph=True, return_inputs=False)\n jac = torch.cat([j.flatten() for j in jac])\n # util.flatten(jac).view(1,-1)\n fish = torch.matmul(jac.view(-1,1),jac.view(1,-1)).diag().diag()#/ normalizing_const #.diag().diag() / normalizing_const\n else:\n hess = torch.autograd.functional.hessian(log_prob_func, params, create_graph=True)\n fish = - hess #/ normalizing_const\n if util.has_nan_or_inf(fish):\n print('Invalid hessian: {}, params: {}'.format(fish, params))\n raise util.LogProbError()\n if jitter is not None:\n params_n_elements = fish.shape[0]\n fish += (torch.eye(params_n_elements) * torch.rand(params_n_elements) * jitter).to(fish.device)\n if (metric is Metric.HESSIAN) or (metric is Metric.JACOBIAN_DIAG):\n return fish, None\n elif metric == Metric.SOFTABS:\n eigenvalues, eigenvectors = torch.linalg.eigh(fish, UPLO='L')\n abs_eigenvalues = (1./torch.tanh(softabs_const * eigenvalues)) * eigenvalues\n fish = torch.matmul(eigenvectors, torch.matmul(abs_eigenvalues.diag(), eigenvectors.t()))\n return fish, abs_eigenvalues\n else:\n # if metric == Metric.JACOBIAN:\n # jac = jacobian(log_prob, params, create_graph=True)\n # fish = torch.matmul(jac.t(),jac) / normalizing_const\n raise ValueError('Unknown metric: {}'.format(metric))",
"def test_fishers_vec():\n a, b, c, d = _gen_rand_abcd()\n n = len(a)\n \n for alt in ['two-sided', 'less', 'greater']:\n ORs, pvalues = fishersapi.fishers_vec(a, b, c, d, alternative=alt)\n scipy_pvalues, scipy_ORs = np.zeros(n), np.zeros(n)\n for i in range(n):\n scipy_ORs[i], scipy_pvalues[i] = stats.fisher_exact([[a[i], b[i]], [c[i], d[i]]], alternative=alt)\n npt.assert_allclose(ORs, scipy_ORs, rtol=1e-4)\n npt.assert_allclose(pvalues, scipy_pvalues, rtol=1e-4)",
"def compute_statistics(self):",
"def generateStats(self):\n\t\tn = float(self.n)\n\t\tm = float(self.m)\n\t\tk = float(self.k)\n\t\tp_fp = math.pow(1.0 - math.exp(-(k*n)/m), k)\n\t\tprint \"Probability of false positives: \", p_fp\n\t\tprint \"Predicted false positive rate: \", p_fp * 100.0\n\t\tprint \"Number of elements entered in filter: \", n\n\t\tprint \"Number of bits in filter: \", m\n\t\tprint \"Number of hashes in filter: \", k",
"def get_titanic_fea(dataset):\n dataset['Name_length'] = dataset['Name'].apply(len)\n\n # Mapping Sex 不在map定义的 就是NaN\n dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n dataset['Has_Cabin'] = dataset['Cabin'].apply(lambda x: 0 if type(x) == float else 1)\n dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1\n\n dataset['IsAlone'] = 0\n dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1\n\n # [Embarked]\n dataset['Embarked'] = dataset['Embarked'].fillna('0')\n dataset['Fare'] = dataset['Fare'].fillna(0)\n # Mapping Embarked\n dataset['Embarked'] = dataset['Embarked'].map({'0': 0, 'S': 1, 'C': 2, 'Q': 3}).astype(int)\n\n # [Fare]\n dataset['CategoricalFare'] = pd.qcut(dataset['Fare'], 4)\n # Mapping Fare\n dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0\n dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\n dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2\n dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3\n dataset['Fare'] = dataset['Fare'].astype(int)\n\n # [Age]\n age_avg = dataset['Age'].mean()\n age_std = dataset['Age'].std()\n age_null_count = dataset['Age'].isnull().sum()\n age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)\n dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list\n dataset['Age'] = dataset['Age'].astype(int)\n dataset['CategoricalAge'] = pd.cut(dataset['Age'], 5)\n # Mapping Age\n dataset.loc[dataset['Age'] <= 16, 'Age'] = 0\n dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1\n dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2\n dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3\n dataset.loc[dataset['Age'] > 64, 'Age'] = 4\n\n # [Name]\n # 称谓 Mr 、Miss 等\n def get_title(name):\n title_search = re.search(' ([A-Za-z]+)\\.', name)\n # If the title exists, extract and return it.\n if title_search:\n return title_search.group(1)\n return \"\"\n dataset['Title'] = dataset['Name'].apply(get_title)\n\n # 只保留4类Title\n dataset['Title'] = dataset['Title'].replace(\n ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\n # Mapping titles\n title_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\n dataset['Title'] = dataset['Title'].map(title_mapping)\n dataset['Title'] = dataset['Title'].fillna(0)\n\n # Feature selection\n drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']\n dataset = dataset.drop(drop_elements, axis=1)\n dataset = dataset.drop(['CategoricalAge', 'CategoricalFare'], axis=1)\n\n return dataset",
"def _compute_empirical_ffreq(self):\n empirical_ffreq = dict().fromkeys(self.features)\n for feature in self.features:\n empirical_ffreq[feature] = sum(\n [self.indicator_func((_feature, feature))\n for _x, _y in self.train_set\n for _feature in _x]\n )\n return empirical_ffreq",
"def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fn_tp = fn + tp\n fp_tp = fp + tp\n\n h1 = -((fn_tp / n) * math.log2(fn_tp / n) +\n (1 - fn_tp / n) * math.log2(1 - fn_tp / n))\n\n h2 = -((fp_tp / n) * math.log2(fp_tp / n) +\n (1 - fp_tp / n) * math.log2(1 - fp_tp / n))\n\n p00 = 1 if tn == 0 else (tn / n)\n p01 = 1 if fn == 0 else (fn / n)\n p10 = 1 if fp == 0 else (fp / n)\n p11 = 1 if tp == 0 else (tp / n)\n\n h12 = -((tn / n) * math.log2(p00) +\n (fn / n) * math.log2(p01) +\n (fp / n) * math.log2(p10) +\n (tp / n) * math.log2(p11))\n\n mi = h1 + h2 - h12\n return mi",
"def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fn_tp = fn + tp\n fp_tp = fp + tp\n\n h1 = -((fn_tp / n) * math.log2(fn_tp / n) +\n (1 - fn_tp / n) * math.log2(1 - fn_tp / n))\n\n h2 = -((fp_tp / n) * math.log2(fp_tp / n) +\n (1 - fp_tp / n) * math.log2(1 - fp_tp / n))\n\n p00 = 1 if tn == 0 else (tn / n)\n p01 = 1 if fn == 0 else (fn / n)\n p10 = 1 if fp == 0 else (fp / n)\n p11 = 1 if tp == 0 else (tp / n)\n\n h12 = -((tn / n) * math.log2(p00) +\n (fn / n) * math.log2(p01) +\n (fp / n) * math.log2(p10) +\n (tp / n) * math.log2(p11))\n\n mi = h1 + h2 - h12\n\n vi = h1 + h2 - 2 * mi\n return vi",
"def _compute_empirical_fcfreq(self):\n empirical_fcfreq = dict().fromkeys(self.fcs)\n\n for feature, cls in self.fcs:\n empirical_fcfreq[feature, cls] = sum(\n [self.indicator_func((_feature, feature), (_y, cls))\n for _x, _y in self.train_set\n for _feature in _x]\n )\n\n return empirical_fcfreq",
"def compute(\n self,\n ) -> Dict[str, Tensor]:\n results = torch.stack((self.tp, self.fp, self.tn, self.fn), dim=1)\n\n return {f\"group_{i}\": group / group.sum() for i, group in enumerate(results)}",
"def compute_feature_properties(self):\n\n self.valuecounts = {}\n self.unique_values = {}\n self.missing_ratios = {}\n self.counts = {}\n self.codemaps = {}\n for f in self.features:\n # Compute various things\n all_values = [self.data[l].get(f,\"?\") for l in self.data]\n missing_data_ratio = all_values.count(\"?\") / (1.0*len(all_values))\n non_q_values = [v for v in all_values if v != \"?\"]\n counts = {}\n for v in non_q_values:\n counts[v] = non_q_values.count(v)\n unique_values = list(set(non_q_values))\n # Sort unique_values carefully.\n # Possibly all feature values are numeric strings, e.g. \"1\", \"2\", \"3\".\n # If we sort these as strings then we get weird things like \"10\" < \"2\".\n # This can actually matter for things like ordinal models.\n # So convert these to ints first...\n if all([v.isdigit() for v in unique_values]):\n unique_values = list(map(int, unique_values))\n unique_values.sort()\n unique_values = list(map(str, unique_values))\n # ...otherwise, just sort normally\n else:\n unique_values.sort()\n self.unique_values[f] = unique_values\n\n N = len(unique_values)\n self.valuecounts[f] = N\n self.missing_ratios[f] = missing_data_ratio\n self.counts[f] = counts\n self.codemaps[f] = self.build_codemap(unique_values)",
"def _update_info_and_n(self, y_i, h_tilde, phi_p, msr_cov):\n h_i = np.matmul(h_tilde, phi_p)\n # update fisher_info\n L = np.matmul(np.transpose(h_i), np.matmul(msr_cov, h_i)) # placeholder matrix for computations\n self.fisher_info.append(np.add(self.fisher_info[-1], L))\n # update N\n M = np.matmul(np.transpose(h_i), np.matmul(msr_cov, np.transpose(y_i))) #placeholder matrix for computations\n self.N.append(np.add(self.N[-1], M))",
"def test_calculate_fisher_bounds(self):\n\n _bounds = calculate_fisher_bounds(0.03548, 0.00005721408, 0.9)\n self.assertAlmostEqual(_bounds[0], 0.02699778)\n self.assertAlmostEqual(_bounds[1], 0.04662719)",
"def fisher(probs):\r\n stat = -2 * log(array(probs)).sum()\r\n if isnan(stat):\r\n return nan\r\n else:\r\n try:\r\n return chi_high(stat, 2 * len(probs))\r\n except OverflowError as e:\r\n return nan",
"def run(self) -> Dict[str, Union[float, str]]:\n try:\n self.is_run = True\n deque(self, maxlen=0) # feed the entire iterator into a zero-length deque\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n finally:\n self.is_run = False\n\n return info",
"def build_fisher_contingency_table(overlap_count, user_count, gene_count, count):\n table = np.zeros(shape=(2, 2))\n table[0, 0] = overlap_count\n table[0, 1] = user_count - overlap_count\n table[1, 0] = gene_count - overlap_count\n table[1, 1] = count - user_count - gene_count + overlap_count\n\n return table",
"def quality_fis(self,fis):\n correct = 0\n count = 0\n for cl_state in self.classes:\n r,c = cl_state.quality_fis(fis)\n print \"For\",cl_state.name,r,\"/\",c\n correct += r\n count += c\n return (correct,count)",
"def eval_fis(self,fis):\n #res = 0.0\n #for cl_state in self.classes:\n # res += cl_state.eval_fis(fis)\n #print \"=>\",res\n #return 1.0/res\n try:\n correct,count = self.quality_fis(fis)\n except Exception as err:\n print err\n correct = 0\n return correct",
"def ForbesI_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n part1 = (TP + FP) * (TP + FN)\n return (n * TP) / part1\n except Exception:\n return \"None\"",
"def get_perfect_information(self):\n raise NotImplementedError"
]
| [
"0.6735223",
"0.66042775",
"0.62934285",
"0.6065687",
"0.59460366",
"0.58402395",
"0.5761419",
"0.5759288",
"0.57380706",
"0.56777894",
"0.5614086",
"0.5607752",
"0.5601926",
"0.56015635",
"0.55751127",
"0.5574964",
"0.5544454",
"0.5512649",
"0.5482136",
"0.5473166",
"0.5464432",
"0.5455423",
"0.5448921",
"0.5441799",
"0.5431817",
"0.5423886",
"0.5415063",
"0.53960234",
"0.5382902",
"0.5326487"
]
| 0.7226743 | 0 |
Parses the 1d data set into three separate data sets for each axis. Each trial's data is split into [0th slice, ..., (dim1 - 1)th slice] for each x, y, and z. To get only the Nth slices of an axis across all trials, traverse through data_dim_axis by index [N, N + dim1, N + 2dim1, ...] | def convert_1d_to_3d(data_X, data_Y):
    data_X = data_X.tocsr()
    data_dim_x = [] # slices along x-axis (has shape of (total_trials * dim_x, dim_z, dim_y))
    data_dim_x_label = [] # contains (total_trials * dim_x) labels
    data_dim_y = [] # slices along y-axis (has shape of (total_trials * dim_y, dim_z, dim_x))
    data_dim_y_label = [] # contains (total_trials * dim_y) labels
    data_dim_z = [] # slices along z-axis (has shape of (total_trials * dim_z, dim_y, dim_x))
    data_dim_z_label = [] # contains (total_trials * dim_z) labels
    for num_trial in range(data_X.shape[0]):
        label = data_Y[num_trial]
        data_1d = data_X[num_trial]
        data_3d = np.squeeze(np.asarray(data_1d.todense())).reshape((dim_z, dim_y, dim_x))
        for x in range(dim_x):
            x_slice = data_3d[:,:,x]
            # append only if the slice is not empty
            if x_slice.sum() != 0:
                data_dim_x.append(data_3d[:, :, x])
                data_dim_x_label.append(label)
        for y in range(dim_y):
            y_slice = data_3d[:, y, :]
            if y_slice.sum() != 0:
                data_dim_y.append(data_3d[:, y, :])
                data_dim_y_label.append(label)
        for z in range(dim_z):
            z_slice = data_3d[z, :, :]
            if z_slice.sum() != 0:
                data_dim_z.append(data_3d[z, :, :])
                data_dim_z_label.append(label)
    return np.array(data_dim_x), np.array(data_dim_x_label), \
           np.array(data_dim_y), np.array(data_dim_y_label), \
           np.array(data_dim_z), np.array(data_dim_z_label) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_slice_other_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)+shape[1:]",
"def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])",
"def test_slice_zero_length_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[...]\n assert isinstance(out, np.ndarray)\n assert out.shape == shape\n out = dset[:]\n assert isinstance(out, np.ndarray)\n assert out.shape == shape\n if len(shape) > 1:\n out = dset[:, :1]\n assert isinstance(out, np.ndarray)\n assert out.shape[:2] == (0, 1)",
"def slice_timeseries(n_slices,dataset):\n\n n,l=np.shape(dataset)\n\n X = np.reshape(dataset,(n*n_slices,l//n_slices))\n\n print('sliced data shape (nr. of slices, slice length):',np.shape(X))\n print('#####################################')\n \n return X",
"def slice_dims(data_array: sc.DataArray, slices: Dict[str, slice]) -> sc.DataArray:\n out = data_array\n for dim, sl in slices.items():\n out = out[dim, sl]\n return out",
"def test_slice_zero_length_dimension(self):\n for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, shape)\n out = dset[:]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, shape)\n if len(shape) > 1:\n out = dset[:, :1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape[:2], (0, 1))",
"def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)",
"def _chunk_data(X, slices):\n\n # from object array to list\n slices = [sl for sl in slices if len(sl)]\n selected_times = np.hstack([np.ravel(sl) for sl in slices])\n start = np.min(selected_times)\n stop = np.max(selected_times) + 1\n slices_chunk = [sl - start for sl in slices]\n X_chunk = X[:, :, start:stop]\n return X_chunk, slices_chunk",
"def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]",
"def test_write_slices(self):\n dt = np.dtype('(3,)i')\n\n data1 = np.ones((2,), dtype=dt)\n data2 = np.ones((4,5), dtype=dt)\n\n dset = self.f.create_dataset('x', (10,9,11), dtype=dt)\n\n dset[0,0,2:4] = data1\n self.assertArrayEqual(dset[0,0,2:4], data1)\n\n dset[3, 1:5, 6:11] = data2\n self.assertArrayEqual(dset[3, 1:5, 6:11], data2)",
"def test_3d_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/ft/test001.ft3\")\n\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n\n # and the first slice\n assert sdata.shape == (128, 4096)\n assert sdata.dtype == 'float32'\n assert round(sdata[1,2],2) == 25980.13\n assert round(sdata[22,5],2) == -8336.05\n check_ppm_limits(sdic,sdata,0,[147.42, 93.01])\n check_ppm_limits(sdic,sdata,1,[254.92, -142.83])\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)",
"def _extract_data_with_axis(self, data, axis):\n if axis is None:\n return data\n else:\n if len(data.shape) == 2:\n \n print(\"Data shape:\", data.shape)\n if data.shape[1] == 2:\n print(\"Extracting from two columns\")\n axis.data = data[:,0]\n return data[:,1]\n elif data.shape[1] > 2:\n axis.data = data[:,0]\n return data[:,1:]\n else:\n raise Exception()\n \n else:\n raise Exception(\"Other shapes than (N,) and (N,M)\"+\n \" not implemented\")",
"def dataset(self, timestep, data):\n dataX, dataY = [], []\n for i in range(len(data) - timestep):\n a = data[i:i+timestep]\n dataX.append(a)\n dataY.append(data[i + timestep])\n return np.array(dataX), np.array(dataY)",
"def create_dataset(dataset,time_step=1):\n dataX,dataY=[],[]\n for i in range(len(dataset)-time_step):\n a=dataset[i:i+time_step]\n dataX.append(a)\n dataY.append(dataset[i+time_step])\n return np.asarray(dataX),np.asarray(dataY)",
"def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il",
"def mesh_slice(V,n,X,Y,Z):\n from matplotlib import cm\n import mpl_toolkits.mplot3d.axes3d as p3\n import time\n order=np.array([(1,2,0),(2,0,1),(0,1,2)])\n q=np.transpose(V,(order[n])) # See projection for why we could also use take instead.\n if n==0: # Make a less cumbersome and more consistent version of this?\n i,j=X,Y\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n if n==1:\n i,j=Y,Z\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n if n==2:\n i,j=Z,X\n i,j=np.array([i]),np.array([j]).T\n I,J=i,j\n for m in range(j.shape[0]-1): # -1 because we already have the first row as I.\n I=np.vstack((I,i))\n for m in range(i.shape[1]-1):\n J=np.hstack((J,j))\n labels={\n 0:('horizontal axial (mm)','height (mm)'),\n 1:('horizontal radial (mm)','horizontal axial (mm)'),\n 2:('height (mm)','horizontal radial (mm)')\n } \n class animated(object): # 4D, plots f(x,y,z0) specific to mesh_slice.\n def __init__(self,I,J,q):\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection='3d')\n self.I,self.J=I,J\n self.q=q[:,0,:]\n self.surf=self.ax.plot_surface(self.J,self.I,self.q,cmap=cm.coolwarm,antialiased=False)\n def drawNow(self,ii,q,n):\n self.surf.remove()\n self.slc=q[:,ii,:]\n self.surf=self.ax.plot_surface(self.J,self.I,self.slc,cmap=cm.coolwarm,antialiased=False)\n plt.ylabel(labels[n][1])\n plt.xlabel(labels[n][0])\n #plt.title(ii) #Optional: this moves down during animation.\n plt.draw() # redraw the canvas\n time.sleep(0.01)\n self.fig.show()\n anim=animated(I,J,q)\n for ii in range(q.shape[1]):\n if ii==q.shape[1]-1:\n plt.title('Animation complete.')\n anim.drawNow(ii,q,n)\n return plt.show()",
"def get_dataset_index(\n dataset: xr.Dataset, time_dataset_index: xr.Dataset\n) -> xr.Dataset:\n dataset_index = xr.Dataset()\n n_steps = len(next(iter(time_dataset_index.values())))\n for dim, var in dataset.items():\n if set(var.dims).intersection(time_dataset_index.keys()):\n time_dim = var.dims[0]\n assert time_dim in time_dataset_index, (\n f\"'{time_dim}' does not seems to be a time \"\n f\"dimensions in {time_dataset_index.keys()}. \"\n \"For the moment, only time dimension as first dim is supported.\"\n )\n dataset_index[dim] = time_dataset_index[time_dim]\n else:\n if not onp.shape(var):\n dataset_index[dim] = xr.DataArray(onp.arange(n_steps), dims=(\"step\",))\n else:\n values_atleast_1d = onp.atleast_1d(var.values)\n # grid = onp.indices(values_atleast_1d.shape)\n flat_idx = onp.arange(len(values_atleast_1d.ravel()))\n dataset_index[dim] = xr.DataArray(\n onp.outer(onp.arange(n_steps), flat_idx),\n dims=(\"step\", dim + \"_flat_idx\"),\n )\n return dataset_index",
"def _build_slices(dataset, patch_shape, stride_shape):\n slices = []\n if dataset.ndim == 4:\n in_channels, i_z, i_y, i_x = dataset.shape\n else:\n i_z, i_y, i_x = dataset.shape\n\n k_z, k_y, k_x = patch_shape\n s_z, s_y, s_x = stride_shape\n z_steps = SliceBuilder._gen_indices(i_z, k_z, s_z)\n for z in z_steps:\n y_steps = SliceBuilder._gen_indices(i_y, k_y, s_y)\n for y in y_steps:\n x_steps = SliceBuilder._gen_indices(i_x, k_x, s_x)\n for x in x_steps:\n slice_idx = (\n slice(z, z + k_z),\n slice(y, y + k_y),\n slice(x, x + k_x)\n )\n if dataset.ndim == 4:\n slice_idx = (slice(0, in_channels),) + slice_idx\n slices.append(slice_idx)\n return slices",
"def divide_with_stride(arr: np.ndarray) -> List[np.ndarray]:\n\n result_list: List[np.ndarray] = []\n # slice by z axis\n for z in range(0, z_len := arr.shape[0], 16):\n if z + 31 >= z_len:\n z = z_len - 16\n z_arr: np.ndarray = arr[z:z+16]\n\n # slice by y axis\n for y in range(0, y_len := arr.shape[1], 16):\n y_arr: np.ndarray = z_arr[:, y:y+16]\n\n # slice by x axis\n for x in range(0, x_len := arr.shape[2], 16):\n x_arr: np.ndarray = y_arr[:, :, x:x+16]\n if len(set(x_arr.shape)) == 1 and x_arr.shape[0] == 16:\n result_list.append(x_arr)\n \n return result_list",
"def _data_with_axis(self, axis):\n shpl = list(self.data.shape)\n \n if len(shpl) == 2:\n shpl[1] += 1\n shp = tuple(shpl)\n data = numpy.zeros(shp,dtype=self.data.dtype)\n data[:,1:] = self.data\n data[:,0] = axis.data \n elif len(shpl) == 1:\n shpl.append(2)\n shp = tuple(shpl)\n data = numpy.zeros(shp,dtype=self.data.dtype)\n data[:,1] = self.data\n data[:,0] = axis.data\n else:\n raise Exception(\"Other shapes than (N,) and (N,M) not implemented\")\n return data",
"def separate_array_by_last_dim(adata,bx,atype):\n\n if(len(atype)==1):\n cx=adata\n else:\n cx=tuple([adata[...,slice(bx[i],bx[i+1])].astype(atype[i]) for i in np.arange(0,len(bx)-1)])\n\n return cx",
"def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r",
"def time_slices(field=['uu1'], datadir='data/', proc=-1, extension='xz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='plane[0]', dtstep=1, deltat=0,\n oldfile=False, outfile=\"\"):\n\n import pylab as plt\n\n datadir = os.path.expanduser(datadir)\n if outfile != \"\":\n outslice = open(outfile, \"w\")\n filename = []\n if proc < 0:\n for i in field:\n filename += [datadir + '/slice_' + i + '.' + extension]\n else:\n for i in field:\n filename += [datadir + '/proc' +\n str(proc) + '/slice_' + i + '.' + extension]\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane = []\n infile = []\n for i in filename:\n plane += [np.zeros((vsize, hsize), dtype=precision)]\n\n infile += [npfile(i, endian=format)]\n\n ifirst = True\n islice = 0\n plotplane = []\n dt = 0\n nextt = tmin\n while True:\n try:\n raw_data = []\n for i in infile:\n raw_data += [i.fort_read(precision)]\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[0][-1]\n for i in range(len(raw_data)):\n plane[i] = raw_data[i][:-1].reshape(vsize, hsize)\n else:\n t = raw_data[0][-2]\n for i in range(len(raw_data)):\n plane[i] = raw_data[i][:-2].reshape(vsize, hsize)\n\n exec('tempplane =' + transform)\n\n if t > tmin and t < tmax:\n if dt == 0:\n plotplane += tempplane.tolist()\n\n if ifirst:\n #print \"----islice----------t---------min-------max-------delta\" # Python 2\n print(\"----islice----------t---------min-------max-------delta\")\n #print \"%10i %10.3e %10.3e %10.3e %10.3e\" % \\ # Python 2\n #(islice, t, tempplane.min(), tempplane.max(), # Python 2\n #tempplane.max() - tempplane.min()) # Python 2\n print(\"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(islice, t, tempplane.min(), tempplane.max(), tempplane.max() - tempplane.min()))\n if outfile != \"\":\n outslice.write(\n #\"%10i %10.3e %10.3e %10.3e %10.3e\" % # Python 2\n #(islice, # Python 2\n #t, # Python 2\n #tempplane.min(), # Python 2\n #tempplane.max(), # Python 2\n #tempplane.max() - # Python 2\n #tempplane.min())) # Python 2\n \"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(\n islice,\n t,\n tempplane.min(),\n tempplane.max(),\n tempplane.max() -\n tempplane.min())) \n outslice.write(\"\\n\")\n\n ifirst = False\n islice += 1\n nextt = t + deltat\n if deltat == 0:\n dt = (dt + 1) % dtstep\n elif t >= nextt:\n dt = 0\n nextt = t + deltat\n else:\n dt = 1\n\n ax = plt.axes()\n ax.set_xlabel('t')\n ax.set_ylabel('y')\n ax.set_ylim\n plt.imshow(np.array(plotplane).reshape(islice, vsize).transpose(),\n vmin=amin, vmax=amax)\n manager = plt.get_current_fig_manager()\n manager.show()\n\n for i in infile:\n i.close()\n if outfile != \"\":\n outslice.close()",
"def _process_data(data, band):\n\n meta = {key:value for key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)",
"def Reshape(xdata, ydata, zdata):\r\n N = zdata.shape[0]\r\n Nx = list(ydata).count(ydata[0])\r\n Ny = N/Nx\r\n zz = np.copy(zdata)\r\n zz.shape = (Ny,Nx)\r\n xx = xdata[:Nx]\r\n yy = np.zeros(Ny)\r\n for u in range(Ny):\r\n yy[u] = ydata[Nx*u]\r\n return xx,yy,zz",
"def test_slice_of_length_zero(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, ), (2, 2, ), (2, 1, 5)]):\n dset = f.create_dataset('x%d'%i, data=np.zeros(shape, np.int32))\n assert dset.shape == shape\n out = dset[1:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (0,)+shape[1:]",
"def getSplittedDatasetInNumpy(self, xFold_step, xFold_type, depth_first=False, onehot=False):\n if not self.__read_in_dataset:\n (x_train, y_train), (x_eval, y_eval), (x_test, y_test) = self.loadDataset()\n\n self.__read_in_images, self.__read_in_labels = self.convertDatasetToNumpy(x_train, y_train, x_eval, y_eval,\n x_test, y_test, self._read_in_shape, self.__read_in_size,\n self.__num_classes, depth_first, onehot)\n self.__read_in_dataset = True\n\n return self.prepare_dataset(xFold_step, xFold_type)",
"def slice_to_cube(self, axis, chunk, **kwargs):\n if self.data.ndim == 3:\n raise cu.CubeError(4, 'Can only slice a hypercube into a cube')\n\n item = [slice(None, None, None) for _ in range(4)]\n if isinstance(chunk, tuple):\n if cu.iter_isinstance(chunk, (u.Quantity, u.Quantity)):\n pixel0 = cu.convert_point(chunk[0].value, chunk[0].unit,\n self.axes_wcs, axis)\n pixel1 = cu.convert_point(chunk[1].value, chunk[1].unit,\n self.axes_wcs, axis)\n item[axis] = slice(pixel0, pixel1, None)\n elif cu.iter_isinstance((chunk, int, int)):\n item[axis] = slice(chunk[0], chunk[1], None)\n else:\n raise cu.CubeError(5, \"Parameters must be of the same type\")\n newdata = self.data[item].sum(axis)\n else:\n unit = chunk.unit if isinstance(chunk, u.Quantity) else None\n pixel = cu.convert_point(chunk, unit, self.axes_wcs, axis)\n item[axis] = pixel\n newdata = self.data[item]\n wcs_indices = [0, 1, 2, 3]\n wcs_indices.remove(3 - axis)\n newwcs = wu.reindex_wcs(self.axes_wcs, np.array(wcs_indices))\n if axis == 2 or axis == 3:\n newwcs = wu.add_celestial_axis(newwcs)\n newwcs.was_augmented = True\n cube = Cube(newdata, newwcs, meta=self.meta, **kwargs)\n return cube",
"def test_slice_of_length_zero(self):\n for i, shape in enumerate([(3,), (2, 2,), (2, 1, 5)]):\n dset = self.f.create_dataset('x%d'%i, data=np.zeros(shape, int), maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[1:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (0,)+shape[1:])",
"def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]"
]
| [
"0.6175714",
"0.6072633",
"0.58613235",
"0.5805677",
"0.5799606",
"0.5719681",
"0.5651642",
"0.56276065",
"0.54468656",
"0.53935266",
"0.5339818",
"0.5322116",
"0.53019875",
"0.52620435",
"0.5252861",
"0.5247714",
"0.51936585",
"0.5185298",
"0.5181618",
"0.5177174",
"0.5160001",
"0.51541746",
"0.515059",
"0.5143168",
"0.5140722",
"0.513017",
"0.5120287",
"0.5104389",
"0.51037246",
"0.50939345"
]
| 0.6850255 | 0 |
This function loads 20newsgroups data from sklearn, subsamples the data set, assigns a label to each category, and splits data into train, val, and test sets. | def load_data():
    # Load data from categories
    comp = fetch_20newsgroups(subset='all', categories=['comp.graphics', 'comp.sys.mac.hardware', 'comp.windows.x'], \
        shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))
    science = fetch_20newsgroups(subset='all', categories=['sci.crypt', 'sci.electronics', 'sci.space'], \
        shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))
    politics = fetch_20newsgroups(subset='all', categories=['talk.politics.guns', 'talk.politics.mideast'], \
        shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))
    religion = fetch_20newsgroups(subset='all', categories=['alt.atheism', 'soc.religion.christian'], \
        shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))
    recreation = fetch_20newsgroups(subset='all', categories=['rec.autos', 'rec.sport.baseball', 'rec.sport.hockey'], \
        shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))
    # Print total number of documents
    data_len = [len(comp.data), len(science.data), len(politics.data), len(recreation.data), len(religion.data)]
    # Subsample classes to create a balanced dataset
    sub_k = min(data_len)
    comp.data, comp.target = [list(t) for t in zip(*random.sample(list(zip(comp.data, comp.target)), sub_k))]
    science.data, science.target = [list(t) for t in zip(*random.sample(list(zip(science.data, science.target)), sub_k))]
    politics.data, politics.target = [list(t) for t in zip(*random.sample(list(zip(politics.data, politics.target)), sub_k))]
    religion.data, religion.target = [list(t) for t in zip(*random.sample(list(zip(religion.data, religion.target)), sub_k))]
    recreation.data, recreation.target = [list(t) for t in zip(*random.sample(list(zip(recreation.data, recreation.target)), sub_k))]
    # Subcategories labels
    subcat_comp = np.array(comp.target)
    subcat_scien = np.array(science.target) + len(comp.target_names)
    subcat_polit = np.array(politics.target) + len(comp.target_names) + len(science.target_names)
    subcat_rel = np.array(religion.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names)
    subcat_rec = np.array(recreation.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names) + len(religion.target_names)
    # Assign labels to train data based on categories
    y_comp = np.ones(len(comp.data))
    y_scien = 2*np.ones(len(science.data))
    y_polit = 3*np.ones(len(politics.data))
    y_rel = 4*np.ones(len(religion.data))
    y_rec = 5*np.ones(len(recreation.data))
    labels = np.concatenate((y_comp,y_scien,y_polit,y_rel,y_rec), axis=None)
    # Computers
    train_comp, test_comp, y_train_comp, y_test_comp, subcat_comp_train, subcat_comp_test = train_test_split(comp.data, y_comp, subcat_comp, test_size=0.2, random_state=42)
    train_comp, val_comp, y_train_comp, y_val_comp, subcat_comp_train, subcat_comp_val = train_test_split(train_comp, y_train_comp, subcat_comp_train, test_size=0.25, random_state=42)
    # Sciences
    train_scien, test_scien, y_train_scien, y_test_scien, subcat_scien_train, subcat_scien_test = train_test_split(science.data, y_scien, subcat_scien, test_size=0.2, random_state=42)
    train_scien, val_scien, y_train_scien, y_val_scien, subcat_scien_train, subcat_scien_val = train_test_split(train_scien, y_train_scien, subcat_scien_train, test_size=0.25, random_state=42)
    # Politics
    train_polit, test_polit, y_train_polit, y_test_polit, subcat_polit_train, subcat_polit_test = train_test_split(politics.data, y_polit, subcat_polit, test_size=0.2, random_state=42)
    train_polit, val_polit, y_train_polit, y_val_polit, subcat_polit_train, subcat_polit_val = train_test_split(train_polit, y_train_polit, subcat_polit_train, test_size=0.25, random_state=42)
    # Religion
    train_rel, test_rel, y_train_rel, y_test_rel, subcat_rel_train, subcat_rel_test = train_test_split(religion.data, y_rel, subcat_rel, test_size=0.2, random_state=42)
    train_rel, val_rel, y_train_rel, y_val_rel, subcat_rel_train, subcat_rel_val = train_test_split(train_rel, y_train_rel, subcat_rel_train, test_size=0.25, random_state=42)
    # Recreation
    train_rec, test_rec, y_train_rec, y_test_rec, subcat_rec_train, subcat_rec_test = train_test_split(recreation.data, y_rec, subcat_rec, test_size=0.2, random_state=42)
    train_rec, val_rec, y_train_rec, y_val_rec, subcat_rec_train, subcat_rec_val = train_test_split(train_rec, y_train_rec, subcat_rec_train, test_size=0.25, random_state=42)
    # Corpus from all categories in train set
    newsgroups_train = train_comp + train_scien + train_polit + train_rel + train_rec
    #print(f"Total number of documents in all categories in the train set is {len(newsgroups_train)}.")
    train_labels = np.concatenate((y_train_comp,y_train_scien,y_train_polit,y_train_rel,y_train_rec), axis=None)
    #print(train_labels.shape)
    train_subcat = np.concatenate((subcat_comp_train,subcat_scien_train,subcat_polit_train,subcat_rel_train,subcat_rec_train), axis=None)
    #print(train_subcat.shape)
    # Corpus from all categories in test set
    newsgroups_test = test_comp + test_scien + test_polit + test_rel + test_rec
    test_labels = np.concatenate((y_test_comp,y_test_scien,y_test_polit,y_test_rel,y_test_rec), axis=None)
    test_subcat = np.concatenate((subcat_comp_test,subcat_scien_test,subcat_polit_test,subcat_rel_test,subcat_rec_test), axis=None)
    # Corpus from all categories in validation set
    newsgroups_val = val_comp + val_scien + val_polit + val_rel + val_rec
    val_labels = np.concatenate((y_val_comp,y_val_scien,y_val_polit,y_val_rel,y_val_rec), axis=None)
    val_subcat = np.concatenate((subcat_comp_val,subcat_scien_val,subcat_polit_val,subcat_rel_val,subcat_rec_val), axis=None)
    # Data Split
    total = len(test_labels) + len(val_labels) + len(train_labels)
    return newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, train_subcat, test_subcat, val_subcat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def splitData(groupList, trainSize):\r\n from sklearn.model_selection import StratifiedShuffleSplit\r\n\r\n groupList[0]['text'] = cleanRealTexts(list(groupList[0]['text']))\r\n\r\n classLabels = np.array([])\r\n for i, group in enumerate(groupList):\r\n classLabels = np.append(classLabels, np.repeat(i, len(group)))\r\n\r\n classData = pd.concat(groupList).reset_index(drop=True)\r\n\r\n splits = list(StratifiedShuffleSplit(n_splits=i,\r\n test_size=1-trainSize,\r\n train_size=trainSize,\r\n random_state=0).split(X=classData, y=classLabels))[0]\r\n trainIdx, testIdx = splits\r\n\r\n trainData = classData.iloc[trainIdx]\r\n testData = classData.iloc[testIdx]\r\n trainLabels = classLabels[trainIdx]\r\n testLabels = classLabels[testIdx]\r\n\r\n return [[trainData, trainLabels], [testData, testLabels]]",
"def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples",
"def splitfeatdata(rawdata, fold=10):\n\n labeldata = []\n for row in rawdata:\n\n # if row[2] > 0:\n # label = 'pos'\n # elif row[2] == 0:\n # label = 'neutral'\n # else:\n # label = 'neg'\n\n\n label = row[2]\n labeldata.append((row[4], label))\n\n\n random.shuffle(labeldata)\n\n size = int(math.floor(len(labeldata) / 10.0))\n # train = labeldata[:split]\n # test = labeldata[split:]\n\n # code for k-fold validation referred from:\n # http://stackoverflow.com/questions/16379313/how-to-use-the-a-10-fold-cross-validation-with-naive-bayes-classifier-and-nltk\n claccuracy = []\n for i in range(fold):\n test_this_round = labeldata[i*size:][:size]\n train_this_round = labeldata[:i*size] + labeldata[(i+1)*size:]\n\n acc = myclassifier(train_this_round, test_this_round)\n\n claccuracy.append(acc)\n\n\n\n print os.getcwd()\n\n\n mySentClassifier = nltk.NaiveBayesClassifier.train(labeldata)\n f = open('../../../mySentClassifier2.pickle', 'wb')\n dump(mySentClassifier, f)\n f.close()\n\n\n return claccuracy",
"def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)",
"def load_data_and_labels_another():\n\n y_train = []\n y_valid =[]\n y_test = []\n x_train =[]\n x_valid = []\n x_test = []\n index = []\n labels = {}\n topics = ['finance' , 'international', 'legal', 'social','tech','hemp']\n for idx, topic in enumerate(topics):\n folder_name = \"data/\" + topic\n all_files = os.listdir(folder_name)\n clean_news = []\n # read in files in each topic's folder\n for single_file in all_files:\n raw_data_file_name = os.path.join(folder_name, single_file)\n news = list(open(raw_data_file_name, mode = 'rb').readlines())\n clean_news = clean_news + news\n clean_news = [s.strip() for s in clean_news]\n clean_news = [clean_str(s) for s in clean_news]\n clean_news = [normalizing_str(s) for s in clean_news]\n clean_news = [extract_clean_words(s) for s in clean_news]\n clean_news = [s for s in clean_news if s is not None]\n length_of_news = len(clean_news)\n test_num = int(length_of_news * 0.3)\n valid_num = int(length_of_news * 0.3)\n train_num = length_of_news - test_num - valid_num\n clean_news = np.array(clean_news)\n\n \n \n #x_text = x_text + clean_news\n if topic == 'finance':\n y_topic = [[1,0,0,0,0,0] for _ in clean_news]\n elif topic == 'international':\n y_topic = [[0,1,0,0,0,0] for _ in clean_news]\n elif topic == 'legal':\n y_topic = [[0,0,1,0,0,0] for _ in clean_news]\n elif topic == 'social':\n y_topic = [[0,0,0,1,0,0] for _ in clean_news]\n elif topic == 'tech':\n y_topic = [[0,0,0,0,1,0] for _ in clean_news]\n elif topic == 'hemp':\n y_topic = [[0,0,0,0,0,1] for _ in clean_news]\n y_topic = np.array(y_topic)\n\n #randomly shuffle the data and divide them into train, valid and test\n np.random.seed(9)\n indices = np.random.permutation(clean_news.shape[0])\n training_idx = indices[:train_num]\n valid_idx = indices[train_num:train_num+valid_num]\n test_idx = indices[train_num+valid_num: ]\n # Testing: no record is missed.\n #tem = np.concatenate((training_idx, valid_idx, test_idx),axis = 0)\n #print(tem.sort()) \n train_piece_x = list(clean_news[training_idx])\n valid_piece_x = list(clean_news[valid_idx])\n test_piece_x = list(clean_news[test_idx])\n train_piece_y = list(y_topic[training_idx])\n valid_piece_y = list(y_topic[valid_idx])\n test_piece_y = list(y_topic[test_idx])\n y_train = y_train + train_piece_y\n y_valid = y_valid + valid_piece_y\n y_test = y_test + test_piece_y\n x_train = x_train + train_piece_x\n x_valid = x_valid + valid_piece_x\n x_test = x_test + test_piece_x\n\n # Store the data in data_pickle.\n y_train = np.array(y_train)\n y_valid = np.array(y_valid)\n y_test = np.array(y_test)\n file = open('data_pickle', 'wb')\n pickle.dump([x_train,x_valid,x_test,y_train,y_valid,y_test], file)\n file.close()\n print(\"-------------------------------------------------------\")\n print(\"*****Dumped Data_pickle*****\")",
"def divide_train_test(self, sentences, tags):\n logging.info('Dividindo dataset em 10 folds')\n kf = KFold(n_splits=10)\n train, test = [], []\n for train_index, test_index in kf.split(sentences):\n train.append(train_index)\n test.append(test_index)\n return train, test",
"def split_data(images, labels):\n images, labels = shuffle_data_pair(images, labels)\n\n num_covid_points = sum(map(lambda label: label == 0, labels))\n\n # Calculate split\n num_test = int(num_covid_points * 0.1)\n num_covid_train = num_covid_points - num_test * 2\n num_other_train = int(num_covid_train * 1.1)\n\n # (train, validate, test) points added\n num_points_added = [\n [0, 0, 0], # COVID-19\n [0, 0, 0], # Viral pneumonia\n [0, 0, 0] # Normal\n ]\n\n # Datasets\n images_train = []\n labels_train = []\n images_validate = []\n labels_validate = []\n images_test = []\n labels_test = []\n\n # Add images and labels to datasets\n notifier.send(\" Adding images and labels to dataset...\")\n for i, label in enumerate(labels):\n print(f\" Point: {i} / {len(labels)}\")\n completed_labels = [False, False, False] # Enough of label added\n if all(completed_labels):\n break\n for j in range(3): # 0: COVID-19, 1: Viral pneumonia, 2: Normal\n if completed_labels[j]:\n continue\n if label == j:\n # Add training data\n can_add_training = False\n if j == 0: # COVID-19\n if num_points_added[j][0] < num_covid_train:\n can_add_training = True\n num_points_added[j][0] += 1\n elif num_points_added[j][0] < num_other_train: # Not COVID-19\n can_add_training = True\n num_points_added[j][0] += 1\n if can_add_training:\n images_train.append(images[i])\n labels_train.append(labels[i])\n break\n\n # Add validation data\n if num_points_added[j][1] < num_test:\n num_points_added[j][1] += 1\n images_validate.append(images[i])\n labels_validate.append(labels[i])\n break\n\n # Add testing data\n if num_points_added[j][2] < num_test:\n num_points_added[j][2] += 1\n images_test.append(images[i])\n labels_test.append(labels[i])\n break\n\n # Point couldn't be added anywhere: label is complete\n completed_labels[j] = True\n break\n\n # Shuffle all data\n notifier.send(\" Shuffling data...\")\n images_train, labels_train = shuffle_data_pair(\n images_train, labels_train\n )\n images_validate, labels_validate = shuffle_data_pair(\n images_validate, labels_validate\n )\n images_test, labels_test = shuffle_data_pair(\n images_test, labels_test\n )\n\n if PLOT_LABELS:\n # Plot data frequencies\n plt.hist(labels, bins=3)\n plt.title(\"Labels\")\n\n plt.hist(labels_train, bins=3)\n plt.title(\"Train Labels\")\n\n plt.hist(labels_validate, bins=3)\n plt.title(\"Validate Labels\")\n\n plt.hist(labels_test, bins=3)\n plt.title(\"Test Labels\")\n\n plt.show()\n\n # Make labels categorical\n notifier.send(\" Making labels categorical: train...\")\n labels_train = tf.keras.utils.to_categorical(labels_train)\n notifier.send(\" Making labels categorical: validate...\")\n labels_validate = tf.keras.utils.to_categorical(labels_validate)\n notifier.send(\" Making labels categorical: test...\")\n labels_test = tf.keras.utils.to_categorical(labels_test)\n\n notifier.send(\" Converting data to NumPy arrays...\")\n return \\\n np.array(images_train), np.array(images_validate), np.array(images_test), \\\n np.array(labels_train), np.array(labels_validate), np.array(labels_test)",
"def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set",
"def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)",
"def load_data(limit=0, split=0.8):\n # Partition off part of the train data for evaluation\n train_data, _ = thinc.extra.datasets.imdb()\n random.shuffle(train_data)\n train_data = train_data[-limit:]\n texts, labels = zip(*train_data)\n cats = [{'POSITIVE': bool(y)} for y in labels]\n split = int(len(train_data) * split)\n return (texts[:split], cats[:split]), (texts[split:], cats[split:])",
"def load_data(n_folds=10):\n\n sss = sssplit(n_splits=n_folds, test_size=.1, random_state=np.random.RandomState(830452))\n X, y = get_transformed_data()\n (X_train, y_train), (X_test, y_test) = ([],[]),([],[])\n\n for train_idx, test_idx in sss.split(X, y):\n X_train.append(X[train_idx])\n y_train.append(y[train_idx])\n X_test.append(X[test_idx])\n y_test.append(y[test_idx])\n\n return (X_train, y_train), (X_test, y_test)",
"def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels",
"def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes",
"def load_data_kfold_10_test(train_pos_data_path, train_neg_data_path, stopwords, test_category, lower=True):\n\n\ttrain_files_pos = [join(train_pos_data_path, f) for f in listdir(train_pos_data_path) if isfile(join(train_pos_data_path, f))]\n\ttrain_files_neg = [join(train_neg_data_path, f) for f in listdir(train_neg_data_path) if isfile(join(train_neg_data_path, f))]\n\n\ttrain_files_pos.sort(key = lambda x: x.split(\"_\")[0].replace(\"cv\", \"\"))\n\ttrain_files_neg.sort(key = lambda x: x.split(\"_\")[0].replace(\"cv\", \"\"))\n\n\tprint(\"The count of all positive files is: \", len(train_files_pos))\n\tprint(\"The count of all negative files is: \", len(train_files_neg))\n\n\tall_pos_docs = collect_train_data(train_files_pos, stopwords, lower)\n\tall_neg_docs = collect_train_data(train_files_neg, stopwords, lower)\n\n\tpos_train = []\n\tneg_train = []\n\tpos_test = []\n\tneg_test = []\n\tremove_pos = []\n\tremove_neg = []\n\n\tfor idx, pos_entry in enumerate(all_pos_docs):\n\t\t# If the index falls into the test category\n\t\tif (idx%10) == test_category:\n\t\t\t# Add the entry to the positive test set\n\t\t\tpos_test.append(pos_entry)\n\t\telse:\n\t\t\t# Add the entry to the positive train set\n\t\t\tpos_train.append(pos_entry)\n\n\n\t# Do the same for the negative set\n\tfor idx, neg_entry in enumerate(all_neg_docs):\n\t\t# If the index falls into the test category\n\t\tif (idx%10) == test_category:\n\t\t\t# Add the entry to the negative test set\n\t\t\tneg_test.append(neg_entry)\n\t\telse:\n\t\t\tneg_train.append(neg_entry)\n\n\tprint(\"The size of the positive training set is: \", len(pos_train))\n\tprint(\"The size of the negative training set is: \", len(neg_train))\n\tprint(\"The size of the positive test set is: \", len(pos_test))\n\tprint(\"The size of the negative test set is: \", len(neg_test))\n\n\treturn pos_train, pos_test, neg_train, neg_test",
"def create_data_splits(data_dir):\n splits = ['train', 'val', 'test']\n for split in splits:\n print(\"\\n\" + \"Creating {} split\\n\".format(split) + \"-\"*30)\n src_dir = os.path.join(data_dir, split)\n labels = []\n for root, dirs, files in os.walk(src_dir):\n for folder in dirs:\n if folder == 'reactions':\n continue\n elif folder == 'source-tweets':\n for count, file in enumerate(files):\n if file.startswith('.') or file.startswith('structure'):\n continue\n src_file_path = os.path.join(root, file)\n with open(src_file_path, 'r') as j:\n annotation = json.load(j)\n labels.append(convert_annotations(annotation, string = False))\n true, false, unverif = get_label_distribution(labels)\n print(\"\\nNo. of labels = \", len(labels))\n print(\"True labels = {:.2f} %\".format(true*100))\n print(\"False labels = {:.2f} %\".format(false*100))\n print(\"Unverified labels = {:.2f} %\".format(unverif*100))\n \n print(\"\\nGetting the source tweets in one file with labels..\\n\")\n final_data_file = './data/pheme/{}.tsv'.format(split)\n c=0\n # getting the source tweets in one file with labels\n with open(final_data_file, 'a+', encoding = 'utf-8', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter='\\t')\n # csv_writer.writerow(['text', 'label'])\n for root, dirs, files in os.walk(src_dir):\n for folder in dirs:\n if c == len(labels):\n break\n if folder == 'reactions':\n continue\n else:\n src_tweet_file = os.path.join(root, folder, 'source-tweets')\n src_tweet_file = src_tweet_file + '/{}.json'.format(folder)\n with open (src_tweet_file, 'r', encoding = 'utf-8') as j:\n src_tweet= json.load(j)\n text = src_tweet['text'].replace('\\n', ' ')\n text = text.replace('\\t', ' ')\n csv_writer.writerow([text, labels[c]])\n c+=1\n if c%500 == 0:\n print(\"{} done...\".format(c))\n return None",
"def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ",
"def load_data_kfold_10_test_val(train_pos_data_path, train_neg_data_path, stopwords, val_category, test_category):\n\n\ttrain_files_pos = [join(train_pos_data_path, f) for f in listdir(train_pos_data_path) if isfile(join(train_pos_data_path, f))]\n\ttrain_files_neg = [join(train_neg_data_path, f) for f in listdir(train_neg_data_path) if isfile(join(train_neg_data_path, f))]\n\n\ttrain_files_pos.sort(key = lambda x: x.split(\"_\")[0].replace(\"cv\", \"\"))\n\ttrain_files_neg.sort(key = lambda x: x.split(\"_\")[0].replace(\"cv\", \"\"))\n\n\tprint(\"The count of all positive files is: \", len(train_files_pos))\n\tprint(\"The count of all negative files is: \", len(train_files_neg))\n\n\tall_pos_docs = collect_train_data(train_files_pos, stopwords, lower=True)\n\tall_neg_docs = collect_train_data(train_files_neg, stopwords, lower=True)\n\n\tpos_train = []\n\tneg_train = []\n\tpos_val = []\n\tneg_val = []\n\tpos_test = []\n\tneg_test = []\n\n\tfor idx, pos_entry in enumerate(all_pos_docs):\n\t\t# If the index falls into the test category\n\t\tif (idx%10) == test_category:\n\t\t\t# Add the entry to the positive test set\n\t\t\tpos_test.append(pos_entry)\n\t\telif (idx%10) == val_category:\n\t\t\t# Add the entry to the positive validation set\n\t\t\tpos_val.append(pos_entry)\n\t\telse:\n\t\t\t# Add the entry to the positive train set\n\t\t\tpos_train.append(pos_entry)\n\n\n\t# Do the same for the negative set\n\tfor idx, neg_entry in enumerate(all_neg_docs):\n\t\t# If the index falls into the test category\n\t\tif (idx%10) == test_category:\n\t\t\t# Add the entry to the negative test set\n\t\t\tneg_test.append(neg_entry)\n\t\telif (idx%10) == val_category:\n\t\t\t# Add the entry to the negative validation set\n\t\t\tneg_val.append(neg_entry)\n\t\telse:\n\t\t\tneg_train.append(neg_entry)\n\n\tprint(\"The size of the positive training set is: \", len(pos_train))\n\tprint(\"The size of the negative training set is: \", len(neg_train))\n\tprint(\"The size of the positive validation set is: \", len(pos_val))\n\tprint(\"The size o the negative validation set is: \", len(neg_val))\n\tprint(\"The size of the positive test set is: \", len(pos_test))\n\tprint(\"The size of the negative test set is: \", len(neg_test))\n\n\treturn pos_train, pos_val, pos_test, neg_train, neg_val, neg_test",
"def get_newsgroups(categories = None, n_articles = 10):\n \n remove = ('headers', 'footers', 'quotes')\n newsgroups = fetch_20newsgroups(subset = 'train', remove = remove, categories = categories)\n \n ind = np.random.choice(len(newsgroups.data), size = n_articles, replace = False)\n news = [newsgroups.data[i] for i in ind]\n labels = [newsgroups.target[i] for i in ind]\n \n words = [' '.join(filter(str.isalpha, raw.lower().split())) for raw in\n news]\n\n vectorizer = CountVectorizer()\n vectorizer.fit(words)\n wordbank = vectorizer.get_feature_names()\n \n bow_sparse = vectorizer.transform(words)\n bow = np.array(csr_matrix.todense(bow_sparse))\n \n return (bow, labels, wordbank)",
"def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label",
"def train_val_test_split(data):\n raise NotImplementedError",
"def train_and_test(self, data):\n\n np.random.shuffle(data)\n datalist = self.unpack_data(data)\n\n logger.info('[*] 75-25 partition of datasets ...')\n\n markline1 = math.floor(0.75*(len(datalist['features'])))\n markline2 = math.floor(0.75*len(datalist['labels']))\n\n train_features = datalist['features'][:(markline1)]\n test_features = datalist['features'][(markline1):]\n \n train_labels = datalist['labels'][:(markline2)]\n test_labels = datalist['labels'][(markline2):]\n\n logger.info('[*] Training started with 75% Dataset ...')\n\n self.knn_model.fit(train_features, train_labels)\n\n logger.info('[*] Testing started with 25% Dataset ...')\n print('\\n/---------------Accuracy----------------/') \n \n accuracy = self.knn_model.score(train_features, train_labels)\n print('Test set accuracy {:.2f} %'.format(accuracy*100))\n\n if accuracy < 0.40:\n logger.warning('[-.-!] Thanks for tryin\\' but this machine ain\\'t learning.')\n\n return True",
"def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test",
"def load_classification_dataset(step, do_lower_case,data_type,data_subtype,use_syntetic_data):\n assert step in ['train', 'test']\n binary = False \n undersample_majority = False\n\n paths = ['~/Github/Data/Patient/NIRADS/PET_CT_NIRADS.xlsx', '~/Github/Data/Patient/NIRADS/MR_NIRADS_2018.xlsx','~/Github/Data/Patient/NIRADS/MR_NIRADS.xlsx']\n if data_type == 'ct':\n data_r = pd.read_excel(paths[0])\n else:\n data_r = pd.read_excel(paths[1])\n data_r.append(pd.read_excel(paths[2]), ignore_index = True, sort=False)\n\n data_p,data_n, y_p, y_n = tc.text_cleaning(data_r, None, data_target='section') \n\n if data_subtype == 'primary':\n data = data_p\n y = y_p -1\n else:\n data = data_n\n y = y_n -1\n\n if binary:\n y[y<2]=0\n y[y>0]=1\n\n y_dist = [np.sum(y==x) for x in np.unique(y)]\n print(\"Distribution of all labels: \", y_dist, \"\\n\\n\")\n\n train_text, test_text, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=1)\n\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels: \", y_dist, \"\\n\\n\")\n\n if step =='train':\n if use_syntetic_data:\n data_syntetic = pd.read_csv('~/Github/Data/Patient/NIRADS/PET_CT_NIRADS_syntetic.csv')\n train_text = np.concatenate((train_text,data_syntetic['syntetic_data'].values))\n y_train = np.concatenate((y_train,data_syntetic['syntetic_label'].values-1))\n\n train_text, test_text, y_train, y_test = train_test_split(train_text, y_train, test_size=0.5, random_state=1)\n train_text = np.concatenate((train_text,test_text))\n y_train = np.concatenate((y_train,y_test))\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels after inserting syntetic data: \", y_dist, \"\\n\\n\")\n\n if not undersample_majority:\n data_to_use = train_text.copy()\n y_to_use = y_train.copy()\n else:\n max_label1 = 1000\n data_to_use = []\n y_to_use = []\n y1=0\n for x in range(len(y_train)):\n if y_train[x] !=1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n else:\n if y1 <max_label1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n y1+=1\n\n else:\n data_to_use = test_text.copy()\n y_to_use = y_test.copy()\n\n basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n examples = []\n\n for i, tokens in tqdm(enumerate(data_to_use)):\n label = y_to_use[i]\n examples.append(\n ClassificationExample(\n id=i,\n tokens_a=basic_tokenizer.tokenize(tokens),\n tokens_b=None,\n label=label,\n )\n )\n logging.info('Number of `%s` examples: %d', step, len(examples))\n \n return examples",
"def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)",
"def load_data(train_data,limit=0, split=0.8):\n # Partition off part of the train data for evaluation\n #random.shuffle(train_data)\n train_data = train_data[-limit:]\n Y,X = train_data[\"type\"], train_data[\"posts\"]\n y = []\n for y_ in Y:\n if y_[0] == 'I' : INTROVERTED = True\n else: INTROVERTED = False\n if y_[1] == 'N' : INTUTIVE= True\n else: INTUTIVE= False\n if y_[2] == 'T' : THINKING= True\n else: THINKING= False\n if y_[3] == 'J' : JUDGEMENTAL= True\n else: JUDGEMENTAL= False\n y.append({'INTROVERTED':INTROVERTED,\"INTUTIVE\":INTUTIVE,\"THINKING\":THINKING,\"JUDGEMENTAL\":JUDGEMENTAL})\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=42)\n return (X_train, y_train), (X_test, y_test)",
"def load_dataset(name, cnn, load_train=True, fold=0):\n loc = paths.dataset_dir[name]\n\n splits = []\n if load_train:\n splits = ['train', 'dev']\n else:\n splits = ['dev', 'test']\n\n\n dataset = {}\n\n for split in splits:\n dataset[split] = {}\n caps = []\n splitName = 'val' if (name == 'coco' or name == 'flickr30k') and split == 'dev' else split\n with open('%s/%s.txt' % (loc, splitName), 'rb') as f:\n for line in f:\n caps.append(line.strip())\n dataset[split]['caps'] = caps\n\n dataset[split]['ims'] = numpy.load('%s/images/%s/%s.npy' % (loc, cnn, splitName))\n dataset[split]['cap_tps'] = numpy.load('%s/topics/tmp/doc-topic_%s_line_t100.npy' % (loc, splitName))\n #dataset[split]['im_tps'] = numpy.load('%s/topics/t100/doc-topic_%s_t100.npy' % (loc, splitName))\n dataset[split]['im_tps'] = numpy.load('%s/topics/tmp/im_pred_%s.npy' % (loc, splitName))\n \n # norm topic vectors\n dataset[split]['cap_tps'] = (dataset[split]['cap_tps'].T / (dataset[split]['cap_tps'].max(axis=1) + 1e-30)).T\n dataset[split]['im_tps'] = (dataset[split]['im_tps'].T / (dataset[split]['im_tps'].max(axis=1) + 1e-30)).T\n # handle coco specially by only taking 1k or 5k captions/images\n if split in ['dev', 'test'] and fold >= 0:\n dataset[split]['ims'] = dataset[split]['ims'][fold*1000:(fold+1)*1000]\n dataset[split]['im_tps'] = dataset[split]['im_tps'][fold*1000:(fold+1)*1000]\n dataset[split]['caps'] = dataset[split]['caps'][fold*5000:(fold+1)*5000]\n dataset[split]['cap_tps'] = dataset[split]['cap_tps'][fold*5000:(fold+1)*5000]\n\n return dataset",
"def split_data(num_samples, num_splits):\n\n kf = sklearn.model_selection.KFold(n_splits=num_splits, random_state=0);\n return kf.split(range(num_samples))",
"def load_data_and_labels_without_shuffled():\n # Load data from files\n with codecs.open('./data/train_pos.txt', 'r+', 'utf-8') as f:\n train_pos = f.readlines()\n with codecs.open('./data/dev_pos.txt', 'r+', 'utf-8') as f:\n dev_pos = f.readlines()\n with codecs.open('./data/train_neg.txt', 'r+', 'utf-8') as f:\n train_neg = f.readlines()\n with codecs.open('./data/dev_neg.txt', 'r+', 'utf-8') as f:\n dev_neg = f.readlines()\n\n positive_examples1 = []\n positive_examples2 = []\n negative_examples1 = []\n negative_examples2 = []\n\n for i in train_pos:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n positive_examples1.append(item1)\n positive_examples2.append(item2)\n\n for i in train_neg:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n negative_examples1.append(item1)\n negative_examples2.append(item2)\n\n # Split by words\n x_text_train1 = positive_examples1 + negative_examples1\n x_text_train2 = positive_examples2 + negative_examples2\n\n positive_dev1 = []\n positive_dev2 = []\n negative_dev1 = []\n negative_dev2 = []\n\n for i in dev_pos:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n positive_dev1.append(item1)\n positive_dev2.append(item2)\n\n for i in dev_neg:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n negative_dev1.append(item1)\n negative_dev2.append(item2)\n\n x_text_dev1 = positive_dev1 + negative_dev1\n x_text_dev2 = positive_dev2 + negative_dev2\n\n # Generate labels\n train_positive_labels = [[0, 1] for _ in train_pos]\n dev_positive_labels = [[0, 1] for _ in dev_pos]\n train_negative_labels = [[1, 0] for _ in train_neg]\n dev_negative_labels = [[1, 0] for _ in dev_neg]\n y_train = np.concatenate([train_positive_labels, train_negative_labels], 0)\n y_dev = np.concatenate([dev_positive_labels, dev_negative_labels], 0)\n\n return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev]",
"def data_split(dataset, val_ratio=0.1, test_ratio=0.1, seed=1234):\n\n\t# How you grab the labels will depend on what type of Pytorch Dataset object 'dataset' is\n\t# (i.e. ImageFolder/DatasetFolder or not)\n\n\t# For fun, check the method resolution order (MRO) of 'dataset'\n\tprint('Dataset object\\'s inheritance: ', type(dataset).__mro__)\n\n\t# Determine what kind of Dataset object it is, then grab labels\n\t# Warning: currently this will break for anything other than an ImageFolder or CIFAR10 train set\n\tif isinstance(dataset, datasets.CIFAR10):\n\t\tlabels = dataset.train_labels\n\telif isinstance(dataset, datasets.ImageFolder):\n\t\tlabels = [img[1] for img in dataset.imgs]\n\telse:\n\t\terror('Dataset not supported yet')\n\n\t# Calculate class priors, (number in class)/(size of dataset)\n\tidcs = [i for i in range(len(dataset))]\n\tsamples_per_class = np.bincount(np.array(labels))\n\tpriors = samples_per_class/len(labels)\n\n\t# Number of samples in each class for val and test set \n\tval_per_class = np.ceil(samples_per_class*val_ratio).astype(np.int)\n\ttest_per_class = np.ceil(samples_per_class*test_ratio).astype(np.int)\n\n\t# Copy and shuffle the labels and corresponding indices to randomize before splitting\n\tshuffled_labels = list(labels)\n\tshuffled_idcs = list(idcs)\n\trandom.Random(seed).shuffle(shuffled_labels)\n\trandom.Random(seed).shuffle(shuffled_idcs)\n\n\t# Iterate through, grabbing indices for each class to place in validation set\n\t# until the desired number is reached\n\tval_idcs = []\n\tval_counts = np.zeros(val_per_class.shape)\n\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if validation set quota has been reached yet for this class\n\t\tif val_counts[l] < val_per_class[l]:\n\t\t\tval_idcs.append(i)\n\t\t\tval_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (val_counts == val_per_class).all():\n\t\t\tbreak\n\n\t# Repeat for test set\n\ttest_idcs = []\n\ttest_counts = np.zeros(test_per_class.shape)\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if this index is already in val set\n\t\tif i in val_idcs:\n\t\t\tcontinue\n\n\t\t# Check if test set quota has been reached yet for this class\n\t\tif test_counts[l] < test_per_class[l]:\n\t\t\ttest_idcs.append(i)\n\t\t\ttest_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (test_counts == test_per_class).all():\n\t\t\tbreak\n\n\t# Get train indices too (all the remaining samples not in val or test)\n\ttrain_idcs = [j for j in idcs if j not in val_idcs+test_idcs]\n\n\t# Split the data\n\ttrain = Subset(dataset, train_idcs)\n\tval = Subset(dataset, val_idcs)\n\ttest = Subset(dataset, test_idcs)\n\n\treturn train, val, test",
"def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)"
]
| [
"0.69632035",
"0.69308716",
"0.6922211",
"0.67450684",
"0.6697376",
"0.66871846",
"0.66731495",
"0.6519348",
"0.6479878",
"0.6477659",
"0.64760816",
"0.64579505",
"0.64511955",
"0.64421725",
"0.64055145",
"0.640277",
"0.6390375",
"0.6376216",
"0.6356931",
"0.6300504",
"0.62793916",
"0.62704855",
"0.6263439",
"0.625719",
"0.62502724",
"0.62486935",
"0.6242171",
"0.62416637",
"0.6241515",
"0.6235838"
]
| 0.7111278 | 0 |
Apply TFIDF transformation to test data. | def tfidf_transform(vectorizer_train, newsgroups_test):
vectors_test = vectorizer_train.transform(newsgroups_test)
dense_test = vectors_test.todense()
denselist_test = np.array(dense_test).transpose()
X_test = denselist_test.copy()
return X_test | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __tf_idf_feature_extraction(self):\n print('=' * 80)\n print(\"TF-IDF Feature Extraction\")\n t0 = time()\n vectorizer = TfidfVectorizer()\n vec_train = vectorizer.fit_transform(self.train.text)\n vec_test = vectorizer.transform(self.test.text)\n duration = time() - t0\n print(\"DONE!!!!! total time: %fs\" % duration)\n print('=' * 80)\n return vec_train, vec_test",
"def get_tfidf_vectors(self):\n\n train_text = self.get_training_data()\n test_text = self.get_testing_data()\n\n print 'Initilizing tf vectorizer ...'\n vectorizer = TfidfVectorizer(sublinear_tf=True)\n vectorizer.fit( train_text + test_text )\n\n print 'Transforming data to tfidf vector ...'\n train_vec = vectorizer.transform(train_text)\n #print len(vectorizer.get_feature_names())\n test_vec = vectorizer.transform(test_text)\n\n return train_vec, test_vec",
"def transform(train_data, test_data, working_dir):\n\n options = PipelineOptions()\n options.view_as(StandardOptions).runner = 'DirectRunner'\n with beam.Pipeline(options=options) as pipeline:\n _ = (pipeline | 'ReadTrainData' >> beam.Create(train_data) |\n 'EncodeTrainData' >> beam.Map(lambda data: to_example(data)) |\n 'WriteTrainData' >> beam.io.WriteToTFRecord(\n os.path.join(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE)))\n\n _ = (pipeline | 'ReadTestData' >> beam.Create(test_data) |\n 'EncodeTestData' >> beam.Map(lambda data: to_example(data)) |\n 'WriteTestData' >> beam.io.WriteToTFRecord(\n os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE)))",
"def get_idf_features(train_data, test_data):\n tfidf = TfidfVectorizer(tokenizer = tokenize, ngram_range = (1, 2))\n tfidf.fit(train_data)\n return tfidf.transform(train_data), tfidf.transform(test_data)",
"def tfIdf(texts):\n vect = TfidfVectorizer(min_df=1)\n tfidf = vect.fit_transform([tt.lower() for tt in texts])\n aa=(tfidf * tfidf.T).A\n return aa",
"def TransformData(text):\n global COUNT_VECTORIZER\n if COUNT_VECTORIZER is None:\n COUNT_VECTORIZER = CountVectorizer(analyzer = 'word', lowercase = True)\n COUNT_VECTORIZER.fit(text)\n features = COUNT_VECTORIZER.transform(text)\n features_nd = features.toarray() # for easy usage\n global TFIDF\n if TFIDF is None:\n TFIDF = TfidfTransformer(use_idf=False)\n TFIDF.fit(features_nd)\n text_tfidf = TFIDF.transform(features_nd)\n return text_tfidf",
"def rf_tf_idf_train(df, dominio):\n df_final = obtain_train_corpus()\n # Puedo separarlo en distintos df segun el dominio\n df_domain_total = [{category:df_domain} for category, df_domain in df_final.groupby('category')]\n \n if dominio == \"entidad\":\n # Tambien puedo separar a nivel de dominio y entity\n df_domain_total_entity = {}\n for df in df_domain_total:\n category = list(df.keys())[0]\n df = list(df.values())[0]\n df_entities = [{entity:df_entity} for entity, df_entity in df.groupby('entity_name')]\n df_domain_total_entity.update({category:df_entities})\n \n vocabulario = corpus_generation(df_domain_total_entity, \"entidad\")\n entidades = list(vocabulario.keys())\n categorias = list(df_domain_total_entity.keys())\n \n i = 1\n total = len(entidades)\n for categoria in categorias:\n for df in df_domain_total_entity[categoria]:\n \n print(\"Entrendando modelo \" + str(i) + \"/\" + str(total))\n \n entidad = list(df.keys())[0]\n df = list(df.values())[0]\n df = df.reset_index()\n \n X = list(df['text'])\n print(\"Entidad: \", entidad)\n\n words, words_tot, median, df_pattern, df_suma = word_tf_idf(X)\n df_classificacion = df_suma.join(df, how=\"outer\") # Join por los index\n \n X_tf_idf = list(df_classificacion['tf-idf'])\n y_tf_idf = list(df_classificacion['topic'])\n \n # Encoding a numerico\n labelencoder_X = LabelEncoder()\n y_tf_idf=labelencoder_X.fit_transform(y_tf_idf) # Codifico en valores numericos las clases que hay\n\n \n # Train/validation split\n X_train, X_val, y_train, y_val = train_test_split(X_tf_idf, y_tf_idf, \n test_size = 0.1, random_state = 0)\n \n \n # Menor distancia cuadratica de TF\n y_pred = []\n for x_ref in X_val:\n ref = 999\n i = 0\n for x in X_train:\n \n diff = (x_ref - x)**2\n diff = np.sqrt(diff)\n print(diff)\n \n if diff < ref:\n i = X_train.index(x)\n ref = diff\n\n y_pred.append(y_train[i]) # Identifico con la clase de menor distancia cuadratica TF-IDF\n \n \n # Making the Confusion Matrix\n cm = confusion_matrix(y_val, y_pred)\n \n # Accuracy\n accuracy = accuracy_score(y_val, y_pred)\n \n # Precision\n average_precision = precision_score(y_val, y_pred, average = \"macro\")\n \n # Recall\n recall = recall_score(y_val, y_pred, average='macro')\n \n print(\"Modelo \"+str(i)+\" resultados\")\n print(\"accuracy \", accuracy, \" precision \", average_precision, \" recall \", recall) # Se ve que los resultados son muy malos",
"def calculate_tfidf(self):\n\n # Add extra columns to clean_dataset from orignial's one\n self.clean_dataframe['overview'], self.clean_dataframe['id'] = self.original_dataframe['overview'], self.original_dataframe['id']\n\n # Define a TF-IDF Vectorizer Object\n tfidf = TfidfVectorizer(stop_words='english') # Remove all english stopwords\n\n # Replace NaN with an empty string\n self.clean_dataframe['overview'] = self.clean_dataframe['overview'].fillna('')\n\n # Construct the required TF-IDF matrix by applying the fit_transform method on the overview feature\n tfidf_matrix = tfidf.fit_transform(self.clean_dataframe['overview'].head(10000)) # 10000 because of memory error\n\n return tfidf_matrix",
"def calc_tf_idf(idf, tf):\r\n tfidf = {}\r\n for key, val in tf.items():\r\n tfidf[key] = val * idf[key]\r\n return tfidf",
"def input_fn():\n dataset = tf.contrib.data.make_batched_features_dataset(\n file_pattern=os.path.join(TFRECORD_DIR, '*'),\n batch_size=BATCH_SIZE,\n features=transformed_feature_spec,\n reader=tf.data.TFRecordDataset,\n shuffle=True)\n transformed_features = dataset.make_one_shot_iterator().get_next()\n # Extract features and labels from the transformed tensors.\n label_cols = set(['TotalVolume', 'Density', 'Temperature', 'Humidity', 'Energy', 'Problems'])\n transformed_labels = {key: value for (key, value) in transformed_features.items() if key in label_cols}\n transformed_features = {key: value for (key, value) in transformed_features.items() if key not in label_cols}\n return transformed_features, transformed_labels",
"def generate_and_save_test_features(test_input, test_output, bag_of_words, tfidf):\n df_test = get_df(test_input)\n test_words = np.array(df_test.text.str.lower().values)\n\n test_words_binary_matrix = bag_of_words.transform(test_words)\n test_words_tfidf_matrix = tfidf.transform(test_words_binary_matrix)\n feature_names = bag_of_words.get_feature_names_out()\n\n save_matrix(df_test, test_words_tfidf_matrix, feature_names, test_output)",
"def build_tfidf_vectors_on_doc_level(train_set: List[Dict], test_set: List[Dict]) -> Tuple[Dict, Dict,\n TfidfVectorizer]:\n train_document_ids = list(set([d['entity_id'] for d in train_set]))\n train_documents = []\n for doc_id in train_document_ids:\n train_documents.append(\" \".join([d['content'] for d in train_set if d['entity_id'] == doc_id]))\n\n test_document_ids = list(set([d['entity_id'] for d in test_set]))\n test_documents = []\n for doc_id in test_document_ids:\n test_documents.append(\" \".join([d['content'] for d in test_set if d['entity_id'] == doc_id]))\n\n # print(\"Some example documents:\")\n # for i in train_documents[:2]:\n # print(i)\n\n print(\"Training vectorizer on {} documents\".format(len(train_documents)))\n vectorizer = TfidfVectorizer(max_df=0.95, min_df=0.01, max_features=200, stop_words='english')\n train_documents_tfidf = vectorizer.fit_transform(train_documents)\n test_documents_tfidf = vectorizer.transform(test_documents)\n print(\" Generated TF/IDF with {} columns\".format(train_documents_tfidf.shape[1]))\n\n train_tfidf = {}\n for i in range(len(train_document_ids)):\n train_tfidf[train_document_ids[i]] = train_documents_tfidf[i]\n\n test_tfidf = {}\n for i in range(len(test_document_ids)):\n test_tfidf[test_document_ids[i]] = test_documents_tfidf[i]\n\n return train_tfidf, test_tfidf, vectorizer",
"def compute_tfidf_weights(train_set, test_set, vocab_size):\n\n transformer = TfidfTransformer()\n\n # fit idf vector on train set\n counts = np.zeros((len(train_set), vocab_size), dtype=np.int64)\n for i, row in enumerate(train_set):\n counts_sample = torch.bincount(row['text'])\n counts[i, :len(counts_sample)] = counts_sample.cpu().data.numpy()\n tfidf = transformer.fit_transform(counts)\n\n for i, row in enumerate(train_set):\n row['weight'] = torch.tensor(tfidf[i, row['text']].toarray().astype(np.float32).flatten())\n\n # compute tf-idf weights for test set (using idf vector from train set)\n counts = np.zeros((len(test_set), vocab_size), dtype=np.int64)\n for i, row in enumerate(test_set):\n counts_sample = torch.bincount(row['text'])\n counts[i, :len(counts_sample)] = counts_sample.cpu().data.numpy()\n tfidf = transformer.transform(counts)\n\n for i, row in enumerate(test_set):\n row['weight'] = torch.tensor(tfidf[i, row['text']].toarray().astype(np.float32).flatten())",
"def useTfidfVectorizer(self, data):\n if self.results:\n print()\n print(\"Extracting features from the training dataset using a sparse vectorizer\", end=\" - \")\n t0 = time()\n \n vectorizer = TfidfVectorizer(max_features=10000, stop_words='english',norm='l2',use_idf=True, sublinear_tf=False,encoding='utf-8')\n matrix = vectorizer.fit_transform(data)\n \n if self.results:\n print(\"done in %0.3fs\" % (time() - t0))\n print(\"n_samples: %0.3d, n_features: %d\" % matrix.shape)\n print()\n \n feature_names = vectorizer.get_feature_names()\n return matrix, feature_names",
"def tf_idf(data_frame, description):\n text = list(data_frame['article'])\n vectorizer = TfidfVectorizer(stop_words='english') # create the transform\n vectorizer.fit(text) # tokenize and build vocab\n # save tf_idf vectorizer as pickle\n with open('resources/tf-idf_encoder_' + description + '.pkl', 'wb') as f:\n pickle.dump(vectorizer.vocabulary_, f)\n f.close()\n data_frame['tf-idf'] = data_frame['article'].apply(lambda x: vectorizer.transform([x]))\n return data_frame",
"def vectorize_data(self, data, idf=False):\r\n\r\n # collect only the cleaned text of the tweet\r\n text = []\r\n for tweet in data:\r\n if not tweet.get_processed_text():\r\n tweet.set_processed_text(self.clean_tweet(tweet))\r\n text.append(tweet.get_processed_text())\r\n\r\n # vectorize tweets\r\n\r\n if idf:\r\n vectorizer = TfidfVectorizer(min_df=((len(data) // 1000) + 1), max_df=10000, ngram_range=(1, 3))\r\n else:\r\n vectorizer = CountVectorizer(min_df=((len(data) // 1000) + 1), max_df=10000, ngram_range=(1, 3))\r\n\r\n # vectorizer = TFVectorizing()\r\n vectors = vectorizer.fit_transform(text)\r\n return vectors",
"def test_transform_inverse_transform(example_tsds: TSDataset) -> None:\n trend_transform = TrendTransform(in_column=\"target\", detrend_model=LinearRegression(), model=\"rbf\")\n example_tsds.fit_transform([trend_transform])\n original = example_tsds.df.copy()\n example_tsds.inverse_transform()\n assert (example_tsds.df == original).all().all()",
"def test_tfidf_scorer(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor(tokenizer=tokenizer)\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates)\n self.assertGreater(scores.get('erdogan'), scores.get('damascus'))\n self.assertEqual(scores.get('everywhere'), scores.get('disconnected')) # they appear the same number of times\n self.assertGreater(scores.get('erdogan'), scores.get('threats')) # 'threats' and 'erdogan' appear with the same frequency, but 'threats' has a higher DF",
"def transform(self, test: LAMLDataset) -> LAMLDataset:\n return self._pipeline.transform(test)",
"def tfidf_transform(vector):\n\n tfidf_transformation = TfidfTransformer().fit_transform(X=vector).toarray()\n tfidf_vector = pandas.DataFrame(tfidf_transformation, index=indexes.keys())\n return tfidf_vector",
"def testDFT(DFTfn, IDFTfn, TOL=10**-6, LEN=2**7):\n \n N_TESTS = 100\n \n seqs = [randomSeq(LEN, -1000, 1000) for _ in range(N_TESTS)]\n passed = 0\n failed = 0\n failed_seqs = []\n for x in seqs:\n case_failed = False\n \n y_1 = IDFTfn(DFTfn(x))\n y_2 = DFTfn(IDFTfn(x))\n for a, b, c in zip(x, y_1, y_2):\n if (\n abs(a.re - b.re) > TOL or abs(a.im - b.im) > TOL or\n abs(a.re - c.re) > TOL or abs(a.im - c.im) > TOL\n ):\n case_failed = True\n break\n \n if case_failed:\n failed += 1\n failed_seqs.append(x)\n else:\n passed += 1\n \n print('----')\n print('testDFT results:')\n print('Passed {} out of {} cases.'.format(passed, passed+failed))\n return failed_seqs",
"def tfidf(self):\n\t\ttry:\n\t\t\tself.tfidf_df = tfidf(self.bagofwords)\n\t\texcept AttributeError:\n\t\t\tself.gen_bag_of_words_df()\n\t\t\tself.tfidf_df = tfidf(self.bagofwords)",
"def tfidf_train(newsgroups_train, n_features):\n # Extract Tfidf weights\n stop_words_list = nltk.corpus.stopwords.words('english')\n vectorizer_train = TfidfVectorizer(max_features=n_features,\n min_df=5, max_df=0.70,\n token_pattern = '[a-zA-Z]+',\n stop_words = stop_words_list)\n vectors_train = vectorizer_train.fit_transform(newsgroups_train)\n feature_names_train = vectorizer_train.get_feature_names() #features list\n dense_train = vectors_train.todense()\n\n denselist_train = np.array(dense_train).transpose() # tfidf matrix\n X_train = denselist_train.copy() # train data (tfidf)\n\n return vectorizer_train, feature_names_train, X_train",
"def transform_data(dataset_train, dataset_test):\n\n #transform dataset using fit_transform\n dataset_train = scaler.fit_transform(dataset_train)\n\n #transform dataset using transform (does not influence teaching)\n dataset_test = scaler.transform(dataset_test)\n\n return dataset_train, dataset_test",
"def test_example(self, example_dataset, expected_result):\n\n transformer = PreprocessFeatures()\n result = transformer.fit_transform(example_dataset)\n\n assert (result == expected_result).all()",
"def testVectorize(self):\n \n doc = self.train.tfidf\n\n self.assertEqual(doc[0][self.vectorizer.vocabulary_[\"treatise\"]], 1)\n self.assertEqual(doc[0][self.vectorizer.vocabulary_[\"novel\"]], 0)\n self.assertEqual(doc[1][self.vectorizer.vocabulary_[\"treatise\"]], 1)\n self.assertEqual(doc[1][self.vectorizer.vocabulary_[\"novel\"]], 0)\n self.assertEqual(doc[2][self.vectorizer.vocabulary_[\"treatise\"]], 1)\n self.assertEqual(doc[2][self.vectorizer.vocabulary_[\"novel\"]], 0)\n\n self.assertEqual(doc[3][self.vectorizer.vocabulary_[\"treatise\"]], 0)\n self.assertEqual(doc[3][self.vectorizer.vocabulary_[\"novel\"]], 1)\n self.assertEqual(doc[4][self.vectorizer.vocabulary_[\"treatise\"]], 0)\n self.assertEqual(doc[4][self.vectorizer.vocabulary_[\"novel\"]], 1)\n self.assertEqual(doc[5][self.vectorizer.vocabulary_[\"treatise\"]], 0)\n self.assertEqual(doc[5][self.vectorizer.vocabulary_[\"novel\"]], 1)\n\n doc = self.test.tfidf\n self.assertEqual(doc[0][self.vectorizer.vocabulary_[\"treatise\"]], 1)\n self.assertEqual(doc[0][self.vectorizer.vocabulary_[\"novel\"]], 0)\n\n self.assertEqual(doc[1][self.vectorizer.vocabulary_[\"treatise\"]], 0)\n self.assertEqual(doc[1][self.vectorizer.vocabulary_[\"novel\"]], 1)",
"def tfidf(corpus):\n vectorizer = CountVectorizer(stop_words='english', decode_error='ignore')\n x = vectorizer.fit_transform(corpus)\n a = x.toarray()\n name = vectorizer.get_feature_names()\n transformer = TfidfTransformer()\n tfidf = transformer.fit_transform(a)\n return name, tfidf.toarray()",
"def computeTFIDF(self):\n for word in self.dictionary:\n numOfAppearance = self.dictionary[word].getDocumentFrequency()\n idf = math.log( (self.MAX_RATING) / (numOfAppearance), 10 )\n self.dictionary[word].setTFIDF(idf)",
"def _applytfms(args):\n import nibabel as nb\n from nipype.utils.filemanip import fname_presuffix\n from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms\n\n in_file, in_xform, ifargs, index, newpath = args\n out_file = fname_presuffix(\n in_file, suffix=\"_xform-%05d\" % index, newpath=newpath, use_ext=True\n )\n\n copy_dtype = ifargs.pop(\"copy_dtype\", False)\n xfm = ApplyTransforms(\n input_image=in_file, transforms=in_xform, output_image=out_file, **ifargs\n )\n xfm.terminal_output = \"allatonce\"\n xfm.resource_monitor = False\n runtime = xfm.run().runtime\n\n if copy_dtype:\n nii = nb.load(out_file, mmap=False)\n in_dtype = nb.load(in_file).get_data_dtype()\n\n # Overwrite only iff dtypes don't match\n if in_dtype != nii.get_data_dtype():\n nii.set_data_dtype(in_dtype)\n nii.to_filename(out_file)\n\n return (out_file, runtime.cmdline)",
"def test_NeurosynthDecoder_featuregroup(testdata_laird):\n ids = testdata_laird.ids[:5]\n decoder = discrete.NeurosynthDecoder(feature_group=\"Neurosynth_TFIDF\")\n decoder.fit(testdata_laird)\n decoded_df = decoder.transform(ids=ids)\n assert isinstance(decoded_df, pd.DataFrame)"
]
| [
"0.63195014",
"0.61641866",
"0.6114476",
"0.60683584",
"0.60582715",
"0.6029245",
"0.5998361",
"0.59757125",
"0.59153837",
"0.5848679",
"0.5802245",
"0.5792374",
"0.57360756",
"0.57278067",
"0.5684948",
"0.5674857",
"0.56532204",
"0.56437564",
"0.564253",
"0.5634832",
"0.56133306",
"0.5612664",
"0.5584491",
"0.5578245",
"0.5529726",
"0.5524632",
"0.54971933",
"0.54846877",
"0.5477916",
"0.5461684"
]
| 0.6933384 | 0 |
Shuffle data X, labels y, subcategories z. | def shuffle_data(X,y,z):
data = np.row_stack((X, y, z))
np.random.shuffle(data.T)
X = data[:-2,:]
y = data[-2,:]
z = data[-1,:]
return X, y, z | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shuffle_data(self):\n images = list(self.train_images)\n labels = list(self.train_labels)\n self.train_images = []\n self.train_labels = []\n\n # create list of permutated index and shuffle data accoding to list\n idx = np.random.permutation(len(labels))\n for i in idx:\n self.train_images.append(images[i])\n self.train_labels.append(labels[i])",
"def shuffle_dataset(data, label, others=None, class_balanced=False):\n if class_balanced:\n sorted_ids = []\n\n for i in range(label.max() + 1):\n tmp_ids = np.where(label == i)[0]\n np.random.shuffle(tmp_ids)\n sorted_ids.append(tmp_ids)\n\n sorted_ids = np.stack(sorted_ids, 0)\n sorted_ids = np.transpose(sorted_ids, axes=[1, 0])\n ids = np.reshape(sorted_ids, (-1,))\n\n else:\n ids = np.arange(data.shape[0])\n np.random.shuffle(ids)\n\n if others is None:\n return data[ids], label[ids]\n else:\n return data[ids], label[ids], others[ids]",
"def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx], idx",
"def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx], labels[idx]",
"def shuffle_labels(self):\n random.shuffle(self.y_train)\n random.shuffle(self.y_test)",
"def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx,...], idx",
"def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx], idx",
"def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx], idx",
"def shuffle_data(data, labels):\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n return data[idx, ...], labels[idx], idx",
"def shuffle_data(data, labels):\r\n idx = np.arange(len(labels))\r\n np.random.shuffle(idx)\r\n return data[idx, ...], labels[idx], idx",
"def shuffle_dataset(self, seed=None):\n stacked = np.r_[self.X_train,\n self.y_train]\n shuffle(stacked.T)\n X_shuffled = stacked[:self.nfeatures,:]\n y_shuffled = stacked[self.nfeatures:,:]\n return X_shuffled, y_shuffled",
"def shuffle(self):\n self.x['train'], self.y['train'] = self._shuffle(\n self.x['train'],\n self.y['train']\n )",
"def inlabel_shuffle(data):\n num_zero_data = np.sum(data[:,-1]==0)\n label_zero_data = data[:num_zero_data,:]\n label_one_data = data[num_zero_data:,:]\n np.random.shuffle(label_zero_data)\n np.random.shuffle(label_one_data)\n return data",
"def shuffle(inputData):\n\n x = inputData[0]\n y = inputData[1]\n m = inputData[2]\n \n N = np.arange(0,x.shape[0])\n np.random.shuffle(N)\n \n x = x[N,:]\n y = y[N]\n m = m[N,:]\n \n return [x,y,m]",
"def random_perm3(data_x, data_y, data_z):\n data_size = data_y.shape[0]\n rand_perm = np.arange(data_size)\n np.random.shuffle(rand_perm)\n random_data_x = []\n random_data_z = []\n for indices in rand_perm: # 'List' object, more complicated !!!\n random_data_x.append(data_x[indices])\n random_data_z.append(data_z[indices])\n # random_data_x = data_x[rand_perm]\n random_data_y = data_y[rand_perm]\n # random_data_z = data_z[rand_perm]\n return random_data_x, random_data_y, random_data_z",
"def shuffle_T(self):\n np.random.shuffle(self.T)",
"def shuffleData(self, x, y):\n #get new random order for inputs and targets\n order = np.arange( x.shape[0] )\n random.shuffle( order )\n #reorder inputs and targets\n return x[order], y[order]",
"def shuffle_data(y, data):\n combined = np.c_[data.reshape(len(data), -1), y.reshape(len(y), -1)]\n np.random.shuffle(combined)\n shuffled_data = combined[:, :data.size // len(data)].reshape(data.shape)\n shuffled_y = combined[:, data.size // len(data):].reshape(y.shape)\n return [shuffled_y, shuffled_data]",
"def _shuffle(self, reinit_indexes = False):\n print('Shuffling data...')\n # set seed for reproducibility\n #random.seed()\n # shuffle identities\n random.shuffle(self.identities)\n # shuffle images associated to each identity\n for identity in self.groundtruth_metadata.keys():\n random.shuffle(self.groundtruth_metadata[identity]['metadata'])\n if reinit_indexes:\n self.groundtruth_metadata[identity]['index'] = 0\n print('Finished shuffling data!')",
"def shuffle(data):\n\n shuffle = np.arange(data.shape[0])\n np.random.shuffle(shuffle)\n undo_shuffle = np.argsort(shuffle)\n\n return (data[shuffle], undo_shuffle)",
"def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])",
"def shuffle_data(data):\n idx = np.arange(len(data))\n np.random.shuffle(idx)\n return data[idx, ...]",
"def shuffle_dataset(instances, labels, seed):\n data = list(zip(instances, labels))\n\n if isinstance(seed, int):\n random.Random(seed).shuffle(data)\n else:\n random.Random().shuffle(data)\n\n instances, labels = zip(*data)\n\n return instances, labels",
"def shuffle(self):\n self.train_edges = np.random.permutation(self.train_edges)\n self.nodes = np.random.permutation(self.nodes)\n self.batch_num = 0",
"def shuffle_datasets(self):\n assert self.data_tags is not None\n assert self.training_dataset is not None\n assert self.validation_dataset is not None\n self.training_dataset = self.shuffle_data_dictionary(self.training_dataset)\n self.validation_dataset = self.shuffle_data_dictionary(self.validation_dataset)",
"def shuffle_data(data):\n indices = list(range(data.shape[0]))\n np.random.shuffle(indices)\n return data[indices]",
"def shuffle(self, random_state=None): \n if random_state is None:\n random_state = self.random_state\n perm_ids = random_state.permutation(self.n_examples)\n self.u = self.u[perm_ids]\n self.v = self.v[perm_ids]\n self.rating = self.rating[perm_ids]",
"def shuffle_points(batch_data):\n idx = np.arange(batch_data.shape[1])\n np.random.shuffle(idx)\n return batch_data[:,idx,:]",
"def shuffle_points(batch_data):\n idx = np.arange(batch_data.shape[1])\n np.random.shuffle(idx)\n return batch_data[:, idx, :]",
"def fetch_data_groups(data_home=None, subset='train', categories=None,\r\n shuffle=False, random_state=42,\r\n remove=(),\r\n process_if_missing=True):\r\n\r\n data_home = get_data_home(data_home=data_home)\r\n cache_path = _pkl_filepath(data_home, CACHE_NAME)\r\n #twenty_home = os.path.join(data_home, \"20news_home\")\r\n twenty_home = data_home\r\n cache = None\r\n if os.path.exists(cache_path):\r\n try:\r\n with open(cache_path, 'rb') as f:\r\n compressed_content = f.read()\r\n uncompressed_content = codecs.decode(\r\n compressed_content, 'zlib_codec')\r\n cache = pickle.loads(uncompressed_content)\r\n except Exception as e:\r\n print(80 * '_')\r\n print('Cache loading failed')\r\n print(80 * '_')\r\n print(e)\r\n\r\n if cache is None:\r\n if process_if_missing:\r\n logger.info(\"Processing data. \"\r\n \"This may take a few minutes.\")\r\n cache = convert_to_pkz(target_dir=twenty_home,\r\n cache_path=cache_path)\r\n else:\r\n raise IOError('dataset not found')\r\n\r\n data = cache['data']\r\n\r\n data['description'] = 'given dataset'\r\n\r\n\r\n if shuffle:\r\n random_state = check_random_state(random_state)\r\n indices = np.arange(data.target.shape[0])\r\n random_state.shuffle(indices)\r\n data.filenames = data.filenames[indices]\r\n data.target = data.target[indices]\r\n # Use an object array to shuffle: avoids memory copy\r\n data_lst = np.array(data.data, dtype=object)\r\n data_lst = data_lst[indices]\r\n data.data = data_lst.tolist()\r\n\r\n return data"
]
| [
"0.67308176",
"0.6415329",
"0.63351786",
"0.6297167",
"0.6285768",
"0.6260271",
"0.6247948",
"0.6247948",
"0.6247948",
"0.62380946",
"0.6138317",
"0.60020167",
"0.5964029",
"0.59294426",
"0.5869422",
"0.58593374",
"0.58375067",
"0.5821528",
"0.5807298",
"0.5691683",
"0.5679712",
"0.56330997",
"0.55645674",
"0.5548904",
"0.55407155",
"0.5539009",
"0.5495606",
"0.54594904",
"0.54494655",
"0.5443916"
]
| 0.69409347 | 0 |
Returns the data grouped by Movie | def movies(self):
return self.data.groupby('Parameters') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_movies(self):\n\n movies = {}\n\n for index, movie in self.movies.iterrows():\n\n id_movie = int(movie[0])\n title = movie[1]\n movie_tags = []\n movie_ratings = []\n\n # Get tags for each movie\n tags = self.movies_tags.loc[self.movies_tags[0] == id_movie]\n\n for index_movie, tag in tags.iterrows():\n\n movie_tags.append(tag[1])\n\n # Get ratings for each film\n ratings = self.ratings.loc[self.ratings[1] == id_movie]\n\n # Get user ratings\n for index_ratings, row_rating in ratings.iterrows():\n movie_ratings.append((row_rating[0], float(row_rating[2]))) # (id_user, score)\n\n # Create Movie object\n movie_object = Movie(id_movie=id_movie, title=title, tags=movie_tags, ratings=movie_ratings)\n\n # Append movie_object in dictionary\n movies[id_movie] = movie_object\n\n return movies",
"def movie(response):\n\n response = response.json()\n\n if response.get(\"Error\"):\n raise NotFoundError(response[\"Error\"])\n\n if response[\"Type\"] != \"movie\":\n raise NotFoundError(\"Type is {}, should be movie\".format(response[\"Type\"]))\n\n return [OrderedDict([(\"Title\", response[\"Title\"]),\n (\"ID\", response[\"imdbID\"]),\n (\"Rating\", response[\"imdbRating\"]),\n (\"Year\", response[\"Year\"].split(u\"\\u2013\")[0])])]",
"def get_movies_by_director():\n with Path(MOVIE_DATA).open(encoding=\"utf-8\") as csv_file:\n reader = csv.DictReader(csv_file)\n movies_by_director = defaultdict(list)\n\n for r in reader:\n try:\n director = r[\"director_name\"]\n movie = Movie(\n title=r[\"movie_title\"],\n year=int(r[\"title_year\"]),\n score=float(r[\"imdb_score\"]),\n )\n movies_by_director[director].append(movie)\n except Exception:\n continue\n return movies_by_director",
"def movies(self) -> List[Movie]:\n return super(MoviesManager, self).items",
"def get_items(self):\n self.logger.info('Fetching movie records...')\n session = connect()\n\n cols = [\n Movie.movie_id,\n Movie.primary_title,\n Movie.start_year\n ]\n\n try:\n return session.query(*cols).all()\n finally:\n session.close()",
"def get_results_list(movies):\n resultlist = [\n {\n IMDB.name: movie.name,\n IMDB.imdb_score: movie.imdb_score,\n IMDB.director: movie.director,\n IMDB.popularity: movie.popularity,\n IMDB.genre: movie.genre\n } for movie in movies]\n return resultlist",
"def get_movies(week):\n movies = {}\n for movie in week.find_all('div', class_='venuefilmbloc'):\n movies[movie.a.strong.text] = \"\\n\".join(movie.span.text.split('; '))\n return movies",
"def _get_movies(self):\n\n return self.data_file['movies']",
"def get_movies(self):\n movies = dict()\n\n for endpoint_instance in self.instance_list:\n try:\n endpoint_offer = endpoint_instance.get_movie_list()\n except ServerSideError as e:\n self.logger.error(e.log_error_string + traceback.format_exc())\n continue\n index = endpoint_instance.__class__.__name__\n for movie in endpoint_offer:\n try:\n title = movie['Title']\n movie_id = movie['ID']\n poster = movie['Poster']\n except KeyError:\n self.logger.error(e.log_error_string + traceback.format_exc())\n continue\n\n # This is combination of adaptor class name and movie id. Used to create link to movie\n instance_ref = f'{index}={movie_id}'\n\n if title in movies:\n movies[title]['instance'].append(instance_ref)\n else:\n movies[title] = {\n 'title': title,\n 'instance': [instance_ref],\n 'poster': poster\n }\n # At this stage all the movies from all adaptors are recoreded so convert dictionary to list and return\n return movies.values()",
"def list_movie():\n if not MOVIES:\n print('No stored movies yet')\n\n for movie in MOVIES:\n print(f\"{movie['name']} ({movie['year']}) - Director by '{movie['director']}'\")",
"def query_all_movies():\n result = session.query(Movie).all()\n print(\"total movies: %s\" % len(result))\n for movie in result:\n print(\"movie poster: %s\" % movie.poster)\n print(\"%s trailer:%s genre:%s user_id:%s\" %\n (movie.name, movie.trailer_url, movie.genre, movie.user_id))\n print(\"-------------------------------------------------\")",
"def get_movies_by_director() -> Dict[str, List[Movie]]:\n movie_data = {}\n with open(os.path.abspath(FILE_PATH), newline='', encoding='utf-8') as in_file:\n for row in csv.DictReader(in_file):\n # keys\n name = row.get(\"director_name\")\n title = row.get(\"movie_title\")\n year = row.get(\"title_year\")\n score = row.get(\"imdb_score\")\n\n # convert\n int_year = 0\n if year:\n try:\n int_year = int(year)\n except ValueError:\n int_year = 0\n\n float_score = 0.0\n if score:\n try:\n float_score = float(score)\n except ValueError:\n float_score = 0.0\n\n # filter\n if int_year > MIN_YEAR:\n if name in movie_data:\n movie = Movie(title=title,\n year=int_year,\n score=float_score)\n movie_data[name].append(movie)\n\n else:\n movie_data[name] = []\n movie = Movie(title=title,\n year=int_year,\n score=float_score)\n movie_data[name].append(movie)\n\n return movie_data",
"def get_movies(cinema_name):\n cinema_code = CINEMA_CODES[cinema_name]\n movies = {}\n uncaught = []\n genres = set(json_response(URLS['attributes'])['body']['dropdownConfig']['genres'])\n poster_json = json_response(URLS['posters'])\n # Get the movies names\n for poster in poster_json['body']['posters']:\n # Extract movie title from poster's url\n try:\n movie_name = re.sub('-', ' ', re.search(r'films/([a-z0-9\\-]+)', poster['url']).group(1))\n movie_name = re.sub('(.*)(\\s*(green|purple))', '\\g<1>', movie_name).strip()\n if movie_name in {movie.title for movie in movies.values()}:\n continue\n except (AttributeError, IndexError):\n logger.warning(\"Could not find movie title of this url: {}\".format(poster['url']))\n continue\n try:\n release_year = datetime.strptime(poster['dateStarted'].split('T')[0], \"%Y-%m-%d\").year\n movie_genres = genres.intersection(set(poster['attributes']))\n\n # If no Poster genres found in Planet, it's probably a 'fake' movie\n if not movie_genres:\n raise RuntimeError\n selected_movie = map_poster_to_matching_movie(movie_name, release_year)\n if selected_movie is None:\n raise RuntimeError\n except (IMDbParserError, RuntimeError):\n uncaught.append(movie_name)\n continue\n movies[poster['code']] = Movie(poster['code'],\n selected_movie.get('title'),\n poster['featureTitle'],\n selected_movie.get('rating'),\n selected_movie.get('votes'),\n selected_movie.get('year'),\n movie_genres,\n selected_movie.get('imdbID'),\n poster['url']\n )\n # Add screening dates\n dates = get_dates(cinema_code)\n for day in dates:\n movies_json = json_response(URLS['events'].format(cinema_code, day))\n for event in movies_json['body']['events']:\n if not MACBOOK.intersection(set(event['attributeIds'])):\n try:\n movies[event['filmId']].add_date(event['eventDateTime'])\n except KeyError:\n continue\n logger.warning(f\"Couldn't find result(s) for movie(s): {uncaught}\")\n return movies, uncaught",
"def all_movies(request):\n if request.method == 'GET':\n movies = Movie.objects.order_by('name')\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)",
"def get_movies():\n\n # ouverture du fichier de notre liste de films\n with open(DATA_FILE,\"r\") as f:\n movies_list = json.load(f)\n\n # notre liste des instances\n movies = [Movie(m)for m in movies_list] \n return movies",
"def get_movies():\n tree = get_tree()\n movie_list = [movie.get(\"title\") for movie in tree.getroot().findall(\"movie\")]\n return movie_list",
"def get_movie_info(movie_id):\n info = {}\n\n movie = tmdb.get_movie(movie_id)\n info['title'] = movie['original_title']\n info['genres'] = \", \".join(x['name'] for x in movie['genres'])\n info['plot'] = movie['overview']\n info['year'] = movie['release_date'][:4]\n\n cast = movie['credits']['cast']\n info['actors'] = \", \".join(x['name'] for x in cast[:5])\n\n directors = [x for x in movie['credits']['crew'] if x['department'] == 'Directing']\n info['directors'] = \", \".join(x['name'] for x in directors[:2])\n\n return info",
"def get_movie_data(self):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM movie_table\")\n rows = cur.fetchall()\n return rows",
"def parse_movie_page(movie_url: str) -> Dict[str, str]:\n movie_page = get_soup_for_page(movie_url)\n\n # title and id\n movie_id = movie_url.split(\"/\")[-2]\n title = movie_page.find(\"div\", class_=\"title_wrapper\").find(\"h1\").get_text(\";\", strip=True).split(\";\")[0]\n\n # director and stars\n credit_summary_elements = movie_page.find_all(\"div\", class_=\"credit_summary_item\")\n director = credit_summary_elements[0].find(\"a\").text if len(credit_summary_elements) > 0 else \"\"\n if len(credit_summary_elements) > 2:\n stars_links = credit_summary_elements[2].find_all(\"a\")\n stars = [str(elem.text) for elem in stars_links[:-1]]\n else:\n stars = []\n movie_data = {\n \"id\": movie_id,\n \"title\": title,\n \"director\": director,\n \"stars\": stars,\n }\n print(movie_data)\n return movie_data",
"def extract_movies(dom):\n\n # extract data per movie\n movies = dom.find_all('div', class_ = 'lister-item mode-advanced')\n\n # list to store scraped data\n movielist = []\n\n for movie in movies:\n\n # append extracted data to this dict\n moviedict = {}\n\n # scrape titles and add to dict\n moviedict['title'] = movie.h3.a.text\n\n # scrape ratings and add to dict\n moviedict['rating'] = float(movie.strong.text)\n\n # scrape year of release and add to dict\n year = movie.h3.find('span', class_ = 'lister-item-year text-muted unbold')\n moviedict['year'] = re.findall('\\d+', year.text.strip('()'))[0]\n\n # scrape actors and add to dict\n actors = movie.find_all(href=re.compile(\"adv_li_st\"))\n actorlist = []\n for actor in actors:\n actorlist.append(actor.text)\n actorstring = ', '.join(actorlist)\n moviedict['actors'] = actorstring\n\n # scrape runtime and add to dict\n moviedict['runtime'] = movie.p.find('span', class_ = 'runtime').text.split(' ')[0]\n movielist.append(moviedict)\n\n\n # ADD YOUR CODE HERE TO EXTRACT THE ABOVE INFORMATION ABOUT THE\n # HIGHEST RATED MOVIES\n # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT REQUIRED) TO IGNORE\n # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT.\n\n return movielist # REPLACE THIS LINE AS WELL IF APPROPRIATE",
"def get_movie_data(movie_title, year=None, *queries):\n\n movie_info = movie_search.omdb_movie_info(movie_title, year)\n video_id = movie_search.youtube_video_id(movie_title, year, *queries)\n\n # Adding the Youtube data to the dictionary\n data = movie_info\n\n data['youtube_id'] = video_id\n data['youtube_url'] = Youtube_URL.format(video_id)\n\n return data",
"def movie_list(self):\n return self._request_obj(self._urls[\"movie_list\"], key=\"genres\")",
"def select(self):\n if not self.movie_reviews:\n movie1 = {'movie_name': 'movie1', 'year_released': 2018,'genre':'horror', 'rating': 5, 'review': \"Awesome!\", 'reviewer': 'AK'}\n movie2 = {'movie_name': 'movie2', 'year_released': 2012,'genre':'suspense', 'rating': 9, 'review': \"Bad!\", 'reviewer': 'PK'}\n self.movie_reviews.append(movie1)\n self.movie_reviews.append(movie2)\n return self.movie_reviews",
"def __get_movies(title):\n params = {\n 's': title,\n 'type': 'movie'\n }\n\n response = requests.get(API_URL + API_KEY, params=params).json()\n return response",
"def get_movies(genre: str):\n with MongoClient(uri) as client:\n movie_collection = client[DB][MSG_COLLECTION]\n msg_list = movie_collection.find({\"genres\": genre}).limit(100)\n movie_title_list = []\n for msg in msg_list:\n movie_title_list.append(msg[\"title\"])\n return movie_title_list",
"def extract_movies(dom):\n\n movie_csv = []\n for movie in find_div(dom):\n title = find_title(movie)\n rating = find_rating(movie)\n year = find_year(movie)\n actors = find_actors(movie)\n runtime = find_runtime(movie)\n movie_list = append_movie(title, rating, year, actors, runtime)\n movie_csv.append(movie_list)\n return movie_csv # REPLACE THIS LINE AS WELL IF APPROPRIsATE",
"def get_all_movies(self):\n cinemaworld_movies, filmworld_movies = self.get_movies()\n\n if cinemaworld_movies is not None:\n self.get_title_map(cinemaworld_movies, \"cinemaworld\")\n if filmworld_movies is not None:\n self.get_title_map(filmworld_movies, \"filmworld\")\n\n return self.title_map",
"def get_movies_list(self, world):\n api_url = self.api_url_base + '/api/{}/movies'.format(world)\n movies_dict = self.get_dict_from_apis(api_url)\n ret_dict = {world: None}\n if movies_dict is not None:\n ret_dict[world] = movies_dict['Movies']\n return ret_dict",
"def get_movies(jwt):\n\n movies = Movie.query.all()\n\n return jsonify({\n 'success': True,\n 'movies': [movie.format() for movie in movies],\n }), 200",
"def get_data(query, search_type):\n\n def filter_movies_only(entries):\n return [e for e in entries if e['media_type'] == 'movie']\n\n query = query.encode('utf-8')\n tmdb = get_tmdb(lang)\n search = tmdb.Search()\n if search_type == 'movie':\n movies = search.movie(query=query)['results']\n else:\n persons = search.person(query=query)['results']\n # We only select the first found actor/director.\n if persons:\n person_id = persons[0]['id']\n else:\n return []\n person = tmdb.People(person_id)\n person.combined_credits()\n if search_type == 'actor':\n movies = filter_movies_only(person.cast)\n else:\n movies = filter_movies_only(person.crew)\n movies = [m for m in movies if m['job'] == 'Director']\n return movies"
]
| [
"0.7033445",
"0.6535361",
"0.64394385",
"0.6439323",
"0.63908094",
"0.6382928",
"0.630076",
"0.6293877",
"0.6247835",
"0.6170389",
"0.6121725",
"0.61166793",
"0.6083215",
"0.6007548",
"0.59889406",
"0.59845316",
"0.59766597",
"0.5923087",
"0.5894062",
"0.58830535",
"0.5880029",
"0.58630043",
"0.5830581",
"0.581026",
"0.5809517",
"0.5805048",
"0.5779391",
"0.5768838",
"0.5762471",
"0.5761319"
]
| 0.78259444 | 0 |
Back up an ES index from its Doc class, including all index settings, mapping, aliases, and all docs; save to outfile if provided, otherwise return the backup data as a dictionary. | def backup_es(esdoc_class, outfile=None):
data = esdoc_class._index.get()
idx_name = list(data)[0]
data[idx_name]["docs"] = list(
dict(_id=hit.meta.id, **hit.to_dict()) for hit in esdoc_class.search().scan()
)
if outfile:
with open(outfile, "w") as out_f:
json.dump(data, out_f, indent=2, default=json_serial)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backup_schema(outfile=None):\n return backup_es(Schema, outfile=outfile)",
"def _write_index(self):\n # Make sure to only write non-default objects to the index.\n self.store.write_object(\n object=[obj.to_dict() for ns in self.index.values() for obj in ns.values() if not obj.is_default],\n object_id=self.identifier\n )",
"def backup_dataset(outfile=None):\n return backup_es(Dataset, outfile=outfile)",
"def save_index(self, index_path: str = \"hnswlib_index.bin\"):\n if index_path:\n if self.index is None:\n self.build_index()\n self.index.save_index(index_path)\n corpus_emb_json_path = index_path + \".json\"\n super().save_index(corpus_emb_json_path)\n logger.info(f\"Saving hnswlib index to: {index_path}, corpus embedding to: {corpus_emb_json_path}\")\n else:\n logger.warning(\"No index path given. Index not saved.\")",
"def backup_schema_class(outfile=None):\n return backup_es(SchemaClass, outfile=outfile)",
"def save(self):\n self.index.saveIndex(c.index_path('hnsw.index'))\n joblib.dump(self.ys, \"%s.ys\" % self.index_file_prefix)",
"def save_index(self, index_path: str = \"annoy_index.bin\"):\n if index_path:\n if self.index is None:\n self.build_index()\n self.index.save(index_path)\n corpus_emb_json_path = index_path + \".json\"\n super().save_index(corpus_emb_json_path)\n logger.info(f\"Saving Annoy index to: {index_path}, corpus embedding to: {corpus_emb_json_path}\")\n else:\n logger.warning(\"No index path given. Index not saved.\")",
"def backup_from_file(api):\n logger = logging.getLogger(\"backup_from_file\")\n if not api:\n logger.error(\"failure to restore from file, no json object passed.\")\n else:\n indices.reset()\n api_schema = api[\"discover_schema\"]\n api_schema_class = api[\"discover_schema_class\"]\n api_dataset = api[\"discover_dataset\"]\n\n for doc in api_schema[\"docs\"]:\n file = Schema(**doc)\n file.meta.id = doc[\"_id\"]\n file.save()\n\n for doc in api_schema_class[\"docs\"]:\n file = SchemaClass(**doc)\n file.save()\n\n for doc in api_dataset[\"docs\"]:\n file = Dataset(**doc)\n file.save()",
"def export_documents(self, index, filename, **kwargs):\n documentsGenerator = self.get_documents(index, **kwargs)\n documents = []\n format=kwargs.get('format','json')\n for doc in documentsGenerator:\n doc_with_id={**doc.to_dict(),'_id':doc.meta.id}\n documents.append(doc_with_id)\n self.__export_documents(documents,filename,exportformat=format)",
"def save(self) -> None:\n try:\n js = json.loads(\n self.reset_index().to_json(orient=\"records\", date_format=\"iso\")\n )\n\n with open(self._fp, \"w\") as f:\n f.writelines(json.dumps(js, indent=4))\n logger.debug(f\"Saved index to {self._fp}\")\n except Exception as e:\n logger.error(f\"Could not update database -- {e}\")",
"def save_index_config(index_dir, data):\n\n with open(os.path.join(index_dir, 'index.json'), \"w+\") as data_file:\n json.dump(data, data_file, indent=4)",
"def save_index(self):\n vsn_objs = [dict(Id = v['id'], Name = v['name']) for v in self.versions]\n self.backend.write_json(dict(\n Versions = vsn_objs,\n Channels = [], # This is unused.\n ApiVersion = 0,\n ), self.index_path())",
"def as_search_document(self, index='_all'):\n raise NotImplementedError(\n \"{} does not implement 'get_search_document'.\".format(self.__class__.__name__)\n )",
"def save(self):\n logging.debug(\"environment save entered\")\n filename = \"index.json\"\n content_dict = {}\n for fpname in self.footprints:\n # for now, just using the patteern ${footprint_name}-metadata for the name \n content_dict[fpname] = fpname\n content = json.dumps(content_dict)\n index = cf.store_object(self.container, filename, content) \n return True",
"def to_document_dict(self):\n urls = [u.url for u in self.urls]\n acl = [u.ace for u in self.acl]\n hashes = {h.hash_type: h.hash_value for h in self.hashes}\n metadata = {m.key: m.value for m in self.index_metadata}\n\n urls_metadata = {\n u.url: {m.key: m.value for m in u.url_metadata} for u in self.urls}\n created_date = self.created_date.isoformat()\n updated_date = self.updated_date.isoformat()\n\n return {\n 'did': self.did,\n 'baseid': self.baseid,\n 'rev': self.rev,\n 'size': self.size,\n 'file_name': self.file_name,\n 'version': self.version,\n 'uploader': self.uploader,\n 'urls': urls,\n 'urls_metadata': urls_metadata,\n 'acl': acl,\n 'hashes': hashes,\n 'metadata': metadata,\n 'form': self.form,\n 'created_date': created_date,\n \"updated_date\": updated_date,\n }",
"def save_import(self, out_dict):\n self.attributes('-topmost', 'false')\n options = self.create_options(saving=True)\n options['spreadsheet_path'] = self.spreadsheet_path\n self.wait_window(SavePage(self, options))\n self.attributes('-topmost', 'true')",
"def save(self, data, **kwargs):\r\n\r\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\r\n lg.setLevel(self.log_level)\r\n\r\n es = self.es\r\n es_index = get2(kwargs, \"es_index\", self.es_index)\r\n reset_index = kwargs.get(\"reset_index\",self.reset_index)\r\n doc_type = kwargs.get(\"doc_type\", self.doc_type)\r\n op_type = kwargs.get(\"op_type\", self.op_type)\r\n id_value = kwargs.get(\"id\")\r\n id_field = kwargs.get(\"id_field\")\r\n if id_field:\r\n id_value = data.get(id_field)\r\n if op_type == \"index\":\r\n result = es.index(index=es_index,\r\n id=id_value,\r\n doc_type=doc_type,\r\n body=data)\r\n elif op_type == \"create\":\r\n result = es.create(index=es_index,\r\n id=id_value,\r\n doc_type=doc_type,\r\n body=data)\r\n\r\n lg.debug(\"Result = \\n%s\",pp.pformat(result))\r\n return result",
"def save_index(self, fn):\n utils.save_obj(self.inverted_idx, fn)",
"def save(self):\n \n with open(os.path.join(self.output_dir, 'terms.dict'), 'wb') as f:\n pkl.dump(self.term_id_map, f)\n with open(os.path.join(self.output_dir, 'docs.dict'), 'wb') as f:\n pkl.dump(self.doc_id_map, f)",
"def save(self, *args, **kwargs):\n # create the index string\n message = str()\n data = kwargs.get(\"data\")\n ts = kwargs.get(\"ts\")\n # prepare the doc for indexing\n doc = dict()\n doc['_type'] = kwargs.get(\"type\")\n doc['info'] = data\n doc['@timestamp'] = datetime.datetime(*ts[:6])\n try:\n exists = self.es.indices.exists(kwargs.get(\"index\"))\n if not exists:\n map_st, map_msg = self.build_mapping(**kwargs)\n if not map_st:\n return map_st, map_msg\n res = self.es.index(\n index=kwargs.get(\"index\"),\n doc_type=doc['_type'],\n body=doc, # message\n timestamp=datetime.datetime.utcnow(), # set to current time\n consistency='one', # do not wait for quorum / all shards\n replication='async', # async\n ttl=ELASTICSEARCH_TTL) # as defined in settings\n return True, res\n except TransportError, e:\n # fail silently - just log and die ...\n message = 'Error in indexing, host: {}, unable to index'.format(\n ELASTICSEARCH_CONN)\n if ADD_LOG_FAILURES:\n LOGGER.exception(e)\n return False, message",
"def make_es_worker(search_conn, es_index, es_doc_type, class_name):\n new_esbase = copy.copy(search_conn)\n new_esbase.es_index = es_index\n new_esbase.doc_type = es_doc_type\n log.info(\"Indexing '%s' into ES index '%s' doctype '%s'\",\n class_name.pyuri,\n es_index,\n es_doc_type)\n return new_esbase",
"def save(self):\n a_copy = FileStorage.__objects\n obj_dict = {obj: a_copy[obj].to_dict() for obj in a_copy.keys()}\n with open(FileStorage.__file_path, \"w\") as f:\n json.dump(obj_dict, f)",
"def __index_data_body(index, doc_type, doc_id, source):\n\n index_data = {\n \"_index\": index,\n \"_type\": doc_type,\n \"_id\": doc_id,\n \"_source\": source\n }\n\n return index_data",
"def save_index(self, fn):\n utils.save_obj(self.tweetTerms, \"TweetTerm_%s\" % (self.counterOfTweetTermsFiles))\n self.computeTfIdf(self.counterOfTweets)\n self.deleteSingleEntities()\n inv_dict = {'inverted_idx': self.inverted_idx, 'posting': self.postingFiles}\n utils.save_obj(inv_dict, fn)",
"def serialize(self, sig_version=DEFAULT_SIGNATURE_VERSION):\n index_sig_dict = {\n 'fields': self.fields,\n }\n\n if self.name:\n index_sig_dict['name'] = self.name\n\n return index_sig_dict",
"def blend(self, index, id=None, **kwargs):\n url = f'{self.host}{index}/_doc/'\n method = 'post'\n if id is not None:\n method = 'put'\n url += str(id)\n resp = getattr(requests, method)(url, json=kwargs)\n self.flush(index)\n _id = resp.json()['_id']\n doc = self.get(index, _id)\n return doc",
"def persist(self):\n\n return {'index': self._index, 'bitfield': self._bitfield}",
"def test_read_index(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n TroveIndexBuilder(\"test/short.dat\", out=indexfile)\n\n index = TroveIndex()\n index.reload(indexfile)\n\n docs = sorted([doc for doc in index.documents])\n self.assertEquals(10, len(docs))\n\n self.assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], docs)\n\n doc = index.get_document(1)\n ref = {u\"id\":\"1\",u\"titleName\":u\"Hello\"}\n self.assertNotEquals(None, doc, \"Document not found for id 1\")\n self.assertDictEqual(ref, doc)\n\n doc = index.get_document(10)\n ref = {\"id\":\"10\",\"titleName\":\"Hello\"}\n self.assertNotEquals(None, doc)\n self.assertDictEqual(ref, doc)",
"def _save_elasticsearch(self, json_response, index, doc_type):\n try:\n _ = self._ensure_es_index(index)\n data = self.elasticsearch.index(index=index,\n doc_type=doc_type,\n body=json.dumps(json_response))\n self.elasticsearch.indices.refresh(index=index)\n except TransportError as error_msg:\n self.logger.error('%s triggered while trying to index type %s with body: %s',\n error_msg.error, doc_type, json.dumps(json_response))\n return False\n self.logger.debug(\"Document added to index '%s' with type '%s'. Document: %s which \" \\\n \"returned data: %s\", index, doc_type, json.dumps(json_response), data)\n return True",
"def index(self):\n return dict(data='index')"
]
| [
"0.5878769",
"0.57617337",
"0.5701214",
"0.56426847",
"0.56349516",
"0.5593593",
"0.5569869",
"0.5430634",
"0.5421856",
"0.5361963",
"0.5343214",
"0.5291585",
"0.5266545",
"0.52519333",
"0.5180529",
"0.51573664",
"0.5149495",
"0.50542545",
"0.5037411",
"0.502542",
"0.4937648",
"0.4929609",
"0.48751387",
"0.484337",
"0.48358554",
"0.48252112",
"0.48241287",
"0.48225662",
"0.482175",
"0.48024133"
]
| 0.79996437 | 0 |
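A minimal usage sketch for the backup_es helper above. Everything here is illustrative: it assumes elasticsearch-dsl is installed, an Elasticsearch cluster is reachable, and that Dataset is a Document subclass importable from a hypothetical myapp package (none of these module paths come from the source).

# Illustrative only -- module paths and the Dataset class are assumptions.
from elasticsearch_dsl import connections

from myapp.documents import Dataset   # hypothetical Document subclass
from myapp.backup import backup_es    # the helper shown above

# Register a default connection so Dataset._index and Dataset.search() work.
connections.create_connection(hosts=["http://localhost:9200"])

# Dump the index (settings, mapping, aliases, all docs) to a file; the same
# dictionary, keyed by the concrete index name, is also returned.
backup = backup_es(Dataset, outfile="discover_dataset.backup.json")
index_name = next(iter(backup))
print(index_name, len(backup[index_name]["docs"]), "documents backed up")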
Back up the Dataset index "discover_dataset", including settings, mapping, aliases, and all docs. Save to outfile if provided; otherwise, return the backup data as a dictionary. | def backup_dataset(outfile=None):
return backup_es(Dataset, outfile=outfile) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backup_es(esdoc_class, outfile=None):\n data = esdoc_class._index.get()\n idx_name = list(data)[0]\n data[idx_name][\"docs\"] = list(\n dict(_id=hit.meta.id, **hit.to_dict()) for hit in esdoc_class.search().scan()\n )\n if outfile:\n with open(outfile, \"w\") as out_f:\n json.dump(data, out_f, indent=2, default=json_serial)\n return data",
"def export_dataset(self):\n raise NotImplementedError",
"def _backup(self, data, dsetname=\"data\"):\n with h5.File(self._backupfile, 'a') as hfile:\n grp = hfile.create_group(\n dsetname +\n \"{}\".format(\n self._current_backup_indx))\n for key, value in data.items():\n if value is None:\n continue\n if key == \"images\":\n for img_num, img in enumerate(value):\n if img is None:\n continue\n #img = img.T\n dset = grp.create_dataset(\n \"img_{}\".format(img_num), data=img)\n dset.attrs['CLASS'] = \"IMAGE\"\n dset.attrs['IMAGE_VERSION'] = '1.2'\n dset.attrs['IMAGE_SUBCLASS'] = 'IMAGE_INDEXED'\n dset.attrs['IMAGE_MINMAXRANGE'] = np.array(\n [0, 255], dtype=np.uint8)\n else:\n grp.create_dataset(key, data=value)\n self._current_backup_indx += 1",
"def export_to_dict(session,\n recursive,\n back_references,\n include_defaults):\n logging.info('Starting export')\n dbs = session.query(Database)\n databases = [database.export_to_dict(recursive=recursive,\n include_parent_ref=back_references,\n include_defaults=include_defaults) for database in dbs]\n logging.info('Exported %d %s', len(databases), DATABASES_KEY)\n cls = session.query(DruidCluster)\n clusters = [cluster.export_to_dict(recursive=recursive,\n include_parent_ref=back_references,\n include_defaults=include_defaults) for cluster in cls]\n logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)\n data = dict()\n if databases:\n data[DATABASES_KEY] = databases\n if clusters:\n data[DRUID_CLUSTERS_KEY] = clusters\n return data",
"def indexed_dataset(self) -> Dict[int, List]:\n if self.__indexed_dataset is None:\n dataset = self.dataset()\n truncated_dataset = dataset[:1000]\n self.__indexed_dataset = {\n i: dataset[i] for i in range(len(dataset))\n }\n return self.__indexed_dataset",
"def _save_output(self, output: memh5.MemDiskGroup) -> Optional[str]:\n if output is None:\n return\n\n # Parse compression/chunks options\n def walk_dset_tree(grp, root=\"\"):\n # won't find forbidden datasets like index_map but we are not compressing those\n datasets = []\n for key in grp:\n if isinstance(grp[key], memh5.MemGroup):\n datasets += walk_dset_tree(grp[key], f\"{root}{key}/\")\n else:\n datasets.append(root + key)\n return datasets\n\n if isinstance(self.compression, dict):\n # We want to overwrite some compression settings\n datasets = walk_dset_tree(output)\n for ds in self.compression:\n if ds in datasets:\n for key, val in self.compression[ds].items():\n self.log.debug(\n f\"Overriding default compression setting on dataset {ds}: {key}={val}.\"\n )\n setattr(output._data._storage_root[ds], key, val)\n # shorthand for bitshuffle\n if output[ds].compression in (\n \"bitshuffle\",\n fileformats.H5FILTER,\n ):\n output[ds].compression = fileformats.H5FILTER\n if output[ds].compression_opts is None:\n output._data._storage_root[ds].compression_opts = (\n 0,\n fileformats.H5_COMPRESS_LZ4,\n )\n else:\n self.log.warning(\n f\"Ignoring config entry in `compression` for non-existing dataset `{ds}`\"\n )\n elif not self.compression:\n # Disable compression\n for ds in walk_dset_tree(output):\n output._data._storage_root[ds].chunks = None\n output._data._storage_root[ds].compression = None\n output._data._storage_root[ds].compression_opts = None\n\n # Routine to write output if needed.\n if self.save:\n # add metadata to output\n metadata = {\"versions\": self.versions, \"config\": self.pipeline_config}\n for key, value in metadata.items():\n output.add_history(key, value)\n\n # Construct the filename\n name_parts = self._interpolation_dict(output)\n if self.output_root != \"\":\n self.log.warn(\"Use of `output_root` is deprecated.\")\n name_parts[\"output_root\"] = self.output_root\n outfile = self.output_name.format(**name_parts)\n\n # Expand any variables in the path\n outfile = os.path.expanduser(outfile)\n outfile = os.path.expandvars(outfile)\n\n self.log.debug(\"Writing output %s to disk.\", outfile)\n self.write_output(\n outfile,\n output,\n file_format=self.output_format,\n )\n return outfile",
"def export_data(self):\n return self.export_all_data()",
"def save_dataset(self):\n if self.res_dataset is None:\n return\n if self.write_path is None:\n raise Exception(\"Error: Attempted to save result dataset without ever specifiying a path to write to\")\n\n if self.format == \"arrow\":\n self.res_dataset.save_to_disk(self.write_path)\n elif self.format == \"csv\":\n self.res_dataset.to_csv(self.write_path, index = False)",
"def download_dataset(self):\n raise NotImplementedError",
"def main_process_function(project_id, config_file, retention, backup_type, expiration):\n print(\"Running bigquery dataset export for project:{}\".format(project_id))\n # Reading backup-parameters from json config\n with open(config_file) as f:\n master_config = json.load(f)\n backup_config = master_config[\"backup\"]\n\n location = backup_config[\"location\"]\n schema_path = backup_config[\"schema_uri\"]\n table_path = backup_config[\"table_uri\"]\n project_backup_config = backup_config[\"projects_dict\"][project_id]\n mapped_list = []\n\n # Get timestamp\n timestamp = datetime.now().strftime(\"%Y-%m-%d\")\n\n # Creating Big Query Client\n client = bigquery.Client(project=project_id)\n\n # Getting mapped relation between datasets and their tables\n if backup_type == \"all\":\n # Get all datasets\n datasets = list_all_datasets(client=client)\n # Map dataset->[tables]\n dataset_tables_map = get_datasets_tables_dict(\n client=client, project_id=project_id, datasets=datasets\n )\n mapped_list.append(dataset_tables_map)\n elif backup_type == \"config\":\n # Extract the backup pattern from config\n backup_pattern = project_backup_config[\"backup_pattern\"]\n for key, value in backup_pattern.items():\n dataset_tables_map = {}\n if value == \"all\":\n # Map dataset->[tables]\n dataset_tables_map = get_datasets_tables_dict(\n client=client, project_id=project_id, datasets=[key]\n )\n mapped_list.append(dataset_tables_map)\n else:\n # Map dataset->[tables]\n dataset_tables_map[key] = value\n mapped_list.append(dataset_tables_map)\n else:\n print(\n \"Please provide a valid backup_type option. Choose from ['all', 'config']\"\n )\n return None\n\n # Performing dataset export to gcs (data, schema)\n if mapped_list:\n for datasets_tables_dict in mapped_list:\n for bq_dataset_name in datasets_tables_dict.keys():\n print(\"Backup Operation on dataset: {}\".format(bq_dataset_name))\n for bq_table_name in datasets_tables_dict[bq_dataset_name]:\n print(\"Backing up table: {}\".format(bq_table_name))\n try:\n # Getting dataset and table objects\n dataset_ref = bigquery.DatasetReference(\n project_id, bq_dataset_name\n )\n table_ref = dataset_ref.table(bq_table_name)\n table_obj = client.get_table(table_ref)\n\n # Specifying extract-job parameters\n gcs_table_path = table_path.format(\n bucket_name=project_backup_config[\"bucket_name\"],\n retention=retention,\n dataset_name=bq_dataset_name,\n timestamp=timestamp,\n table_file_name=bq_table_name + \"-*.json\",\n )\n job_config = bigquery.ExtractJobConfig()\n job_config.compression = bigquery.Compression.GZIP\n job_config.destination_format = (\n bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON\n )\n\n # Exporting table-data to gcs\n extract_job = client.extract_table(\n table_ref,\n gcs_table_path,\n job_config=job_config,\n location=location,\n )\n extract_job.result()\n\n # Extracting table-schema\n table_schema = table_obj.schema\n table_schema = [\n {\n \"name\": item.name,\n \"mode\": item.mode,\n \"type\": item.field_type,\n }\n for item in table_schema\n ]\n json_schema = json.dumps(table_schema)\n\n # Defining schema-path\n gcs_schema_path = schema_path.format(\n bucket_name=project_backup_config[\"bucket_name\"],\n retention=retention,\n dataset_name=bq_dataset_name,\n timestamp=timestamp,\n schema_file_name=bq_table_name + \"-schema.json\",\n )\n\n # Writing table-schema to gcs\n sa_credentials = os.getenv(\"GOOGLE_APPLICATION_CREDENTIALS\")\n fs = gcsfs.GCSFileSystem(\n project=project_id, token=sa_credentials\n )\n with fs.open(\n 
gcs_schema_path,\n \"w\",\n metadata={\"Content-Type\": \"application/json\"},\n ) as f:\n f.write(json_schema)\n except Exception as error:\n print(\n \"Exception occurred for project {} at function {} inside export-loop: {}\".format(\n project_id, \"main_process_function\", error\n )\n )\n # Deleting backup data based on the backup_data_policy\n backup_data_policy = {\n \"daily\": 1,\n \"weekly\": 7,\n \"monthly\": 30,\n \"yearly\": 365,\n }\n if str(expiration).title() == \"True\":\n try:\n bucket_name = project_backup_config[\"bucket_name\"]\n storage_client = storage.Client(project_id)\n client_bucket = storage_client.get_bucket(bucket_name)\n delete_date = (\n datetime.now()\n - timedelta(days=backup_data_policy[retention])\n ).strftime(\"%Y-%m-%d\")\n delete_path = \"{retention}/{dataset_name}/{timestamp}\".format(\n retention=retention,\n dataset_name=bq_dataset_name,\n timestamp=delete_date,\n )\n for file in client_bucket.list_blobs(prefix=delete_path):\n file.delete()\n print(\"Deleted '{}'\".format(file.name))\n except Exception as error:\n print(\n \"Exception occurred at function {} inside expiration-loop: {}\".format(\n \"main_process_function\", error\n )\n )\n else:\n pass\n return None\n else:\n print(\"The mapping between datasets and their tables is empty.\")\n return None",
"def Get_datasets(**kwargs):\n from .utils import option_printer, get_conn, get_param_dict, get_logger_instance\n from .cohort_tables import make_target_comp_tables\n from .table2rawseq import table_to_rawseq\n from .rawseq2multihot import rawseq_to_multihot\n from .multihot2datasets import multihot_to_datasets\n import os, logging\n from importlib import reload\n \n ## get params\n param_dict = get_param_dict(kwargs['DS_PARAMS_FILE_NAME'], kwargs['CONFIG_FOLDER_PATH'])\n param_dict.update(kwargs)\n if not os.path.exists(param_dict['DATA_FOLDER_PATH']): os.makedirs(param_dict['DATA_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['CDM_DB']\n \n param_dict['DUMPING_PATH'] = os.path.join(param_dict['RESULT_FOLDER_PATH'], \n param_dict['PROJECT_NAME'], \n param_dict['CDM_DB_NAME'])\n if not os.path.exists(param_dict['DUMPING_PATH']): \n os.makedirs(param_dict['DUMPING_PATH'])\n \n if param_dict['PIPELINE_START_LEVEL']<3:\n param_dict['DB_CONN'], CDM_DB_NAME, RESULT_DB_NAME = get_conn(param_dict['DB_CONN_FILENAME'], \n param_dict['CONFIG_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = CDM_DB_NAME\n param_dict['RESULT_DB_NAME'] = RESULT_DB_NAME\n else:\n param_dict['RESULT_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['RESULT_DB']\n \n ## logger\n logging.shutdown()\n reload(logging)\n main_logger = get_logger_instance(logger_name='ds_pipeline', \n DUMPING_PATH=param_dict['DUMPING_PATH'], \n parent_name=False,\n stream=True)\n \n ## print params\n main_logger.info(\"\\n (params) \\n\")\n try: option_printer(main_logger, param_dict['DB_CONN'], **param_dict)\n except: pass\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [1] Make_target_comp_tables\n if param_dict['PIPELINE_START_LEVEL']<=1:\n main_logger.info(\"\\n[Level 1] Make_TARGET_COMP_tables\\n\")\n make_target_comp_tables(**param_dict)\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [2] Table to rawSeq\n if param_dict['PIPELINE_START_LEVEL']<=2:\n main_logger.info(\"\\n[Level 2] Table to rawSeq\\n\")\n table_to_rawseq(param_dict['DUMPING_PATH'], \n param_dict['DB_CONN'], param_dict['CDM_DB_NAME'], \n param_dict['DATA_FOLDER_PATH'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [3] rawSeq to multihot\n if param_dict['PIPELINE_START_LEVEL']<=3:\n main_logger.info(\"\\n[Level 3] Convert to multihot\\n\")\n rawseq_to_multihot(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['MAX_TIME_STEP'], \n param_dict['DX_ONLY'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [4] Multihot to Dataset\n if param_dict['PIPELINE_START_LEVEL']<=4:\n main_logger.info(\"\\n[Level 4] Multihot to Dataset\\n\")\n datasets = multihot_to_datasets(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['TR_RATIO'])\n \n #add info\n if param_dict['PIPELINE_START_LEVEL']<3: \n datasets.info['DB_CONN'] = param_dict['DB_CONN']\n datasets.info['CONFIG_FOLDER_PATH'] = param_dict['CONFIG_FOLDER_PATH']\n datasets.info['DATA_FOLDER_PATH'] = param_dict['DATA_FOLDER_PATH']\n datasets.info['RESULT_FOLDER_PATH'] = param_dict['RESULT_FOLDER_PATH']\n datasets.info['DB_CONN_FILENAME'] = param_dict['DB_CONN_FILENAME']\n datasets.info['DS_PARAMS_FILE_NAME'] = param_dict['DS_PARAMS_FILE_NAME']\n datasets.info['CDM_DB_NAME'] = param_dict['CDM_DB_NAME']\n datasets.info['RESULT_DB_NAME'] = param_dict['RESULT_DB_NAME']\n \n main_logger.info(\"\\n[Datasets Info.]\\n\")\n main_logger.info(\"{0:>26} {1:}\".format('[OPTION]', '[VALUE]'))\n for k 
in sorted(datasets.info.keys()):\n main_logger.info(\" {0:>23}: {1:}\".format(k, datasets.info[k]))\n \n #print(\"\\nALL DONE!!\")\n main_logger.info(\"\\n[ALL DONE!!]\\n\\n\")\n for h in list(main_logger.handlers):\n main_logger.removeHandler(h)\n h.flush()\n h.close()\n return datasets",
"def download_dataset(dataset, subset='train', format='bow', root='./data', verbose=False):\n dataset_meta = _get_data_meta(dataset, subset=subset, format=format)\n dataset_dest = path.join(root, dataset.lower() + '_' + format + \".zip\")\n data_dir = path.join(root, dataset_meta['dir'])\n file_path = dataset_meta[subset]\n\n if isinstance(file_path, str):\n file_path = [file_path]\n elif isinstance(file_path, dict):\n file_path = file_path.values()\n if not all(path.exists(path.join(data_dir, f)) for f in file_path):\n if 'drive.google.com' in dataset_meta['url']:\n _download_file_from_google_drive(dataset_meta['url'], dataset_dest, unzip=True, overwrite=True, delete_zip=True, verbose=verbose)",
"def get_dataset_meta(self, output_name, dataset_id):\n return {}",
"def get_new_dataset_meta_by_basename(self, output_name, basename):\n return {}",
"def list_datasets():\n datasets = {}\n for datafile in HERE.glob(\"*.csv.gz\"):\n index = False\n name = datafile.name[:-7]\n if \".indexed\" in name:\n name = name.replace(\".indexed\", \"\")\n index = True\n datasets[name] = {\"index\": index, \"file\": datafile}\n return datasets",
"def backup_from_file(api):\n logger = logging.getLogger(\"backup_from_file\")\n if not api:\n logger.error(\"failure to restore from file, no json object passed.\")\n else:\n indices.reset()\n api_schema = api[\"discover_schema\"]\n api_schema_class = api[\"discover_schema_class\"]\n api_dataset = api[\"discover_dataset\"]\n\n for doc in api_schema[\"docs\"]:\n file = Schema(**doc)\n file.meta.id = doc[\"_id\"]\n file.save()\n\n for doc in api_schema_class[\"docs\"]:\n file = SchemaClass(**doc)\n file.save()\n\n for doc in api_dataset[\"docs\"]:\n file = Dataset(**doc)\n file.save()",
"def fetch(index, outfile):\n populate_index(index, outfile=outfile)",
"def discovery_documents(filepath, preferred=False, skip=None):\n repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name()))\n filenames = glob.glob(join(repo.filepath, 'discoveries/*.json'))\n # Skip index.json.\n filenames = [x for x in filenames if os.path.basename(x) != 'index.json']\n ddocs = {}\n for filename in filenames:\n id_ = None\n with open(filename) as file_:\n id_ = json.load(file_)['id']\n # If an ID has already been visited, skip it.\n if id_ in ddocs:\n continue\n ddocs[id_] = filename\n if skip:\n _ = [ddocs.pop(id_, None) for id_ in skip]\n if not preferred:\n return ddocs\n index = {}\n with open(join(repo.filepath, 'discoveries/index.json')) as file_:\n index = json.load(file_)\n for api in index['items']:\n id_ = api['id']\n if id_ in _ACTUALLY_PREFERRED:\n continue\n if api['preferred']:\n continue\n ddocs.pop(id_, None)\n return ddocs",
"def to_dask(self):\n self._load_metadata()\n return self._ds",
"def to_dask(self):\n self._load_metadata()\n return self._ds",
"def save(self):\n if self._data is None and self._meta is None:\n w = \"No data/meta components found in the DataSet.\"\n warnings.warn(w)\n return None\n ds_clone = self.clone()\n self._cache['savepoint'] = ds_clone.split()\n return None",
"def save_data(self):\n # Command to get the download data\n pass",
"def save_data(self, data_dir, rank=0, save_dataset=False, skip_keys=None):\n if rank != 0:\n return\n\n io.check_else_make_dir(data_dir)\n for key, val in self.data.items():\n if skip_keys is not None:\n if key in skip_keys:\n continue\n\n out_file = os.path.join(data_dir, f'{key}.z')\n head, tail = os.path.split(out_file)\n io.check_else_make_dir(head)\n io.savez(np.array(val), out_file)\n\n if save_dataset:\n try:\n self.save_dataset(data_dir)\n except ValueError:\n io.console.log('Unable to save `xarray.Dataset`, continuing')",
"def export_documents(self, index, filename, **kwargs):\n documentsGenerator = self.get_documents(index, **kwargs)\n documents = []\n format=kwargs.get('format','json')\n for doc in documentsGenerator:\n doc_with_id={**doc.to_dict(),'_id':doc.meta.id}\n documents.append(doc_with_id)\n self.__export_documents(documents,filename,exportformat=format)",
"def get_data_info(self, idx: int) -> dict:\n if self.serialize_data:\n start_addr = 0 if idx == 0 else self.data_address[idx - 1].item()\n end_addr = self.data_address[idx].item()\n bytes = memoryview(self.data_infos_bytes[start_addr:end_addr])\n data_info = pickle.loads(bytes)\n else:\n data_info = self.data_infos[idx]\n\n return data_info",
"def retrieveData(processedDataStoreDir, data_to_select, var_from_database_to_select,\n data_ext ='*.pickle'):\n \n dataPathString = os.path.join(processedDataStoreDir, data_ext)\n all_data_files = glob.glob(dataPathString)\n \n imageIDs_to_analyze = data_to_select['imageID'].values.tolist()\n \n \n all_data = {}\n for iData, data_file in enumerate(all_data_files):\n file_name = os.path.basename(data_file).split('.')[0]\n if any(file_name == imageID for imageID in imageIDs_to_analyze):\n imageIdx = imageIDs_to_analyze.index(file_name)\n data_file = open(data_file, 'rb')\n if data_ext == '*.pickle':\n data = cPickle.load(data_file)\n else:\n print('The extension type is not compatible with current reading \\\n procedure')\n break\n # Keeping desired variables from the meta data\n for variable in var_from_database_to_select:\n if 'imageID' in variable:\n pass\n else:\n data[variable] = data_to_select[variable].values.tolist()[imageIdx]\n \n \n \n \n \n data_file.close()\n all_data[iData] = data\n \n \n else:\n pass\n return all_data",
"def get_dataset():\n\n return db.store.all()",
"def get_dataset(request):\n from seed.models import obj_to_dict\n dataset_id = request.GET.get('dataset_id', '')\n orgs = request.user.orgs.all()\n # check if user has access to the dataset\n d = ImportRecord.objects.filter(\n super_organization__in=orgs, pk=dataset_id\n )\n if d.exists():\n d = d[0]\n else:\n return {\n 'status': 'success',\n 'dataset': {},\n }\n\n dataset = obj_to_dict(d)\n importfiles = []\n for f in d.files:\n importfile = obj_to_dict(f)\n importfile['name'] = f.filename_only\n importfiles.append(importfile)\n\n dataset['importfiles'] = importfiles\n if d.last_modified_by:\n dataset['last_modified_by'] = d.last_modified_by.email\n dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(\n import_file__in=d.files\n ).count()\n dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)\n\n return {\n 'status': 'success',\n 'dataset': dataset,\n }",
"def getPreviewDataSet(self, date: str = None, output: str = \"raw\") -> dict:\n if self.loggingEnabled:\n self.logger.debug(f\"Starting getPreviewDataSet\")\n path = \"/previewsamplestatus/report/dataset\"\n params = {}\n if date is not None:\n params[\"date\"] = date\n privateHeader = deepcopy(self.header)\n privateHeader[\"Accept\"] = \"application/json\"\n res = self.connector.getData(self.endpoint + path, headers=privateHeader)\n if output == \"df\":\n df = pd.DataFrame(res[\"data\"])\n return df\n return res",
"def download_entire_dataset(dataset_name, num_data, labels, method, cache_dir):\n\n print('Downloading {}...'.format(dataset_name))\n preprocessor = preprocess_method_dict[method]()\n\n # Select the first `num_data` samples from the dataset.\n target_index = numpy.arange(num_data) if num_data >= 0 else None\n dataset_parts = D.molnet.get_molnet_dataset(dataset_name, preprocessor,\n labels=labels,\n target_index=target_index)\n dataset_parts = dataset_parts['dataset']\n\n # Cache the downloaded dataset.\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n for i, part in enumerate(['train', 'valid', 'test']):\n filename = dataset_part_filename(part, num_data)\n path = os.path.join(cache_dir, filename)\n NumpyTupleDataset.save(path, dataset_parts[i])\n return dataset_parts"
]
| [
"0.5655255",
"0.5525369",
"0.5212151",
"0.5152905",
"0.51345235",
"0.5077431",
"0.5040934",
"0.50195885",
"0.49345028",
"0.49191532",
"0.48888722",
"0.48806277",
"0.48804173",
"0.48743966",
"0.48640543",
"0.48606038",
"0.4850102",
"0.47867522",
"0.47753274",
"0.47753274",
"0.47696662",
"0.4764247",
"0.47555462",
"0.47454673",
"0.47404614",
"0.47061157",
"0.46984777",
"0.46905735",
"0.4685199",
"0.46836808"
]
| 0.686908 | 0 |
Back up the Schema index "discover_schema", including settings, mapping, aliases, and all docs. Save to outfile if provided; otherwise, return the backup data as a dictionary. | def backup_schema(outfile=None):
return backup_es(Schema, outfile=outfile) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backup_es(esdoc_class, outfile=None):\n data = esdoc_class._index.get()\n idx_name = list(data)[0]\n data[idx_name][\"docs\"] = list(\n dict(_id=hit.meta.id, **hit.to_dict()) for hit in esdoc_class.search().scan()\n )\n if outfile:\n with open(outfile, \"w\") as out_f:\n json.dump(data, out_f, indent=2, default=json_serial)\n return data",
"def do_export_schema(self):\n export_schema = self.get_arg_value(\"export_schema\")\n\n if export_schema:\n row = {\"schemas\": self.final_schemas}\n self.write_rows(rows=row)\n del row",
"def backup_schema_class(outfile=None):\n return backup_es(SchemaClass, outfile=outfile)",
"def export_schema_to_dict(back_references):\n databases = [Database.export_schema(recursive=True,\n include_parent_ref=back_references)]\n clusters = [DruidCluster.export_schema(recursive=True,\n include_parent_ref=back_references)]\n data = dict()\n if databases:\n data[DATABASES_KEY] = databases\n if clusters:\n data[DRUID_CLUSTERS_KEY] = clusters\n return data",
"def backup_from_file(api):\n logger = logging.getLogger(\"backup_from_file\")\n if not api:\n logger.error(\"failure to restore from file, no json object passed.\")\n else:\n indices.reset()\n api_schema = api[\"discover_schema\"]\n api_schema_class = api[\"discover_schema_class\"]\n api_dataset = api[\"discover_dataset\"]\n\n for doc in api_schema[\"docs\"]:\n file = Schema(**doc)\n file.meta.id = doc[\"_id\"]\n file.save()\n\n for doc in api_schema_class[\"docs\"]:\n file = SchemaClass(**doc)\n file.save()\n\n for doc in api_dataset[\"docs\"]:\n file = Dataset(**doc)\n file.save()",
"def _DumpSchema(root, out, path):\n if isinstance(root, type):\n root = root()\n elif hasattr(root, '__call__') and not hasattr(root, 'export_params'):\n root = root()\n for i in root.export_params:\n name = i.replace('-', '_')\n out.append('.'.join(path + [name]))\n for i in root.export_objects:\n name = i.replace('-', '_')\n out.append('.'.join(path + [name, '']))\n _DumpSchema(getattr(root, name), out, path + [name])\n for i in root.export_object_lists:\n name = i.replace('-', '_')\n out.append('.'.join(path + [name, '']))\n out.append('.'.join(path + [name, '{i}']))\n _DumpSchema(getattr(root, name), out, path + [name, '{i}'])",
"def get_schema(self) -> dict:",
"def get_schema_store(validate_schema: bool = False, schema_search_path: str = None) -> dict:\n try:\n if not schema_search_path:\n schema_search_path = path.join(path.dirname(__file__), 'schemas')\n schemastore = {}\n fnames = listdir(schema_search_path)\n for fname in fnames:\n fpath = path.join(schema_search_path, fname)\n if fpath[-5:] == '.json':\n with open(fpath, 'r', encoding='utf-8') as schema_fd:\n schema = json.load(schema_fd)\n if '$id' in schema:\n schemastore[schema['$id']] = schema\n\n if validate_schema:\n for _, schema in schemastore.items():\n Draft7Validator.check_schema(schema)\n\n return schemastore\n except (SchemaError, json.JSONDecodeError) as error:\n # handle schema error\n raise error",
"def save(self):\n logging.debug(\"environment save entered\")\n filename = \"index.json\"\n content_dict = {}\n for fpname in self.footprints:\n # for now, just using the patteern ${footprint_name}-metadata for the name \n content_dict[fpname] = fpname\n content = json.dumps(content_dict)\n index = cf.store_object(self.container, filename, content) \n return True",
"def dump_designs(self):\n param_dict = {'startkey': '\"_design/\"',\n 'endkey': '\"_design0\"',\n 'include_docs': 'true'} \n r = requests.get('/'.join([self.url, self.db, \"_all_docs\"]), params=param_dict)\n # Write out the JSON document itself\n for row in r.json()['rows']:\n design_str = row['id'].replace('/', '%2F')\n json.dump(row, open(design_str + '.json', 'w'), indent=4)\n # Also create separate files for the map/reduce javascript\n for view in row[\"doc\"][\"views\"]:\n view_str = '%2F'.join(['_view', view])\n f = open('%2F'.join([design_str, view_str + \".js\"]), 'w')\n for func in row[\"doc\"][\"views\"][view]:\n f.write(func + \" = \" + row[\"doc\"][\"views\"][view][func] + \"\\n\")\n f.close()",
"def get_schema(self) -> ArchiveSchema:\n return self.schema",
"def get_metadata_schema(self) -> dict:\n metadata_schema = load_dict_from_file(Path(__file__).parent / \"schemas\" / \"base_metadata_schema.json\")\n for data_interface in self.data_interface_objects.values():\n interface_schema = unroot_schema(data_interface.get_metadata_schema())\n metadata_schema = dict_deep_update(metadata_schema, interface_schema)\n\n default_values = self.get_metadata()\n fill_defaults(metadata_schema, default_values)\n return metadata_schema",
"def database_dump(self):\r\n print('=====Dumping database=====')\r\n self.database_table_dump(query.TABLE_STATS)\r\n print()\r\n self.database_table_dump(query.TABLE_TWEETS)\r\n print()\r\n self.database_table_dump(query.TABLE_POSTS)\r\n print()\r\n self.database_table_dump(query.TABLE_FOLLOWS)",
"def dump(self, file_name=None, sort=None):\n if file_name:\n dump_file = open(file_name, 'w')\n results = [\"Schema: %s\\n\" % self.name]\n objects = list(self.schema.keys())\n if sort:\n objects.sort()\n for schema_object in objects:\n results.append(self.schema[schema_object].dump())\n if file_name:\n dump_file.write('\\n'.join(results))\n dump_file.close()\n else:\n return '\\n'.join(results)",
"def _write(self, schema, writer, snapshots):\n encoder = self.serializer\n doc = {\n META_SCHEMA: [encoder.serialize_column(c) for c in schema],\n META_SNAPSHOTS: [encoder.serialize_snapshot(s) for s in snapshots],\n META_ROWCOUNT: writer.row_counter,\n META_PRIMARYKEY: self._primary_key\n }\n with open(self.tmpmetafile, 'w') as f:\n json.dump(doc, f, cls=self.encoder)\n # Close the archive writer.\n writer.close()\n # Replace existing files with temporary files for new archive version.\n # This is the point of no return.\n # TODO: Instead of moving files we could delete (or keep) previous\n # files as backup.\n shutil.move(src=self.tmpmetafile, dst=self.metafile)\n shutil.move(src=self.tmpdatafile, dst=self.datafile)",
"def _write_index(self):\n # Make sure to only write non-default objects to the index.\n self.store.write_object(\n object=[obj.to_dict() for ns in self.index.values() for obj in ns.values() if not obj.is_default],\n object_id=self.identifier\n )",
"def save_index_config(index_dir, data):\n\n with open(os.path.join(index_dir, 'index.json'), \"w+\") as data_file:\n json.dump(data, data_file, indent=4)",
"def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)",
"def export_to_dict(session,\n recursive,\n back_references,\n include_defaults):\n logging.info('Starting export')\n dbs = session.query(Database)\n databases = [database.export_to_dict(recursive=recursive,\n include_parent_ref=back_references,\n include_defaults=include_defaults) for database in dbs]\n logging.info('Exported %d %s', len(databases), DATABASES_KEY)\n cls = session.query(DruidCluster)\n clusters = [cluster.export_to_dict(recursive=recursive,\n include_parent_ref=back_references,\n include_defaults=include_defaults) for cluster in cls]\n logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)\n data = dict()\n if databases:\n data[DATABASES_KEY] = databases\n if clusters:\n data[DRUID_CLUSTERS_KEY] = clusters\n return data",
"def get_schema() -> dict:\n raise NotImplementedError()",
"def fetch(index, outfile):\n populate_index(index, outfile=outfile)",
"def export_documents(self, index, filename, **kwargs):\n documentsGenerator = self.get_documents(index, **kwargs)\n documents = []\n format=kwargs.get('format','json')\n for doc in documentsGenerator:\n doc_with_id={**doc.to_dict(),'_id':doc.meta.id}\n documents.append(doc_with_id)\n self.__export_documents(documents,filename,exportformat=format)",
"def dump_footprint(self, fpname):\n container_name = \"%s-metadata\" % fpname\n container = cf.get_container(container_name)\n index = container.get_object(\"index.json\")\n config = json.loads(index.fetch())\n pprint.pprint(config)",
"def backup_dataset(outfile=None):\n return backup_es(Dataset, outfile=outfile)",
"def dump(self, obj, context=None):\n return self.schema_class(context=context).dump(obj).data",
"def export(self):\n f = open(self.database, 'w')\n for line in self.conn.iterdump():\n f.write(line)\n self.c.close()",
"def save(self) -> None:\n try:\n js = json.loads(\n self.reset_index().to_json(orient=\"records\", date_format=\"iso\")\n )\n\n with open(self._fp, \"w\") as f:\n f.writelines(json.dumps(js, indent=4))\n logger.debug(f\"Saved index to {self._fp}\")\n except Exception as e:\n logger.error(f\"Could not update database -- {e}\")",
"def index_schema(schema, path):\n for section in path.split(\"/\"):\n if schema[\"type\"] != \"object\":\n raise ValueError(\n \"Only object types are supported in the schema structure, \"\n \"but saw type %s\" % schema[\"type\"]\n )\n properties = schema[\"properties\"]\n if section not in properties:\n raise ValueError(\"Invalid path %s in user options\" % path)\n schema = properties[section]\n return schema",
"def save_mappings(self, folder: str, filename: str = None, vocabulary_uri: str = None) -> None:\n\n concordance = self._get_concordance(vocabulary_uri)\n\n if filename is None:\n filename = str(datetime.now()).split(\".\")[0].replace(\":\", \"-\")\n\n full_filename = folder + f\"{filename}.ndjson\"\n with open(full_filename, \"w\") as file:\n for mapping in concordance.mappings:\n print(f\"{mapping.get_dict()}\".replace(\"'\", '\"').replace(\" \", \"\"), file=file)",
"def export_db(self, export_location: Path) -> None:\n raise NotImplementedError"
]
| [
"0.57294905",
"0.5624704",
"0.5621064",
"0.5573264",
"0.54107946",
"0.50398797",
"0.49729794",
"0.49718326",
"0.49672258",
"0.49517736",
"0.48920152",
"0.48407328",
"0.48218256",
"0.48188037",
"0.48028147",
"0.47949687",
"0.47470766",
"0.4696858",
"0.46950325",
"0.467566",
"0.4674002",
"0.4661355",
"0.46492082",
"0.4643623",
"0.46343285",
"0.45984742",
"0.4578856",
"0.4560891",
"0.45426202",
"0.45410195"
]
| 0.6826845 | 0 |
Back up the SchemaClass index "discover_schema_class", including settings, mapping, aliases, and all docs. Save to outfile if provided; otherwise, return the backup data as a dictionary. | def backup_schema_class(outfile=None):
return backup_es(SchemaClass, outfile=outfile) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backup_es(esdoc_class, outfile=None):\n data = esdoc_class._index.get()\n idx_name = list(data)[0]\n data[idx_name][\"docs\"] = list(\n dict(_id=hit.meta.id, **hit.to_dict()) for hit in esdoc_class.search().scan()\n )\n if outfile:\n with open(outfile, \"w\") as out_f:\n json.dump(data, out_f, indent=2, default=json_serial)\n return data",
"def backup_schema(outfile=None):\n return backup_es(Schema, outfile=outfile)",
"def backup_from_file(api):\n logger = logging.getLogger(\"backup_from_file\")\n if not api:\n logger.error(\"failure to restore from file, no json object passed.\")\n else:\n indices.reset()\n api_schema = api[\"discover_schema\"]\n api_schema_class = api[\"discover_schema_class\"]\n api_dataset = api[\"discover_dataset\"]\n\n for doc in api_schema[\"docs\"]:\n file = Schema(**doc)\n file.meta.id = doc[\"_id\"]\n file.save()\n\n for doc in api_schema_class[\"docs\"]:\n file = SchemaClass(**doc)\n file.save()\n\n for doc in api_dataset[\"docs\"]:\n file = Dataset(**doc)\n file.save()",
"def export_schema_to_dict(back_references):\n databases = [Database.export_schema(recursive=True,\n include_parent_ref=back_references)]\n clusters = [DruidCluster.export_schema(recursive=True,\n include_parent_ref=back_references)]\n data = dict()\n if databases:\n data[DATABASES_KEY] = databases\n if clusters:\n data[DRUID_CLUSTERS_KEY] = clusters\n return data",
"def _deco_class_settings_dict(self) -> OrderedDict:\n return self._classname2SettingsData_dict[self.deco_class.__name__]",
"def find_dump(cls):\n cls_attrs = dir(cls)\n if \"to_json\" in cls_attrs:\n return cls.to_json\n if \"json\" in cls_attrs:\n return lambda o: o.json\n if is_dataclass(cls):\n return asdict\n raise ValueError(f\"Cannot find a dumper method for {cls}\")",
"def save(self):\n logging.debug(\"environment save entered\")\n filename = \"index.json\"\n content_dict = {}\n for fpname in self.footprints:\n # for now, just using the patteern ${footprint_name}-metadata for the name \n content_dict[fpname] = fpname\n content = json.dumps(content_dict)\n index = cf.store_object(self.container, filename, content) \n return True",
"def to_dict(xml_document, schema=None, cls=None, path=None, process_namespaces=True,\n locations=None, base_url=None, defuse='remote', timeout=300, lazy=False, **kwargs):\n source, schema = get_context(\n xml_document, schema, cls, locations, base_url, defuse, timeout, lazy\n )\n return schema.decode(source, path=path, process_namespaces=process_namespaces, **kwargs)",
"def get_schema_store(validate_schema: bool = False, schema_search_path: str = None) -> dict:\n try:\n if not schema_search_path:\n schema_search_path = path.join(path.dirname(__file__), 'schemas')\n schemastore = {}\n fnames = listdir(schema_search_path)\n for fname in fnames:\n fpath = path.join(schema_search_path, fname)\n if fpath[-5:] == '.json':\n with open(fpath, 'r', encoding='utf-8') as schema_fd:\n schema = json.load(schema_fd)\n if '$id' in schema:\n schemastore[schema['$id']] = schema\n\n if validate_schema:\n for _, schema in schemastore.items():\n Draft7Validator.check_schema(schema)\n\n return schemastore\n except (SchemaError, json.JSONDecodeError) as error:\n # handle schema error\n raise error",
"def do_export_schema(self):\n export_schema = self.get_arg_value(\"export_schema\")\n\n if export_schema:\n row = {\"schemas\": self.final_schemas}\n self.write_rows(rows=row)\n del row",
"def getSchema(cls):\n pass",
"def get_schema(self) -> dict:",
"def _write_index(self):\n # Make sure to only write non-default objects to the index.\n self.store.write_object(\n object=[obj.to_dict() for ns in self.index.values() for obj in ns.values() if not obj.is_default],\n object_id=self.identifier\n )",
"def export_to_dict(session,\n recursive,\n back_references,\n include_defaults):\n logging.info('Starting export')\n dbs = session.query(Database)\n databases = [database.export_to_dict(recursive=recursive,\n include_parent_ref=back_references,\n include_defaults=include_defaults) for database in dbs]\n logging.info('Exported %d %s', len(databases), DATABASES_KEY)\n cls = session.query(DruidCluster)\n clusters = [cluster.export_to_dict(recursive=recursive,\n include_parent_ref=back_references,\n include_defaults=include_defaults) for cluster in cls]\n logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)\n data = dict()\n if databases:\n data[DATABASES_KEY] = databases\n if clusters:\n data[DRUID_CLUSTERS_KEY] = clusters\n return data",
"def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)",
"def class_to_db(self):",
"def get_schema(self) -> ArchiveSchema:\n return self.schema",
"def dump(self, obj, context=None):\n return self.schema_class(context=context).dump(obj).data",
"def _DumpSchema(root, out, path):\n if isinstance(root, type):\n root = root()\n elif hasattr(root, '__call__') and not hasattr(root, 'export_params'):\n root = root()\n for i in root.export_params:\n name = i.replace('-', '_')\n out.append('.'.join(path + [name]))\n for i in root.export_objects:\n name = i.replace('-', '_')\n out.append('.'.join(path + [name, '']))\n _DumpSchema(getattr(root, name), out, path + [name])\n for i in root.export_object_lists:\n name = i.replace('-', '_')\n out.append('.'.join(path + [name, '']))\n out.append('.'.join(path + [name, '{i}']))\n _DumpSchema(getattr(root, name), out, path + [name, '{i}'])",
"def to_dict(self, exclusions=[]) -> dict:\n dump = {}\n for (name, value) in self.__dict__.items():\n if name == \"data\" and isinstance(value, pd.DataFrame):\n df_dict = value.to_dict(orient=\"split\")\n dump[encoder(name)] = encoder(df_dict)\n else:\n dump[encoder(name)] = encoder(value)\n\n if \"class_str\" not in dump or dump[\"class_str\"] is None:\n dump[\"class_str\"] = [f\"{self.__module__}.{self.__class__.__name__}\"]\n return dump",
"def dump(self, file_name=None, sort=None):\n if file_name:\n dump_file = open(file_name, 'w')\n results = [\"Schema: %s\\n\" % self.name]\n objects = list(self.schema.keys())\n if sort:\n objects.sort()\n for schema_object in objects:\n results.append(self.schema[schema_object].dump())\n if file_name:\n dump_file.write('\\n'.join(results))\n dump_file.close()\n else:\n return '\\n'.join(results)",
"def _write(self, schema, writer, snapshots):\n encoder = self.serializer\n doc = {\n META_SCHEMA: [encoder.serialize_column(c) for c in schema],\n META_SNAPSHOTS: [encoder.serialize_snapshot(s) for s in snapshots],\n META_ROWCOUNT: writer.row_counter,\n META_PRIMARYKEY: self._primary_key\n }\n with open(self.tmpmetafile, 'w') as f:\n json.dump(doc, f, cls=self.encoder)\n # Close the archive writer.\n writer.close()\n # Replace existing files with temporary files for new archive version.\n # This is the point of no return.\n # TODO: Instead of moving files we could delete (or keep) previous\n # files as backup.\n shutil.move(src=self.tmpmetafile, dst=self.metafile)\n shutil.move(src=self.tmpdatafile, dst=self.datafile)",
"def save_class_representation(self):\n class_dict = {}\n for key, value in self.class_dict.items():\n class_dict['-'.join(key)] = list(value)\n with open('data/class_vectors.txt', 'w') as file:\n json.dump(class_dict, file)\n return class_dict",
"def serialize(self) -> Dict:\n return dict(\n author=self.doc.author if self.doc else None,\n name=self.name,\n functions=[x.serialize() for x in self.functions],\n classes=[x.serialize() for x in self.classes],\n doc=self.doc.serialize() if self.doc else None,\n )",
"def save_class(self):\n with open(self.savefile, \"w\") as f:\n data = {\"name\": self.name, \"host\": self.host, \"port\": self.port}\n json.dump(data, f)",
"def get_schema() -> dict:\n raise NotImplementedError()",
"def _transform_search_database(self):\n # TODO: Create this and link with protein object when fasta file is provided\n return {\n \"file_format\": \"fasta format\",\n \"name\": \"\",\n \"id\": 1,\n \"location\": \"\",\n \"params\": [],\n }",
"def get_deco_class_settings_dict(cls, clsname) -> OrderedDict:\n return cls._classname2SettingsData_dict[clsname]",
"def load_classes(self):\n super().load_classes()\n from scipy.io import loadmat\n mat_path = os.path.join(self.input_data_path, \"index_ade20k.mat\")\n object_names = loadmat(mat_path)['index']['objectnames'][0][0][0]\n self.ADE20K_CLASSES = {}\n self.ADE20K_CLASSES_reverse = {}\n for i in range(len(object_names)):\n self.ADE20K_CLASSES[object_names[i][0]] = i+1\n self.ADE20K_CLASSES_reverse[i+1] = object_names[i][0]\n if i == 3:\n logger.debug(f\"ADE20K_CLASSES: {self.ADE20K_CLASSES}\")\n logger.debug(f\"ADE20K_CLASSES_reverse: {self.ADE20K_CLASSES_reverse}\")",
"def load_class_index(filename_class_index):\n class_dictionary = np.load(filename_class_index).item()\n return class_dictionary"
]
| [
"0.62869155",
"0.5954692",
"0.5110072",
"0.49648246",
"0.47868812",
"0.4665774",
"0.4657948",
"0.46555963",
"0.4620666",
"0.45999575",
"0.45988777",
"0.45965794",
"0.45868897",
"0.45225358",
"0.45041782",
"0.45039794",
"0.4491598",
"0.44227284",
"0.44207746",
"0.44183654",
"0.44143012",
"0.44002494",
"0.43644327",
"0.43558803",
"0.43452924",
"0.4335903",
"0.43264055",
"0.43145475",
"0.4312715",
"0.4295464"
]
| 0.7076887 | 0 |
Calculate integral of a polynomial from list of coefficients | def poly_integral(poly, C=0):
try:
if len(poly) < 1:
return None
except TypeError:
return None
if not (isinstance(C, int) or isinstance(C, float)):
return None
lastidx = 0
for idx, coef in enumerate(poly):
if not (type(coef) is int or type(coef) is float):
return None
if coef != 0:
lastidx = idx + 1
newlist = [C] + [int_if_whole(coef / (exp + 1))
for exp, coef in enumerate(poly)]
return newlist[:lastidx + 1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def poly_integral(poly, C=0):\n if not isinstance(poly, list) or poly == []:\n return None\n if not isinstance(C, (int, float)):\n return None\n result = [C]\n for degree, coef in enumerate(poly):\n val = coef * (1 / (degree + 1))\n if val.is_integer():\n result.append(int(val))\n else:\n result.append(val)\n for _ in range(len(result)):\n if result[-1] == 0:\n result.pop()\n return result",
"def poly(x, coeffs):\n return np.sum([coeffs[i] * x ** i for i in range(len(coeffs))], axis=0)",
"def poly_integral(poly, C=0):\n if poly == [] or type(poly) is not list or type(C) is not int:\n return None\n if len(poly) == 1:\n return [C]\n result = [C]\n\n for i in range(len(poly)):\n integ = poly[i] / (i+1)\n if inte.is_integer():\n integ = int(r)\n result.append(integ)\n\n return result",
"def evaluate_poly(poly: Sequence[float], x: float) -> float:\n return sum(c * (x**i) for i, c in enumerate(poly))",
"def integral(self, constant: float = 0) -> Polynomial:\n coefficients: list[float] = [0] * (self.degree + 2)\n coefficients[0] = constant\n for i in range(self.degree + 1):\n coefficients[i + 1] = self.coefficients[i] / (i + 1)\n return Polynomial(self.degree + 1, coefficients)",
"def polynomial(a, x):\n\n sum = 0\n\n for i in range(len(a)):\n sum += a[i] * x**i\n return sum",
"def evaluate_poly(poly, x):\n exp = 0\n total = 0\n for coef in poly:\n total += coef * (x ** exp)\n exp += 1\n\n return total",
"def polyFunction(x,weights):\n y=0\n for i in range (0,len(weights)):\n y+= weights[i]*(x**i)\n return y",
"def general_poly (L):\n def to_apply (x):\n n = 0\n for i in L:\n n = x*n + i\n return n\n return to_apply",
"def evaluate_polynomial(f,x):\n degree = len(f)-1\n ans = 0\n for i in f:\n ans += i*x**degree\n degree -= 1\n return(ans)",
"def poly_int(params: PolyParams, x: NDArray, order: int) -> NDArray:\n\n return np.polyval(np.polyint(params, -order), x)",
"def evaluate_poly(poly, x):\n value_of_poly = 0\n for i in range(0, len(poly)):\n var = x\n power = i\n coeff = poly[i]\n value_of_poly += (coeff * (var**power))\n return value_of_poly",
"def addPol(*pol):\n\n sum = np.zeros((1,))\n for p in pol:\n sum = polyadd(sum, p)\n\n return sum",
"def _evalPoly(self,a,x):\n y = a[0]\n for i in range(1,len(a)):\n y = self.F.Multiply(y, x)\n y = self.F.Add(y, a[i])\n return y",
"def polynomial_sum(x1, x2):\n #-- convert variable to array if importing a single value\n x2 = np.atleast_1d(x2)\n return np.sum([c * (x2 ** i) for i,c in enumerate(x1)],axis=0)",
"def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2",
"def zzX_eval(f, x):\n if hasattr(x, '__iter__'):\n return zzX_eval_list(f, x)\n\n if poly_univariate_p(f):\n return zzx_eval(f, x)\n\n if not x:\n return poly_TC(f)\n\n result = poly_LC(f)\n\n for coeff in f[1:]:\n result = zzX_mul_const(result, x)\n result = zzX_add(result, coeff)\n\n return result",
"def coeffients(x, y):\n\n # ensure floating point datatypes\n x.astype(float)\n y.astype(float)\n\n # degree of interpolating polynomial\n n = len(x)\n\n # intitilize list of coeffients for interpolating polynomial to y values\n c = y.tolist()\n\n # compute coeffients\n for j in range(1, n):\n for i in range(n-1, j-1, -1):\n c[i] = float(c[i]-c[i-1])/float(x[i]-x[i-j])\n\n # return an array of polynomial coefficient, note: reverse order for np.polyval function\n return np.array(c[::-1])",
"def evaluate_polynomial(tropical_matrix, coefficient_list):\n identity_matrix = get_identity_matrix(tropical_matrix.get_dimension())\n sum_list = []\n sum_list.append(identity_matrix.mult_scalar(coefficient_list[0]))\n for i in range(1, len(coefficient_list)):\n sum_list.append(tropical_matrix.mult_scalar(coefficient_list[i]))\n return get_minimum_sum(sum_list)",
"def horner(poly: Sequence[float], x: float) -> float:\n result = 0.0\n for coeff in reversed(poly):\n result = result * x + coeff\n return result",
"def general_poly(L):\n def evaluate(x):\n length=len(L)-1\n value=0\n for i in L:\n value+=i*(x**length)\n length-=1\n return value\n return evaluate",
"def polynomial_creator(*coefficients):\n def polynomial(x):\n res = 0\n for index, coeff in enumerate(coefficients):\n res += coeff * x** index\n return res\n return polynomial",
"def poly_complex(*args, real_x=False):\n from .. import z\n\n args = list(args)\n x = args.pop()\n if real_x is not None:\n pow_func = znp.power\n else:\n pow_func = z.nth_pow\n return tf.add_n(\n [coef * z.to_complex(pow_func(x, p)) for p, coef in enumerate(args)]\n )",
"def compute_deriv(poly):\n derivative_of_poly = []\n for i in range(1, len(poly)):\n power = i\n coeff = poly[i]\n y = float(coeff * power)\n first = derivative_of_poly.append(y)\n return derivative_of_poly",
"def poly_derivative(poly):\n if not poly or type(poly) is not list:\n return None\n\n response = []\n\n for order in range(1, len(poly)):\n response.append(order * poly[order])\n\n if not response:\n response.append(0)\n\n return response",
"def polyval(p, x):\r\n val = 0\r\n ii = len(p) - 1\r\n for i in range(len(p) - 1):\r\n val += p[i] * (x ** ii)\r\n ii -= 1\r\n return val + p[-1]",
"def zzX_to_poly(f, *symbols):\n from sympy.polys import Poly\n\n terms = {}\n\n for monom, coeff in zzX_to_dict(f).iteritems():\n terms[monom] = Integer(int(coeff))\n\n return Poly(terms, *symbols)",
"def poly_derivative(poly):\n if type(poly) is not list or len(poly) < 1:\n return None\n if len(poly) == 1:\n return [0]\n\n derivated_coefficients = []\n\n for power, coefficient in enumerate(poly):\n if power == 0:\n pass\n\n else:\n new_coefficient = coefficient * power\n derivated_coefficients.append(new_coefficient)\n\n return(derivated_coefficients)",
"def polynomial_incremental_value(multipliers, variable):\n return sum([(mult * order * variable ** (order - 1) if order > 0 else 0) for order, mult in enumerate(multipliers)])",
"def calculate(self, x):\n result = 0\n\n for index, polynomial in self.polynomials.items():\n result += polynomial * pow(x, int(index))\n\n return result"
]
| [
"0.74296737",
"0.7381701",
"0.7244842",
"0.7224842",
"0.7110448",
"0.70556635",
"0.69254386",
"0.6911941",
"0.6725386",
"0.6708102",
"0.6701443",
"0.6629917",
"0.65790784",
"0.65371686",
"0.6455424",
"0.64405704",
"0.641371",
"0.6384638",
"0.6370596",
"0.6348242",
"0.63109946",
"0.62659484",
"0.62584925",
"0.62333417",
"0.6221058",
"0.6193145",
"0.6181737",
"0.61740917",
"0.6164384",
"0.61378604"
]
| 0.73913735 | 1 |
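A quick usage sketch for the poly_integral record above. It assumes poly_integral and the int_if_whole helper from the following record are defined together in one module; the calls and expected results are illustrative and not part of the dataset row.

# [5, 3, 0, 1] stands for 5 + 3x + x^3 (constant term first); integrating with
# the default C=0 gives 0 + 5x + 1.5x^2 + 0x^3 + 0.25x^4.
print(poly_integral([5, 3, 0, 1]))   # [0, 5, 1.5, 0, 0.25]
print(poly_integral([], 0))          # None: an empty coefficient list is rejected
print(poly_integral([5], "x"))       # None: C must be an int or a float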
Returns integer if number is whole, else returns number | def int_if_whole(num):
if num.is_integer():
return int(num)
else:
return num | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_integer(number: float):\n\tif number.is_integer():\n\t\treturn int(number)\n\treturn number",
"def whole_number_to_int(value: AnyBasicType) -> AnyBasicType:\n if isinstance(value, float) and value.is_integer():\n return int(value)\n return value",
"def get_number(self, operand):\n try:\n num = int(operand)\n except ValueError:\n try:\n num = float(operand)\n except ValueError:\n print(\"Invalid number %s\" % operand)\n num = None\n return num",
"def visit_Num(self, node):\n token = node.token\n if token.type in (INTEGER, FLOAT):\n return token.value",
"def check_for_int(check):",
"def ir(some_value):\r\n return int(round(some_value))",
"def trunc_if_integer(n: Any) -> Any:\n if n == int(n):\n return int(n)\n return n",
"def checkInt(value):\n if int(value) == value:\n return int(value)\n else:\n return value",
"def intify(x):\n return int(x) if almost_equal(x, round(x)) else x",
"def nearest_int(num):\n return int(np.round(num))",
"def INT(val):\n return math.floor(val)",
"def convert_to_int(number):\n try:\n return int(number)\n except:\n return None",
"def round_int(num):\n try:\n float(num)\n except TypeError:\n _logger.debug(\"error trying to convert %s to number\" % (str(num)))\n raise pythonerror(\"#syntaxerror\")\n\n if int(float(num)) == num:\n return int(num)\n else:\n if float(num) < 0:\n _nn = int((float(num) - 0.005) * 100) / 100.\n else:\n _nn = int((float(num) + 0.005) * 100) / 100.\n if int(float(_nn)) == _nn:\n return int(_nn)\n return _nn",
"def _ensure_number(value):\n assert isinstance(value, (bytes, float, int)), \"value has to be either bytes or float or int\"\n\n return int.from_bytes(value, byteorder=\"big\") if type(value) is bytes else value",
"def is_number(n):\n return isinstance(n, (int, float))",
"def divide(num):\n return (int(num / 2))",
"def is_int(num):\n return int(num) == num",
"def _maybe_convert_to_number(v: Any) -> Any:\n try:\n return int(v)\n except Exception:\n pass\n\n try:\n return float(v)\n except Exception:\n pass\n\n return v",
"def toint(number):\n if isinstance(number, float):\n if number > 1:\n number = round(number, 0)\n else:\n # The following solves when image has small dimensions (like 1x54)\n # then scale factor 1 * 0.296296 and `number` will store `0`\n # that will later raise ZeroDivisionError.\n number = round(math.ceil(number), 0)\n return int(number)",
"def _coerce_to_integer(value):\n try:\n return int(value)\n except ValueError:\n return int(float(value))",
"def tryCastToInt(number):\n try:\n return int(number)\n except:\n print(\"Error! Impossible to parse this variable\")\n return 0",
"def checkNumberInt(value):\n if value.isnumeric():\n return int(value)\n else:\n print(\"You did not enter the correct numbers!\")\n newNum = input(\"Please enter a number: \")\n return checkNumberInt(newNum)",
"def num_of_num(first_number: float, second_number: float) -> float:\n return round( second_number / ( first_number / 100 ))",
"def _make_number(ctx, param, value):\n if value:\n try:\n value = int(value)\n except ValueError:\n value = float(value)\n return value",
"def compute(num):\n # 567 / 9 = 63, 235 / 47 = 5\n num = (num * 63 + 7492) * 5 - 498\n if num < 0: # modulus won't give correct result if number is negative\n num *= -1\n res = (num // 10) % 10\n return res",
"def rint(flt: float) -> int | float:\n return int(rounded) if (rounded := round(flt, 2)).is_integer() else rounded",
"def integer(self):\n literal = re.compile(r'(0){1}|([1-9])\\d*')\n result = ''\n while self.current_char is not None and self.current_char.isdigit():\n result += self.current_char\n self.advance()\n if literal.fullmatch(result) is not None:\n return int(result)\n else:\n self.error()",
"def try_int(value: Any) -> Optional[float]:\n try:\n return int(value)\n except (TypeError, ValueError):\n return None",
"def num(s: str):\n try: return int(s)\n except ValueError: return float(s)",
"def __int__(self) -> int:\n return self._translate_in_type(int, self.float_num)"
]
| [
"0.7314011",
"0.7166847",
"0.6652101",
"0.6609565",
"0.65567404",
"0.65475607",
"0.6547118",
"0.6521454",
"0.65158075",
"0.64692986",
"0.6402044",
"0.6399932",
"0.6330981",
"0.63286954",
"0.63224167",
"0.62902737",
"0.62311643",
"0.62228835",
"0.6186363",
"0.61849284",
"0.6173586",
"0.61642134",
"0.61543715",
"0.6120279",
"0.61117005",
"0.6107427",
"0.6102026",
"0.6097803",
"0.608314",
"0.60656786"
]
| 0.85024625 | 0 |
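A minimal sketch of the int_if_whole helper in the record above. It assumes the argument is a float, since the function relies on float.is_integer(); the calls are illustrative only.

print(int_if_whole(3.0))   # 3   -- a whole-valued float is narrowed to int
print(int_if_whole(2.5))   # 2.5 -- a non-whole float is returned unchanged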
Call which_action for each action in s1 that has an analogue in s2 to determine which of two actions that operate against the same oid+index should be preferred. If neither is preferred, a ConflictError will be raised. | def action_intersection(s1, s2):
isect = s1 & s2
L1 = [ ( (a.oid, a.index_oid), a) for a in s1 ]
L2 = [ ( (a.oid, a.index_oid), a) for a in s2 ]
ds1 = dict(L1)
ds2 = dict(L2)
for k1, action1 in ds1.items():
action2 = ds2.get(k1)
if action2 is not None:
            # replace action in intersection with correct one or conflict
isect.add(which_action(action1, action2))
return isect | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def which_action(a1, a2):\n def doconflict(a1, a2):\n raise ConflictError\n def dosecond(a1, a2):\n return a2\n def dofirst(a1, a2):\n return a1\n statefuncs = {\n (IndexAction, UnindexAction):doconflict,\n (UnindexAction, IndexAction):doconflict,\n (ReindexAction, UnindexAction):doconflict,\n (UnindexAction, ReindexAction):doconflict,\n (ReindexAction, IndexAction):dosecond,\n }\n return statefuncs.get((a1.__class__, a2.__class__), dofirst)(a1, a2)",
"def choose_best_action(self, s):\r\n raise NotImplemented()",
"def logic_action(first_index, second_index):\n return None",
"def resolve_conflicts(actions):\n\n # organize actions by discriminators\n unique = {}\n output = []\n for i, action in enumerate(actions):\n if not isinstance(action, dict):\n # old-style tuple action\n action = expand_action(*action)\n\n # \"order\" is an integer grouping. Actions in a lower order will be\n # executed before actions in a higher order. Within an order,\n # actions are executed sequentially based on original action ordering\n # (\"i\").\n order = action['order'] or 0\n discriminator = action['discriminator']\n\n # \"ainfo\" is a tuple of (order, i, action) where \"order\" is a\n # user-supplied grouping, \"i\" is an integer expressing the relative\n # position of this action in the action list being resolved, and\n # \"action\" is an action dictionary. The purpose of an ainfo is to\n # associate an \"order\" and an \"i\" with a particular action; \"order\"\n # and \"i\" exist for sorting purposes after conflict resolution.\n ainfo = (order, i, action)\n\n if discriminator is None:\n # The discriminator is None, so this action can never conflict.\n # We can add it directly to the result.\n output.append(ainfo)\n continue\n\n L = unique.setdefault(discriminator, []) # noqa\n L.append(ainfo)\n\n # Check for conflicts\n conflicts = {}\n\n for discriminator, ainfos in unique.items():\n\n # We use (order, i) as a sort key because we need to\n def byorder(ainfo):\n order, i = ainfo[0], ainfo[1]\n return order, i\n\n ainfos.sort(key=byorder)\n ainfo, rest = ainfos[0], ainfos[1:]\n output.append(ainfo)\n _, _, action = ainfo\n order = action['order']\n discriminator = action['discriminator']\n base_module_name = action['module_name']\n base_order = action['order']\n\n for _, _, action in rest:\n if action['order'] <= base_order:\n L = conflicts.setdefault(discriminator, [base_module_name, base_order]) # noqa\n L.append((action['module_name'], action['order']))\n\n if conflicts:\n raise ConfigurationConflictError(conflicts)\n\n # Sort conflict-resolved actions by (order, i) and return them.\n return [x[2] for x in sorted(output, key=operator.itemgetter(0, 1))]",
"def choose_action(self, action_options):\n allowed_action_idxs = []\n for i in range(len(action_options)):\n action = int(action_options[i])\n if action != -1:\n allowed_action_idxs.append(i)\n\n if len(allowed_action_idxs) == 0:\n return None\n \n # if we are choosing actions based on the saved matrix, we should select\n # the action with the best weight at the current state in the q-matrix\n if self.use_saved_matrix:\n best_q_value = None\n best_action_num = None\n best_next_state = None\n for action_idx in allowed_action_idxs:\n action_num = int(action_options[action_idx])\n next_state = action_idx\n q_value = self.q_matrix[self.current_state][action_num]\n if best_q_value is None or q_value > best_q_value:\n best_q_value = q_value\n best_action_num = action_num\n best_next_state = next_state\n return (best_action_num, best_next_state)\n\n # during training, we should randomly select from the allowed actions and\n # transition to the next state to continue training\n action_idx = np.random.choice(allowed_action_idxs)\n action_num = int(action_options[action_idx])\n next_state = action_idx\n return (action_num, next_state)",
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def different_actions(old_action: PersistentAction, new_action: PersistentAction) -> bool:\n if Invocation.different_required(old_action.required, new_action.required):\n return True\n\n if old_action.command != new_action.command:\n if old_action.command is None:\n old_action_kind = \"a phony command\"\n else:\n old_action_kind = \"the command: \" + \" \".join(old_action.command)\n\n if new_action.command is None:\n new_action_kind = \"a phony command\"\n else:\n new_action_kind = \"the command: \" + \" \".join(new_action.command)\n\n Logger.why(f\"Must run actions because changed {old_action_kind} \" f\"into {new_action_kind}\")\n return True\n\n return False",
"def choose_action(self, command: State, agent_names: List[str],\n candidate_actions: Optional[List[Union[Action, List[Action]]]],\n force_response: bool, pre_post_state: str) -> Optional[Action]:\n\n if not candidate_actions:\n return None\n\n confs = [self.__calculate_confidence__(action) for action in candidate_actions]\n idx_maxconf = np.argmax(confs)\n\n max_conf = confs[idx_maxconf]\n selected_candidate = candidate_actions[idx_maxconf]\n\n if force_response:\n return selected_candidate\n\n if max_conf >= self.threshold:\n return selected_candidate\n\n return None",
"def optimize_actions(actions):\n result = {}\n\n def donothing(oid, index_oid, action1, action2):\n del result[(oid, index_oid)]\n\n def doadd(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = action1\n\n def dochange(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = ReindexAction(\n action2.index, action2.mode, oid,\n )\n\n def dodefault(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = action2\n\n statefuncs = {\n # txn asked to remove an object that previously it was\n # asked to add, conclusion is to do nothing\n (IndexAction, UnindexAction):donothing,\n # txn asked to change an object that was not previously added,\n # concusion is to just do the add\n (IndexAction, ReindexAction):doadd,\n # txn action asked to remove an object then readd the same\n # object. We translate this to a single change action.\n (UnindexAction, IndexAction):dochange,\n }\n\n for newaction in actions:\n oid = newaction.oid\n index_oid = newaction.index_oid\n oldaction = result.get((oid, index_oid))\n statefunc = statefuncs.get(\n (oldaction.__class__, newaction.__class__),\n dodefault,\n )\n statefunc(oid, index_oid, oldaction, newaction)\n\n result = list(sorted(result.values()))\n return result",
"def choose_action(self, board, possible_actions):\r\n pass",
"def choose_action(self, *args, **kwargs):\n return NotImplementedError",
"def choose_action(self, obs, **kwargs):\n pass",
"def _select_operator(self, state, action):\n if self.operators_as_actions:\n # There should be only one possible operator if actions are operators\n possible_operators = set()\n for name, operator in self.domain.operators.items():\n if name.lower() == action.predicate.name.lower():\n assert len(possible_operators) == 0\n possible_operators.add(operator)\n else:\n # Possibly multiple operators per action\n possible_operators = set(self.domain.operators.values())\n\n # Knowledge base: literals in the state + action taken\n kb = set(state.literals) | {action}\n\n selected_operator = None\n assignment = None\n for operator in possible_operators:\n if isinstance(operator.preconds, Literal):\n conds = [operator.preconds]\n else:\n conds = operator.preconds.literals\n # Necessary for binding the operator arguments to the variables\n if self.operators_as_actions:\n conds = [action.predicate(*operator.params)] + conds\n # Check whether action is in the preconditions\n action_literal = None\n for lit in conds: \n if lit.predicate == action.predicate:\n action_literal = lit\n break\n if action_literal is None:\n continue\n # For proving, consider action variable first\n action_variables = action_literal.variables\n variable_sort_fn = lambda v : (not v in action_variables, v)\n assignments = find_satisfying_assignments(kb, conds,\n variable_sort_fn=variable_sort_fn,\n type_to_parent_types=self.domain.type_to_parent_types)\n num_assignments = len(assignments)\n if num_assignments > 0:\n assert num_assignments == 1, \"Nondeterministic envs not supported\"\n selected_operator = operator\n assignment = assignments[0]\n break\n\n return selected_operator, assignment",
"def choose_action(self):\r\n pass",
"def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]",
"def choose_action(self, board):\n raise NotImplementedError",
"def get_joss_strategy_actions(\n opponent_moves: list, indices_to_flip: list\n) -> list:\n out = []\n for index, action in enumerate(opponent_moves):\n previous_action = opponent_moves[index - 1]\n if index == 0:\n out.append((C, action))\n elif index in indices_to_flip:\n out.append((previous_action.flip(), action))\n else:\n out.append((previous_action, action))\n return out",
"def get_action(self, q_ind, r_table, t, exploration_factor,\n allow_coordination, coordination_vector,\n best_actions):\n if t < self.next_availability:\n action = (0, self.did, -100, -100)\n return action\n\n # In case previous action was coordination then only allow independent\n # waiting action as next action\n if allow_coordination:\n coordination_factor = coordination_vector[self.curr_bin]\n else:\n coordination_factor = 0.0\n\n rand1 = np.random.random()\n rand2 = np.random.random()\n\n if not self.is_strategic:\n coordination = False\n action = q_ind.get_non_strategic_action(self.curr_bin)\n elif rand1 <= coordination_factor:\n # Coordination\n coordination = True\n # Fill up coordinated rebalancing\n action = r_table.get_action(t, self.curr_bin)\n else:\n # Independence\n coordination = False\n if rand2 <= exploration_factor:\n action = q_ind.get_random_action(self.curr_bin)\n else:\n action = best_actions[self.curr_bin]['ind']\n\n if action == self.curr_bin:\n if coordination:\n action = (1, self.did, self.curr_bin, self.curr_bin)\n else:\n action = (2, self.did, self.curr_bin, self.curr_bin)\n else:\n if coordination:\n action = (3, self.did, self.curr_bin, action)\n else:\n action = (4, self.did, self.curr_bin, action)\n return action",
"def decide_place(self, action):\n pass",
"def _interpret_action(self, action_idx: int, team: int):\n if action_idx < 4:\n return action_idx\n\n noops = self._allow_noops[team]\n diags = self._allow_diagonals[team]\n assert noops or diags\n\n if noops and not diags:\n assert action_idx == 4\n return NOOP\n\n if not noops and diags:\n assert action_idx < 8\n return action_idx\n\n if noops and diags:\n assert action_idx < 9\n return action_idx",
"def _actions_in_different_order(\n local_history: History, remote_history: History\n) -> bool:\n return set(remote_history.actions).issubset(local_history.actions)",
"def get_action_meaning(s1: int, s2: int) -> str:\r\n r1, g1, b1 = state_ball_mapper.get(s1)\r\n r2, g2, b2 = state_ball_mapper.get(s2)\r\n red_moved = r2 - r1 != 0\r\n green_moved = g2 - g1 != 0\r\n blue_moved = b2 - b1 != 0\r\n colour = ''\r\n rod_from = ''\r\n rod_to = ''\r\n \r\n if red_moved:\r\n colour = 'R'\r\n arr1, arr2 = r1 % 10, r2 % 10\r\n rod_from = get_rod_letter(r1)\r\n rod_to = get_rod_letter(r2)\r\n \r\n if green_moved:\r\n colour = 'G'\r\n arr1, arr2 = g1 % 10, g2 % 10\r\n rod_from = get_rod_letter(g1)\r\n rod_to = get_rod_letter(g2)\r\n \r\n if blue_moved:\r\n colour = 'B'\r\n arr1, arr2 = b1 % 10, b2 % 10\r\n rod_from = get_rod_letter(b1)\r\n rod_to = get_rod_letter(b2)\r\n \r\n action = f'[{colour} {rod_from}{arr1} -> {colour} {rod_to}{arr2}]'\r\n return action",
"def score_helper(agent1_action, agent2_action, payoffs):\r\n\r\n if agent1_action == 'C' and agent2_action == 'C':\r\n # Both cooperate\r\n return payoffs[0][0]\r\n elif agent1_action == 'D' and agent2_action == 'C':\r\n # Agent 1 backstab\r\n return payoffs[1][0]\r\n elif agent1_action == 'C' and agent2_action == 'D':\r\n # Agent 2 backstab\r\n return payoffs[0][1]\r\n else:\r\n # Both defect\r\n return payoffs[1][1]",
"def choose_action(self, game_state):\n util.raise_not_defined()",
"def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action",
"def InitActionCheck(initActionList, init):\n for actions in initActionList:\n action_class = getNameFromIRI(actions.is_a[0].iri)\n # if the action is a SpeedAction class\n if action_class == \"SpeedAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n target_speed = actions.has_target_speed[0]\n ontology_transition_dynamics = actions.has_transition_dynamics[0]\n xosc_transition_dynamics = checkTransitionDynamics(ontology_transition_dynamics)\n init.add_init_action(action_entity_ref, xosc.AbsoluteSpeedAction(target_speed, xosc_transition_dynamics))\n continue\n #if the action is TeleportAction\n if action_class == \"TeleportAction\":\n action_entity_ref = getNameFromIRI(actions.has_entity_ref[0].iri)\n # if the action has position as parameter set\n s: int = 0\n offset = 0\n lane_id = 0\n road_id = 0\n if len(actions.has_position) != 0:\n position = actions.has_position[0]\n if len(position.has_s) != 0:\n s = position.has_s[0]\n\n if len(position.has_offset) != 0:\n offset = position.has_offset[0]\n\n if len(position.has_lane_id) != 0:\n lane_id = position.has_lane_id[0]\n\n if len(position.has_road_id) != 0:\n road_id = position.has_road_id[0]\n\n init.add_init_action(action_entity_ref, xosc.TeleportAction(xosc.LanePosition(s, offset, lane_id, road_id)))\n continue\n if action_class == \"EnvironmentAction\": # if the action is an EnvironmentAction\n xosc_environment_action = checkEnvironmentAction(actions)\n init.add_global_action(xosc_environment_action)\n return init",
"def _action_to_perform(self, ids, operationParams , default={}):\n full_ids=[]\n status=operationParams['status'] \n action=operationParams['action']\n docaction=operationParams['docaction']\n excludeStatuses=operationParams['excludeStatuses']\n includeStatuses=operationParams['includeStatuses']\n \n stopFlag,allIDs=self._get_recursive_parts(ids, excludeStatuses, includeStatuses)\n self._action_ondocuments(allIDs,docaction, status)\n if action:\n idMoves=move_workflow(self, allIDs, action, status)\n self.logging_workflow(idMoves, action, status)\n objId=self.browse(allIDs).with_context({'internal_writing':True}).write(default)\n if objId:\n wf_message_post(self, allIDs, body='Status moved to: {status}.'.format(status=status))\n return objId",
"def chooseAction(self, gameState):\r\n actions = gameState.getLegalActions(self.index)\r\n\r\n # You can profile your evaluation time by uncommenting these lines\r\n # start = time.time()\r\n values = [self.evaluate(gameState, a) for a in actions]\r\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\r\n\r\n maxValue = max(values)\r\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\r\n\r\n foodLeft = len(self.getFood(gameState).asList())\r\n\r\n if foodLeft <= 2:\r\n bestDist = 9999\r\n for action in actions:\r\n successor = self.essor(gameState, action)\r\n pos2 = successor.getAgentPosition(self.index)\r\n dist = self.getMazeDistance(self.start,pos2)\r\n if dist < bestDist:\r\n bestAction = action\r\n bestDist = dist\r\n return bestAction\r\n\r\n return random.choice(bestActions)",
"def __cmp__(self, other):\n # note compare order deliberatly compares other first, because we want the opposite\n # of what normally be returned by the these tuples\n if isinstance(other, Operation):\n return cmp((other.is_ready, other.queue_priority, other.seq),(self.is_ready, self.queue_priority,self.seq))\n else:\n raise TypeError('Operations can only be compared to other Operation')",
"def choose_action(data_map, connection, data_ia):\n player = 'player' + str((data_map['main_turn'] % 2) + 1)\n enemy = 'player' + str(2 - (data_map['main_turn'] % 2))\n if data_map['remote']:\n player = 'player' + str(data_map['ia_id'])\n enemy = 'player' + str(data_map['enemy_id'])\n\n # Tells whether IA or player's turn.\n if data_map['main_turn'] % 2 == data_map['ia_id'] or data_map[str(player + 'info')][1] == 'IA':\n game_instruction = ia_action(data_map, data_ia, player)\n if data_map['remote']:\n notify_remote_orders(connection, game_instruction)\n else:\n if data_map['remote']:\n game_instruction = get_remote_orders(connection)\n else:\n game_instruction = raw_input('Enter your commands in format xx_xx -a-> xx_xx or xx_xx -m-> xx_xx')\n\n # Split commands string by string.\n list_action = game_instruction.split()\n\n # grouper instruction par instructions\n list_action2 = []\n for instruction in range(0, len(list_action), 3):\n list_action2.append((list_action[instruction], list_action[instruction + 1], list_action[instruction + 2]))\n\n # Call attack_unit or move_unit in function of instruction.\n attack_counter = 0\n for i in range(len(list_action2)):\n if '-a->' in list_action2[i]:\n data_map, attacked, data_ia = attack_unit(data_map, (int(list_action2[i][0][:2]), int(list_action2[i][0][3:])),\n (int(list_action2[i][2][:2]), int(list_action2[i][2][3:])), player, enemy, data_ia)\n attack_counter += attacked\n elif '-m->' in list_action2[i]:\n data_map, data_ia = move_unit(data_map, (int(list_action2[i][0][:2]), int(list_action2[i][0][3:])),\n (int(list_action2[i][2][:2]), int(list_action2[i][2][3:])), player, enemy, data_ia)\n\n # Save if a player have attacked.\n if attack_counter:\n data_map['attack_turn'] = 0\n else:\n data_map['attack_turn'] += 1\n data_map['main_turn'] += 1\n\n return data_map"
]
| [
"0.73710716",
"0.5827848",
"0.5650774",
"0.56128246",
"0.55781186",
"0.5469253",
"0.5348622",
"0.53037953",
"0.52899754",
"0.5289762",
"0.5206281",
"0.5147254",
"0.5144174",
"0.51324034",
"0.5051541",
"0.50176245",
"0.49743837",
"0.4961874",
"0.49386695",
"0.4918847",
"0.49023473",
"0.48791128",
"0.48786667",
"0.48472378",
"0.4837746",
"0.48375764",
"0.4832541",
"0.48265386",
"0.4805322",
"0.479736"
]
| 0.65523064 | 1 |
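A rough, self-contained sketch of the matching step inside action_intersection above. The real catalog action classes are not shown in this record, so plain strings stand in for the action objects; only the (oid, index_oid) keying is illustrated.

ds1 = {(7, 3): "ReindexAction for oid 7"}   # hypothetical contents built from s1
ds2 = {(7, 3): "IndexAction for oid 7"}     # hypothetical contents built from s2
shared = ds1.keys() & ds2.keys()            # {(7, 3)} appears in both sets, so
print(shared)                               # which_action must pick the winner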
Compare two actions and return 'the right' one, or raise a ConflictError. It's presumed that both actions share the same (oid,index). We use this when two pending actions target the same (oid, index) pair and a single winner must be chosen, as action_intersection does in the record above. | def which_action(a1, a2):
def doconflict(a1, a2):
raise ConflictError
def dosecond(a1, a2):
return a2
def dofirst(a1, a2):
return a1
statefuncs = {
(IndexAction, UnindexAction):doconflict,
(UnindexAction, IndexAction):doconflict,
(ReindexAction, UnindexAction):doconflict,
(UnindexAction, ReindexAction):doconflict,
(ReindexAction, IndexAction):dosecond,
}
return statefuncs.get((a1.__class__, a2.__class__), dofirst)(a1, a2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def action_intersection(s1, s2):\n isect = s1 & s2\n L1 = [ ( (a.oid, a.index_oid), a) for a in s1 ]\n L2 = [ ( (a.oid, a.index_oid), a) for a in s2 ]\n ds1 = dict(L1)\n ds2 = dict(L2)\n for k1, action1 in ds1.items():\n action2 = ds2.get(k1)\n if action2 is not None:\n # replace action in union with correct one or conflict\n isect.add(which_action(action1, action2))\n return isect",
"def logic_action(first_index, second_index):\n return None",
"def optimize_actions(actions):\n result = {}\n\n def donothing(oid, index_oid, action1, action2):\n del result[(oid, index_oid)]\n\n def doadd(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = action1\n\n def dochange(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = ReindexAction(\n action2.index, action2.mode, oid,\n )\n\n def dodefault(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = action2\n\n statefuncs = {\n # txn asked to remove an object that previously it was\n # asked to add, conclusion is to do nothing\n (IndexAction, UnindexAction):donothing,\n # txn asked to change an object that was not previously added,\n # concusion is to just do the add\n (IndexAction, ReindexAction):doadd,\n # txn action asked to remove an object then readd the same\n # object. We translate this to a single change action.\n (UnindexAction, IndexAction):dochange,\n }\n\n for newaction in actions:\n oid = newaction.oid\n index_oid = newaction.index_oid\n oldaction = result.get((oid, index_oid))\n statefunc = statefuncs.get(\n (oldaction.__class__, newaction.__class__),\n dodefault,\n )\n statefunc(oid, index_oid, oldaction, newaction)\n\n result = list(sorted(result.values()))\n return result",
"def __eq__(self, other):\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False",
"def different_actions(old_action: PersistentAction, new_action: PersistentAction) -> bool:\n if Invocation.different_required(old_action.required, new_action.required):\n return True\n\n if old_action.command != new_action.command:\n if old_action.command is None:\n old_action_kind = \"a phony command\"\n else:\n old_action_kind = \"the command: \" + \" \".join(old_action.command)\n\n if new_action.command is None:\n new_action_kind = \"a phony command\"\n else:\n new_action_kind = \"the command: \" + \" \".join(new_action.command)\n\n Logger.why(f\"Must run actions because changed {old_action_kind} \" f\"into {new_action_kind}\")\n return True\n\n return False",
"def test_get_index_action(self):\n index_name = 'index'\n document_type = 'message'\n document = {'text': 'some message'}\n action = get_index_action(index_name, document_type, document)\n self.assertDictEqual(\n action,\n {\n '_index': 'index',\n '_type': 'message',\n '_source': {\n 'text': 'some message',\n },\n },\n )",
"def __eq__(self, other):\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False",
"def _do_search_action(self, index, action, force=False):\n assert self.pk, \"Object must have a primary key before being indexed.\"\n assert action in ('index', 'delete'), (\n \"Search action '{}' is invalid; must be 'index' or 'delete'.\".format(action)\n )\n client = get_client()\n cache_key = self.search_document_cache_key\n if action == 'index':\n # if the locally cached search doc is the same as the new one,\n # then don't bother pushing to ES.\n new_doc = self.as_search_document(index)\n if not force:\n cached_doc = cache.get(cache_key)\n if new_doc == cached_doc:\n logger.debug(\"Search document for %r is unchanged, ignoring update.\", self)\n return []\n cache.set(cache_key, new_doc, timeout=60) # TODO: remove hard-coded timeout\n return client.index(\n index=index,\n doc_type=self.search_doc_type,\n body=new_doc,\n id=self.pk\n )\n\n if action == 'delete':\n cache.delete(cache_key)\n return client.delete(\n index=index,\n doc_type=self.search_doc_type,\n id=self.pk\n )",
"def _actions_in_different_order(\n local_history: History, remote_history: History\n) -> bool:\n return set(remote_history.actions).issubset(local_history.actions)",
"def __eq__(self, other):\n if not isinstance(other, SubsequentAction):\n return False\n\n return self.to_dict() == other.to_dict()",
"def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \"kafka:BatchDisassociateScramSecret\",\n \"kafka:CreateClusterV2\",\n \"kafka:DeleteConfiguration\",\n \"kafka:DescribeClusterV2\",\n \"kafka:ListClustersV2\",\n \"kafka:ListConfigurationRevisions\",\n \"kafka:ListKafkaVersions\",\n \"kafka:ListScramSecrets\",\n \"kafka:RebootBroker\",\n \"kafka:UpdateBrokerType\",\n \"kafka:UpdateConfiguration\",\n \"kafka:UpdateConnectivity\",\n \"kafka:UpdateSecurity\"\n ]\n\n for action in actions:\n self.assertTrue(action in self.all_actions)",
"def confirm_doc_and_rev(src, dest):\n if src['_id'] != dest['_id']:\n raise http.BadRequestError('incorrect id')\n if src['_rev'] != dest['_rev']:\n raise http.ConflictError([('Content-Type', 'text/plain')], 'rev is out of date')",
"def __ne__(self, other):\n if not isinstance(other, SubsequentAction):\n return True\n\n return self.to_dict() != other.to_dict()",
"def act(self):\n action = self.best_action()\n return action",
"def resolve_conflicts(actions):\n\n # organize actions by discriminators\n unique = {}\n output = []\n for i, action in enumerate(actions):\n if not isinstance(action, dict):\n # old-style tuple action\n action = expand_action(*action)\n\n # \"order\" is an integer grouping. Actions in a lower order will be\n # executed before actions in a higher order. Within an order,\n # actions are executed sequentially based on original action ordering\n # (\"i\").\n order = action['order'] or 0\n discriminator = action['discriminator']\n\n # \"ainfo\" is a tuple of (order, i, action) where \"order\" is a\n # user-supplied grouping, \"i\" is an integer expressing the relative\n # position of this action in the action list being resolved, and\n # \"action\" is an action dictionary. The purpose of an ainfo is to\n # associate an \"order\" and an \"i\" with a particular action; \"order\"\n # and \"i\" exist for sorting purposes after conflict resolution.\n ainfo = (order, i, action)\n\n if discriminator is None:\n # The discriminator is None, so this action can never conflict.\n # We can add it directly to the result.\n output.append(ainfo)\n continue\n\n L = unique.setdefault(discriminator, []) # noqa\n L.append(ainfo)\n\n # Check for conflicts\n conflicts = {}\n\n for discriminator, ainfos in unique.items():\n\n # We use (order, i) as a sort key because we need to\n def byorder(ainfo):\n order, i = ainfo[0], ainfo[1]\n return order, i\n\n ainfos.sort(key=byorder)\n ainfo, rest = ainfos[0], ainfos[1:]\n output.append(ainfo)\n _, _, action = ainfo\n order = action['order']\n discriminator = action['discriminator']\n base_module_name = action['module_name']\n base_order = action['order']\n\n for _, _, action in rest:\n if action['order'] <= base_order:\n L = conflicts.setdefault(discriminator, [base_module_name, base_order]) # noqa\n L.append((action['module_name'], action['order']))\n\n if conflicts:\n raise ConfigurationConflictError(conflicts)\n\n # Sort conflict-resolved actions by (order, i) and return them.\n return [x[2] for x in sorted(output, key=operator.itemgetter(0, 1))]",
"def test_get_index_action_row_with_id_field(self):\n index_name = 'index'\n document_type = 'message'\n document = {\n '_id': 7,\n 'text': 'some message',\n }\n action = get_index_action(index_name, document_type, document)\n self.assertDictEqual(\n action,\n {\n '_id': 7,\n '_index': 'index',\n '_type': 'message',\n '_source': {\n '_id': 7,\n 'text': 'some message',\n },\n },\n )",
"def get_actions(node, actions):\n # checkout\n if node.is_revision() and hasattr(node.data, \"update\") \\\n and node.revision.resource_type == tank.constants.ResourceType.SINGLE_FILE:\n # we can only do a checkout if the delegate supports updates\n # don't try to check out sequences\n actions[\"checkout\"] = Action(\"Check-out\", func=checkout, args=[node])\n\n # import\n if hasattr(node.data, \"import_\"):\n # we can only do an import if the delegate supports imports\n actions[\"import\"] = Action(\"Import\", func=node.import_)\n\n if hasattr(node.data, \"update\"):\n # we can only do a replace if the delegate supports updates\n actions[\"replace\"] = Action(\"Replace...\", func=replace, args=[node],\n params=[Param(\"revision\", Param.Revision,\n label=\"Revision\",\n default=(node.revision if node is not None else None))])\n\n if hasattr(node.data, \"update\") and node.is_revision() and node.container.latest_revisions.has_key(node.revision.system.type.system.name) and node.revision != node.container.latest_revisions[node.revision.system.type.system.name]:\n actions[\"latest\"] = Action(\"Update to Latest\",\n func=replace_with_latest, args=[node])\n# actions[\"recommended\"] = Action(\"Update to Recommended\",\n# func=replace_with_recommended, args=[node])\n\n if hasattr(node.data, \"update\") and node.container and node.container.system.type.properties.use_name:\n actions[\"container_name_to_filename\"] = Action(\"Set Container Name to Filename\",\n func=set_container_name_to_filename, args=[node])\n\n if node.is_working():\n params = [Param(\"description\", Param.Text, label=\"Description\", default=\"\"),\n Param(\"subset\", Param.NodeList, label=\"Nodes\", default=(), node=node)]\n try:\n if node.revision_type is not None:\n for field_name, field in node.revision_type.fields.items():\n try:\n if field.properties.hints['set_at_publish']:\n if field.properties.type == \"boolean\":\n p = Param(field_name, Param.Boolean, label=field.properties.nice_name, default=False)\n params.append(p)\n except (TypeError, KeyError):\n pass\n except:\n import traceback\n print traceback.print_exc()\n\n# if os.getenv('TOOLSET') not in ('beta', 'dev'):\n if os.getenv('TOOLSET') != 'dev':\n # Disable publish when a dev or beta toolset is used.\n actions[\"publish\"] = Action(\"Publish...\", func=publish, args=[node], params=params)\n\n return actions",
"def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"",
"def test_resource_actions(self):\n test_resource = ResourceTypeName.get()\n expected_actions = sorted(['rt:get', 'rt:put', 'rt:update', 'rt:delete'])\n self.app.post(\n f'/v1/resource/{test_resource}',\n data=json.dumps({'actions': expected_actions}),\n headers=admin_headers)\n\n # Get the actions for a resource type\n resp = self.app.get(f'/v1/resource/{test_resource}/actions', headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n actions = json.loads(resp.body)['actions']\n self.assertEqual(actions, expected_actions)\n\n # Delete actions from a resource type\n modify_actions = expected_actions[-2:]\n resp = self.app.delete(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n actions = sorted(json.loads(resp.body)['actions'])\n self.assertEqual(actions, expected_actions[:2])\n\n # OK returned when deleting actions not part of a resource type\n resp = self.app.delete(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n\n # Put actions into a resource type\n resp = self.app.put(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n actions = sorted(json.loads(resp.body)['actions'])\n self.assertEqual(actions, expected_actions)\n\n # OK returned when putting actions already a part of a resource type.\n resp = self.app.put(f'/v1/resource/{test_resource}/actions',\n data=json.dumps({'actions': modify_actions}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)",
"def find_action(self, env):\n best_action = None\n best_omega = None\n best_reward = -1e6\n for i, action in enumerate(zip(self.left, self.right)):\n action = np.array(action)\n reward = env.theoretical_step(action, self.delta_t)\n if reward > best_reward:\n best_action = action\n best_reward = reward\n best_omega = self.possible_omega[i]\n\n if best_reward <= -20:\n return None\n else:\n return Result(best_action, {\"omega\": best_omega.item()})",
"def test_custom_get_actions(self):\n # Action not specified\n self.assertFalse(admin.GlobalAdminHandler.add_custom_get_action(\n \"\", None))\n self.assertFalse(admin.GlobalAdminHandler._custom_get_actions.has_key(\n self.CUSTOM_ACTION_NAME))\n\n # Handler not specified\n self.assertFalse(admin.GlobalAdminHandler.add_custom_get_action(\n self.CUSTOM_ACTION_NAME, None))\n self.assertFalse(admin.GlobalAdminHandler._custom_get_actions.has_key(\n self.CUSTOM_ACTION_NAME))\n\n # All required fields specified\n self.assertTrue(admin.GlobalAdminHandler.add_custom_get_action(\n self.CUSTOM_ACTION_NAME, self.custom_handler))\n self.assertTrue(admin.GlobalAdminHandler._custom_get_actions.has_key(\n self.CUSTOM_ACTION_NAME))\n\n # Duplicate entry not allowed\n self.assertFalse(admin.GlobalAdminHandler.add_custom_get_action(\n self.CUSTOM_ACTION_NAME, self.custom_handler2))\n self.assertTrue(admin.GlobalAdminHandler._custom_get_actions.has_key(\n self.CUSTOM_ACTION_NAME))\n self.assertEqual(\n self.custom_handler,\n admin.GlobalAdminHandler\n ._custom_get_actions[self.CUSTOM_ACTION_NAME].handler)\n\n # Force overwrite existing entry\n self.assertTrue(admin.GlobalAdminHandler.add_custom_get_action(\n self.CUSTOM_ACTION_NAME, self.custom_handler2,\n overwrite=True))\n self.assertTrue(admin.GlobalAdminHandler._custom_get_actions.has_key(\n self.CUSTOM_ACTION_NAME))\n self.assertEqual(\n self.custom_handler2,\n admin.GlobalAdminHandler\n ._custom_get_actions[self.CUSTOM_ACTION_NAME].handler)\n\n # Remove the action\n admin.GlobalAdminHandler.remove_custom_get_action(\n self.CUSTOM_ACTION_NAME)\n self.assertFalse(admin.GlobalAdminHandler._custom_get_actions.has_key(\n self.CUSTOM_ACTION_NAME))\n\n # Should not overwrite Dashboard action\n self.assertTrue(dashboard.DashboardHandler.add_custom_get_action(\n self.CUSTOM_ACTION_NAME, handler=self.custom_handler))\n self.assertTrue(dashboard.DashboardHandler._custom_get_actions.has_key(\n self.CUSTOM_ACTION_NAME))\n self.assertFalse(admin.GlobalAdminHandler.add_custom_get_action(\n self.CUSTOM_ACTION_NAME, self.custom_handler))\n self.assertFalse(admin.GlobalAdminHandler._custom_get_actions.has_key(\n self.CUSTOM_ACTION_NAME))",
"def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]",
"def __eq__(self, a):\n if self.numActions != a.numActions:\n return False\n if self.act != a.act:\n return False\n return True",
"def __eq__(self, other):\n if not isinstance(other, CreateMyActionTemplateResponse):\n return False\n\n return self.__dict__ == other.__dict__",
"def helper_action_get_request_is_wrong(self, action_name):\n wrong = not util.safe_string_compare(action_name, self.last_request_get_dict[\"action\"][0])\n return wrong",
"def update_action(self):\n self.action = self.automata > self.states\n self.inv_action = self.inv_automata > self.states",
"def response_action(self, request, queryset): # noqa\n # There can be multiple action forms on the page (at the top\n # and bottom of the change list, for example). Get the action\n # whose button was pushed.\n try:\n action_index = int(request.POST.get('index', 0))\n except ValueError: # pragma: no cover\n action_index = 0\n\n # Construct the action form.\n data = request.POST.copy()\n data.pop(helpers.ACTION_CHECKBOX_NAME, None)\n data.pop(\"index\", None)\n\n # Use the action whose button was pushed\n try:\n data.update({'action': data.getlist('action')[action_index]})\n except IndexError: # pragma: no cover\n # If we didn't get an action from the chosen form that's invalid\n # POST data, so by deleting action it'll fail the validation check\n # below. So no need to do anything here\n pass\n\n action_form = self.action_form(data, auto_id=None)\n action_form.fields['action'].choices = self.get_action_choices(request)\n\n # If the form's valid we can handle the action.\n if action_form.is_valid():\n action = action_form.cleaned_data['action']\n func, name, description = self.get_actions(request)[action]\n\n # Get the list of selected PKs. If nothing's selected, we can't\n # perform an action on it, so bail.\n if action_form.cleaned_data['select_across']:\n selected = ALL\n else:\n selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)\n\n if not selected:\n return None\n\n revision_field = self.model._concurrencymeta.field\n\n if self.check_concurrent_action:\n self.delete_selected_confirmation_template = self.get_confirmation_template()\n\n # If select_across we have to avoid the use of concurrency\n if selected is not ALL:\n filters = []\n for x in selected:\n try:\n pk, version = x.split(\",\")\n except ValueError: # pragma: no cover\n raise ImproperlyConfigured('`ConcurrencyActionMixin` error.'\n 'A tuple with `primary_key, version_number` '\n 'expected: `%s` found' % x)\n filters.append(Q(**{'pk': pk,\n revision_field.attname: version}))\n\n queryset = queryset.filter(reduce(operator.or_, filters))\n if len(selected) != queryset.count():\n messages.error(request, 'One or more record were updated. '\n '(Probably by other user) '\n 'The execution was aborted.')\n return HttpResponseRedirect(\".\")\n else:\n messages.warning(request, 'Selecting all records, you will avoid the concurrency check')\n\n response = func(self, request, queryset)\n\n # Actions may return an HttpResponse, which will be used as the\n # response from the POST. If not, we'll be a good little HTTP\n # citizen and redirect back to the changelist page.\n if isinstance(response, HttpResponse):\n return response\n else:\n return HttpResponseRedirect(\".\")",
"def as_search_action(self, index, action):\n assert action in ('index', 'update', 'delete'), (\"Action must be 'index', 'update' or 'delete'.\") # noqa\n\n document = {\n '_index': index,\n '_type': self.search_doc_type,\n '_op_type': action,\n '_id': self.pk,\n }\n\n if action == 'index':\n document['_source'] = self.as_search_document(index)\n elif action == 'update':\n document['doc'] = self.as_search_document(index)\n return document",
"def _action_to_perform(self, ids, operationParams , default={}):\n full_ids=[]\n status=operationParams['status'] \n action=operationParams['action']\n docaction=operationParams['docaction']\n excludeStatuses=operationParams['excludeStatuses']\n includeStatuses=operationParams['includeStatuses']\n \n stopFlag,allIDs=self._get_recursive_parts(ids, excludeStatuses, includeStatuses)\n self._action_ondocuments(allIDs,docaction, status)\n if action:\n idMoves=move_workflow(self, allIDs, action, status)\n self.logging_workflow(idMoves, action, status)\n objId=self.browse(allIDs).with_context({'internal_writing':True}).write(default)\n if objId:\n wf_message_post(self, allIDs, body='Status moved to: {status}.'.format(status=status))\n return objId",
"def findAction(self, actionId): #$NON-NLS-1$\r"
]
| [
"0.5894497",
"0.58674324",
"0.57587546",
"0.57040775",
"0.567426",
"0.5658385",
"0.5616273",
"0.5513813",
"0.547424",
"0.54145575",
"0.5409295",
"0.5403284",
"0.54000676",
"0.5348082",
"0.53056765",
"0.52547824",
"0.5209515",
"0.51633745",
"0.5149985",
"0.51365876",
"0.51152486",
"0.51098895",
"0.50674206",
"0.5036201",
"0.5022388",
"0.5010751",
"0.5006814",
"0.5003415",
"0.4967344",
"0.49472618"
]
| 0.7227713 | 0 |
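The preference rules of which_action in the record above, written out as plain data for readability. The class names appear as strings because the real action classes are not part of this record; any (a1, a2) class pairing not listed falls through to keeping a1, the first action.

WHICH_ACTION_OUTCOME = {
    ("IndexAction", "UnindexAction"): "ConflictError",
    ("UnindexAction", "IndexAction"): "ConflictError",
    ("ReindexAction", "UnindexAction"): "ConflictError",
    ("UnindexAction", "ReindexAction"): "ConflictError",
    ("ReindexAction", "IndexAction"): "keep the second action (a2)",
}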
State chart for optimization. If the new action is X and the existing action is Y, generate the resulting action named in the chart cells.

                  New       INDEX      UNINDEX    REINDEX

    Existing  INDEX         index      nothing*   index*
              UNINDEX       reindex*   unindex    reindex
              REINDEX       index      unindex    reindex

Starred entries in the chart above indicate special cases; they are the (existing, new) pairs handled explicitly in the statefuncs table below. Typically the last action encountered in the actions list is the most optimal action, except for the starred cases. | def optimize_actions(actions):
result = {}
def donothing(oid, index_oid, action1, action2):
del result[(oid, index_oid)]
def doadd(oid, index_oid, action1, action2):
result[(oid, index_oid)] = action1
def dochange(oid, index_oid, action1, action2):
result[(oid, index_oid)] = ReindexAction(
action2.index, action2.mode, oid,
)
def dodefault(oid, index_oid, action1, action2):
result[(oid, index_oid)] = action2
statefuncs = {
# txn asked to remove an object that previously it was
# asked to add, conclusion is to do nothing
(IndexAction, UnindexAction):donothing,
# txn asked to change an object that was not previously added,
        # conclusion is to just do the add
(IndexAction, ReindexAction):doadd,
# txn action asked to remove an object then readd the same
# object. We translate this to a single change action.
(UnindexAction, IndexAction):dochange,
}
for newaction in actions:
oid = newaction.oid
index_oid = newaction.index_oid
oldaction = result.get((oid, index_oid))
statefunc = statefuncs.get(
(oldaction.__class__, newaction.__class__),
dodefault,
)
statefunc(oid, index_oid, oldaction, newaction)
result = list(sorted(result.values()))
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def result(self, state, action):\n state_after_act = [[0 for i in range(self.col)] for j in range(self.row)]\n for k in action:\n x = k[1][0]\n y = k[1][1]\n if k[0] == \"vaccinate\":\n state_after_act[x][y] = ('I', 1)\n else:\n state_after_act[x][y] = ('Q', 1)\n\n for i in range(self.row):\n for j in range(self.col):\n if state_after_act[i][j] == 0:\n if state[i][j][0] == 'U' or state[i][j][0] == 'I':\n state_after_act[i][j] = state[i][j]\n\n elif state[i][j][0] == 'S':\n if state[i][j][1] == 3:\n state_after_act[i][j] = ('H', 1)\n else:\n if state[i][j][1] == 1:\n state_after_act[i][j] = ('S', 2)\n elif state[i][j][1] == 2:\n state_after_act[i][j] = ('S', 3)\n\n elif state[i][j][0] == 'Q':\n if state[i][j][1] == 2:\n state_after_act[i][j] = ('H', 1)\n else:\n state_after_act[i][j] = ('Q', 2)\n\n elif state[i][j][0] == 'H':\n state_after_act[i][j] = self.healthy(i, j, state,state_after_act)\n state_after_act[i] = tuple(state_after_act[i])\n return tuple(state_after_act)",
"def applyAction(state, action):\r\n if action == 'N':\r\n return (state[0] - 1, state[1])\r\n\r\n if action == 'E':\r\n return (state[0], state[1] + 1)\r\n\r\n if action == 'W':\r\n return (state[0], state[1] - 1)\r\n\r\n if action == 'S':\r\n return (state[0] + 1, state[1])",
"def generateStateTable(C, ruleSet, terminals, indexFunc):\n #initialize the state dictionary\n stateDict = {}\n for i in range(len(C)):\n stateDict[i] = {}\n\n gotoDict = {}\n for i in range(len(C)):\n gotoDict[i] = {}\n \n #compute the states\n for state in range(len(C)):\n for item in C[state]:\n exp = item.expects()\n targetSet = goto(C[state], exp, ruleSet, terminals)\n\n #check for conflicts\n #if there is a goto, shift\n if targetSet:\n #targetState = C.index(targetSet)\n #targetState = fullIndex(C, targetSet)\n targetState = indexFunc(targetSet)\n if stateDict[state].has_key(exp):\n x = stateDict[state][exp]\n if x[0] == SHIFT and x[1] != targetState:\n print 'shift/shift conflict! for %s' % item\n print 'favoring this on %s' % exp\n if x[0] == REDUCE:\n print ('shift/reduce conflict for %s, was reducing '\\\n 'by %s, now shifting on %s') % (\n item, str(ruleSet[x[1]]), exp)\n stateDict[state][exp] = (SHIFT, targetState)\n\n #else if point is at the end and lhs isn't S', reduce\n elif item.pointAtEnd() and item.lhs != \"S'\":\n for i in item.lookaheads:\n if stateDict[state].has_key(i):\n x = stateDict[state][i]\n if x[0] == SHIFT:\n print ('shift/reduce conflict for %s, was '\n 'shifting, will not reduce') % item\n if x[0] == REDUCE:\n print 'reduce/reduce conflict for %s' % item\n thisRule = ruleSet[item.ruleNumber]\n thatRule = ruleSet[x[1]]\n if len(thisRule.rhs) > len(thatRule.rhs):\n print 'favoring redux by %s over %s' % (\n thisRule, thatRule)\n stateDict[state][i] = (REDUCE,\n item.ruleNumber)\n else:\n print 'favoring redux by %s over %s' % (\n thatRule, thisRule)\n print\n else:\n stateDict[state][i] = (REDUCE, item.ruleNumber)\n\n #else if point is at the and and lhs is S', accept\n elif item.pointAtEnd() and item.lhs == \"S'\":\n for i in item.lookaheads:\n stateDict[state][i] = (ACCEPT, item.ruleNumber)\n\n #else, panic\n else:\n raise RuntimeError, 'Waaaaaaah!!! Aieeee!'\n\n #compute goto table\n ## ATC -- this and the LR0 version are identical, move to common\n for state in range(len(C)):\n for item in C[state]:\n targetSet = goto(C[state], item.lhs, ruleSet, terminals)\n if targetSet:\n targetState = C.index(targetSet)\n gotoDict[state][item.lhs] = targetState\n\n return stateDict, gotoDict",
"def get_action(self, state):\n\n best_action = None\n best_value = -np.inf\n actions = [0, 1, 2, 3] # left, down, right, up\n for a in actions:\n row = state // self.edge\n col = state % self.edge\n # print (row, col)\n if a == 0:\n col = max(col-1, 0)\n elif a == 1:\n row = min(row+1, self.edge-1)\n elif a == 2:\n col = min(col+1, self.edge-1)\n elif a == 3:\n row = max(row-1, 0)\n # print (row, col)\n\n new_state = row * self.edge + col\n # print (new_state)\n if (self.values[new_state] > best_value or new_state == self.num_states-1): #goal\n best_value = 1.0 if new_state == self.num_states-1 else self.values[new_state]\n best_action = a\n return best_action",
"def computeActionFromValues(self, state):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n \n # Code to remove --- from here\n resultingAction = None\n if self.mdp.isTerminal(state):\n return resultingAction\n else:\n bestq = float(\"-inf\")\n actions = self.mdp.getPossibleActions(state)\n for action in actions:\n qvalue = self.computeQValueFromValues(state, action)\n if qvalue > bestq:\n bestq = qvalue\n resultingAction = action\n return resultingAction\n\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"",
"def generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for l in range(self.u0_n):\n for m in range(self.u1_n):\n \n u = np.array([ self.ud[0][l] , self.ud[1][m] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = np.array([l,m])\n \n # Increment node number\n action = action + 1",
"def update_action(self):\n self.action = self.automata > self.states\n self.inv_action = self.inv_automata > self.states",
"def choose_action(self, board):\n options = board.empty_cells\n # to allow exploration, have a small probability of a random move\n p_random = random.random()\n # if the state is not in the table add it\n if (self.sign, board.state) not in self.Q_table.keys() or p_random < self.epsilon:\n values = {}\n for option in options:\n values[option] = random.random()\n self.Q_table[(self.sign, board.state)] = values\n self.action = random.choice(options)\n else:\n values = self.Q_table[(self.sign, board.state)]\n action = max(values, key=values.get)\n self.action = action\n\n # decrease exploration after each action\n if self.epsilon > 0:\n self.epsilon -= 0.0001\n\n return self.action",
"def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n optimalAction = None\n maxValue = float('-inf')\n for a in actions:\n qValue = self.computeQValueFromValues(state, a)\n if qValue > maxValue:\n maxValue = qValue\n optimalAction = a\n return optimalAction",
"def choose_action(self, d, name):\n # some initializations\n current_state = d.state\n caution = False\n confidence = False\n buy_rules = [0,0,0,0]\n next_vec = self.transition_matrix[name][int(current_state)]\n num_undesirable_states = (self.trigger_states[name]+1)\n num_desirable_states = (next_vec.size-num_undesirable_states)\n if num_undesirable_states<5:\n left_basket_max = 2\n else:\n left_basket_max = num_undesirable_states//3\n if num_desirable_states<5:\n right_basket_min = next_vec.size-2\n else:\n right_basket_min = next_vec.size-num_undesirable_states//3\n # check if rules are satisfied\n # rule-1\n m1 = np.max(next_vec[:self.trigger_states[name]+1])\n m1_idx = np.argmax(next_vec[:self.trigger_states[name]+1])\n m2 = np.max(next_vec[self.trigger_states[name]+1:])\n m2_idx = np.argmax(next_vec[self.trigger_states[name]+1:])+\\\n next_vec[:self.trigger_states[name]+1].size\n if m2-m1>=0.1: # threshold\n #print('Rule #1 satisfied.')\n buy_rules[0]=1\n # rule-2\n if np.sum(next_vec[self.trigger_states[name]+1:])-\\\n np.sum(next_vec[:self.trigger_states[name]+1])>=0.25: # threshold\n #print('Rule #2 satisfied.')\n buy_rules[1]=1\n # rule-3 \n if m1_idx<left_basket_max: \n if buy_rules[0]!=1:\n caution=True\n #print('Predicted state is very undesirable.')\n # rule-3\n if m2_idx>=right_basket_min:\n if buy_rules[0]==1:\n confidence=True\n #print('Predicted state is very desirable.')\n if d.MACD>d.signal_line:\n #print('Rule #3 satisfied.')\n buy_rules[2] = True\n # sum of k most undesirable vs k most desirable\n temp_1 = np.sort(next_vec[self.trigger_states[name]+1:])\n temp_2 = np.sort(next_vec[:self.trigger_states[name]+1])\n size = 3\n if temp_1.size<size or temp_2.size<size:\n size = min(temp_1.size, temp_2.size)\n k1 = np.sum(temp_1[::-size])\n k2 = np.sum(temp_2[::-size])\n if k1-k2>0.25:\n #print('Rule #4 satisfied.')\n buy_rules[3] = True\n # finally, make a call using the rules\n if confidence or sum(buy_rules)>=3:\n return 'buy'\n elif caution or (buy_rules[0]==0 and sum(buy_rules)<=2 and m1-m2>0.05):\n return 'sell'\n else:\n return 'hold'",
"def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n self.Temporary_QValue = util.Counter() #initializing a temporary QValue counter\n\n temporary_QValue = self.Temporary_QValue\n\n legal_Actions = self.getLegalActions(state) #get all the legal actions like north,south,east,west,exit\n\n length_legalActions = len(legal_Actions) #find length of legal actions just to find later if we have legal actions or not\n\n if length_legalActions == 0: #to check if we have any legal action or not\n return 0.0 #Returns value 0 as we do not have any legal actions, we cannot pass 'None' as autograder in q8 expects a float value and not string value\n\n for a in legal_Actions: #loop to check for each legal action\n\n temporary_QValue[a] = self.getQValue(state,a) #Find the Qvalue of each action\n\n best_action = temporary_QValue.argMax() #find the best action to take in a state\n return best_action\n #util.raiseNotDefined()",
"def get_action(self, state):\n if np.random.rand() <= self.epsilon:\n action_idx = random.randrange(self.action_size)\n else:\n \n # Use all traces for RNN\n #q = self.model.predict(state) # 1x8x3\n #action_idx = np.argmax(q[0][-1])\n\n # Only use last trace for RNN\n q = self.model.predict(state) # 1x3\n action_idx = np.argmax(q)\n return action_idx",
"def choose_action(q_table: np.ndarray, state: int,\n exploration_rate: float) -> int:\n random_value = random.uniform(0, 1)\n if random_value > exploration_rate:\n action = best_action(q_table, state)\n else:\n num_actions = q_table.shape[1]\n action = random.randint(0, num_actions-1)\n return action",
"def action(self, action_space, observation, info): # pylint: disable=no-self-use,unused-argument\n _ = (observation, info) # not using the observation for random decision\n action = None\n\n # decide if explore or explot\n\n # forward\n\n # save to memory\n\n # backward\n # decide what to use for training\n # update model\n # save weights\n\n return action",
"def nextAction(state) :\n newState = copy.deepcopy(state)\n # Updates the timeline to be at the next action, i.e. the action with the \n # lowest timestamp in the list of next actions\n newState['timeline']['currentAction'] = newState['timeline']['nextActions'][0][1]\n newState['timeline']['timestamp'] = newState['timeline']['nextActions'][0][0]\n newState['timeline']['nextActions'] = newState['timeline']['nextActions'][1::]\n return newState",
"def generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for k in range(self.u0_n):\n \n u = np.array([ self.ud[0][k] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = k\n\n # Increment node number\n action = action + 1",
"def optimize_states(old_state, committed_state, new_state):\n old = old_state['actions']\n committed = committed_state['actions']\n new = new_state['actions']\n\n old, new, committed = map(optimize_actions, [old, new, committed])\n\n old_state['actions'] = old\n committed_state['actions'] = committed\n new_state['actions'] = new",
"def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n # Initialize max_value as - infinity\n # Initialize best action as None, choose max_value action\n max_value = float(\"-inf\")\n computed_action = None\n\n for action in actions:\n # Find q value of specified action\n q_value = self.computeQValueFromValues(state, action)\n # Update action if it's the best so far\n if q_value > max_value:\n max_value = q_value\n computed_action = action\n return computed_action",
"def chooseAction(self, gameState):\n\n ####print \"chooseAction Called\"\n\n #self.lastEatenFood = None\n\n\n actions = gameState.getLegalActions(self.index)\n\n ##print \"\\nNEW ACTION\\n--------\"\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # ###print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n \n\n return random.choice(bestActions)",
"def _setup_action_selection(self, state_ph):\n ### PROBLEM 2\n ### YOUR CODE HERE\n # raise NotImplementedError\n # current_state = tf.reshape(state_ph, (1, tf.size(state_ph)))\n\n current_state = tf.tile(state_ph, (self._num_random_action_selection, 1))\n horizon_rewards = 0\n sample_actions = tf.random_uniform(shape=(self._horizon, self._num_random_action_selection, self._action_dim),\n minval=self._action_space_low, maxval=self._action_space_high)\n\n for i in range(self._horizon):\n next_state_pred = self._dynamics_func(current_state, sample_actions[i], reuse=True)\n horizon_rewards = horizon_rewards + self._reward_func(current_state, sample_actions[i], next_state_pred)\n current_state = next_state_pred\n\n best_action_idx = tf.argmax(horizon_rewards)\n best_action_idx = tf.squeeze(best_action_idx)\n best_action = sample_actions[0][best_action_idx]\n\n return best_action",
"def get_action(self, q_ind, r_table, t, exploration_factor,\n allow_coordination, coordination_vector,\n best_actions):\n if t < self.next_availability:\n action = (0, self.did, -100, -100)\n return action\n\n # In case previous action was coordination then only allow independent\n # waiting action as next action\n if allow_coordination:\n coordination_factor = coordination_vector[self.curr_bin]\n else:\n coordination_factor = 0.0\n\n rand1 = np.random.random()\n rand2 = np.random.random()\n\n if not self.is_strategic:\n coordination = False\n action = q_ind.get_non_strategic_action(self.curr_bin)\n elif rand1 <= coordination_factor:\n # Coordination\n coordination = True\n # Fill up coordinated rebalancing\n action = r_table.get_action(t, self.curr_bin)\n else:\n # Independence\n coordination = False\n if rand2 <= exploration_factor:\n action = q_ind.get_random_action(self.curr_bin)\n else:\n action = best_actions[self.curr_bin]['ind']\n\n if action == self.curr_bin:\n if coordination:\n action = (1, self.did, self.curr_bin, self.curr_bin)\n else:\n action = (2, self.did, self.curr_bin, self.curr_bin)\n else:\n if coordination:\n action = (3, self.did, self.curr_bin, action)\n else:\n action = (4, self.did, self.curr_bin, action)\n return action",
"def reducer(state: State, action: Action) -> State:\n state = copy.deepcopy(state)\n if isinstance(state, dict):\n state = forest.state.State.from_dict(state)\n if isinstance(action, dict):\n try:\n action = forest.actions.Action.from_dict(action)\n except TypeError:\n return state.to_dict()\n\n if action.kind == SET_FIGURES:\n state.layers.figures = action.payload\n\n elif action.kind == ON_ADD:\n state.layers.mode.state = \"add\"\n\n elif action.kind == ON_CLOSE:\n row_index = action.payload\n try:\n layer_index = sorted(state.layers.index.keys())[row_index]\n del state.layers.index[layer_index]\n except IndexError:\n pass\n\n elif action.kind == ON_EDIT:\n row_index = action.payload\n layer_index = sorted(state.layers.index.keys())[row_index]\n state.layers.mode.state = \"edit\"\n state.layers.mode.index = layer_index\n\n elif action.kind == SAVE_LAYER:\n # NOTE: Layer index is stored in payload\n layer_index = action.payload[\"index\"]\n settings = action.payload[\"settings\"]\n if layer_index in state.layers.index:\n state.layers.index[layer_index].update(settings)\n else:\n state.layers.index[layer_index] = settings\n\n elif action.kind == SET_ACTIVE:\n active = action.payload[\"active\"]\n row_index = action.payload[\"row_index\"]\n row_to_layer = sorted(state.layers.index.keys())\n try:\n layer_index = row_to_layer[row_index]\n state.layers.index[layer_index][\"active\"] = active\n except IndexError:\n pass\n\n return state.to_dict()",
"def get_action(self, state):\n\n \"\"\"\n XXX: DO NOT MODIFY THAT FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n # XXX : You shouldn't care on what is going on below.\n # Variables are specified in constructor.\n if self.beliefGhostStates is None:\n self.beliefGhostStates = state.getGhostBeliefStates()\n if self.walls is None:\n self.walls = state.getWalls()\n\n # @TODO Put this back to normal\n ret = self.updateAndGetBeliefStates(\n self._computeNoisyPositions(state))\n\n if self.i < 25:\n debug = ret[0]\n self.l.append(np.max(debug))\n self.i += 1\n #if debug == 1: # To Stop as soon as convergence happens\n #self.i = 25\n\n prefix = 'data/' # To indicate path\n\n if self.i == 25:\n\n if os.path.exists(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\")):\n os.remove(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\"))\n\n f = open(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\"), \"a\")\n first = True\n for data in self.l:\n if first:\n first = False\n f.write(str(data))\n else:\n f.write(\",\" + str(data))\n self.i += 1\n f.close()\n print(\"Done\")\n plt.plot(range(1, len(self.l)+1), self.l)\n plt.xlabel('Time step')\n plt.ylabel('Maximum probability')\n plt.title('Bayes Filter')\n plt.axis([0, self.i, 0, 1])\n plt.savefig(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".pdf\"), bbox_inches='tight')\n plt.show()\n\n return ret",
"def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n opIndices = self.getOpponents(gameState)\n opStates = [gameState.getAgentState(i) for i in opIndices]\n opCarry = [x.numCarrying for x in opStates]\n \n if max(opCarry) >= 5:\n self.isOffensive = False\n else:\n self.isOffensive = True\n\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n\n\n # print if get eaten\n myPos = gameState.getAgentPosition(self.index)\n prevGameState = self.getPreviousObservation()\n if prevGameState is not None:\n\n previousPos = prevGameState.getAgentPosition(self.index)\n if self.getMazeDistance(myPos, previousPos) > 1:\n print(\"prePostion\",previousPos)\n print()\n previousLegalAction = prevGameState.getLegalActions(self.index)\n print([(self.evaluate(prevGameState, a), a) for a in previousLegalAction])\n print()\n print(self.getNonScaredGhostPos(prevGameState))\n print()\n print()\n\n\n return random.choice(bestActions)",
"def update(self):\n if len(self.memory) < self.BATCH_SIZE * 20:\n return\n\n # == EXPERIENCE REPLAY ==\n transitions = self.memory.sample(self.BATCH_SIZE)\n # Transpose the batch (see https://stackoverflow.com/a/19343/3343043\n # for detailed explanation). This converts batch-array of Transitions\n # to Transition of batch-arrays.\n batch = Transition(*zip(*transitions))\n (non_final_mask, non_final_state_nxt, state, action, reward, g_x,\n l_x) = self.unpack_batch(batch)\n\n # == get Q(s,a) ==\n # `gather` reguires that idx is Long and input and index should have the\n # same shape with only difference at the dimension we want to extract.\n # value out[i][j][k] = input[i][j][ index[i][j][k] ], which has the\n # same dim as index\n # -> state_action_values = Q [ i ][ action[i] ]\n # view(-1): from mtx to vector\n state_action_values = (\n self.Q_network(state).gather(dim=1, index=action).view(-1)\n )\n\n # == get a' ==\n # u', d' = argmin_u' argmax_d' Q_policy(s', u', d')\n # a' = tuple2Int(u', d')\n with torch.no_grad():\n num_non_final = non_final_state_nxt.shape[0]\n state_nxt_action_values = self.Q_network(non_final_state_nxt)\n Q_mtx = state_nxt_action_values.detach().reshape(\n num_non_final, self.numActionList[0], self.numActionList[1]\n )\n # minmax values and indices\n pursuerValues, colIndices = Q_mtx.max(dim=-1)\n _, rowIdx = pursuerValues.min(dim=-1)\n colIdx = colIndices[np.arange(num_non_final), rowIdx]\n action_nxt = [\n actionIndexTuple2Int((r, c), self.numActionList)\n for r, c in zip(rowIdx, colIdx)\n ]\n action_nxt = (torch.LongTensor(action_nxt).to(self.device).view(-1, 1))\n\n # == get expected value ==\n state_value_nxt = torch.zeros(self.BATCH_SIZE).to(self.device)\n\n with torch.no_grad(): # V(s') = Q_tar(s', a'), a' is from Q_policy\n if self.double_network:\n Q_expect = self.target_network(non_final_state_nxt)\n else:\n Q_expect = self.Q_network(non_final_state_nxt)\n state_value_nxt[non_final_mask] = \\\n Q_expect.gather(dim=1, index=action_nxt).view(-1)\n\n # == Discounted Reach-Avoid Bellman Equation (DRABE) ==\n if self.mode == \"RA\":\n expected_state_action_values = (\n torch.zeros(self.BATCH_SIZE).float().to(self.device)\n )\n # Q(s, u) = V( f(s,u) )\n non_terminal = torch.max(\n g_x[non_final_mask],\n torch.min(l_x[non_final_mask], state_value_nxt[non_final_mask]),\n )\n terminal = torch.max(l_x, g_x)\n\n # normal state\n expected_state_action_values[non_final_mask] = (\n non_terminal * self.GAMMA + terminal[non_final_mask] *\n (1 - self.GAMMA)\n )\n # terminal state\n final_mask = torch.logical_not(non_final_mask)\n if self.terminalType == \"g\":\n expected_state_action_values[final_mask] = g_x[final_mask]\n elif self.terminalType == \"max\":\n expected_state_action_values[final_mask] = terminal[final_mask]\n else:\n raise ValueError(\"invalid terminalType\")\n\n # == regression: Q(s, a) <- V(s) ==\n self.Q_network.train()\n loss = smooth_l1_loss(\n input=state_action_values,\n target=expected_state_action_values.detach(),\n )\n\n # == backpropagation ==\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.Q_network.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n self.update_target_network()\n\n return loss.item()",
"def actions(self, state):\n \n #les actions sont définies comme étant les nombres possibles dans \n #la case i,j\n theActions = []\n for i in range(size):\n for j in range(size):\n line = i\n col = j\n if(state[i][j] == 0):\n possibleNumbers = [1,2,3,4,5,6,7,8,9]\n config = state\n for a in range(size):\n x = config[line][a]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n for b in range(size):\n x = config[b][col]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n #identifie quelle boite on veut vérifier\n hBox = col - col % 3\n vBox = line - line % 3\n \n for c in range(3):\n for d in range(3):\n x = config[c+vBox][d+hBox]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n for k in possibleNumbers:\n theActions.append((i,j,k))\n return theActions",
"def addAction(state, timeDifference, newAction) :\n newState = copy.deepcopy(state)\n # Sorts the nextActions array with the newAction added by their respective\n # Timestamps; adds the newAction at its relevant position\n newTs = newState['timeline']['timestamp'] + timeDifference\n nas = newState['timeline']['nextActions']\n newState['timeline']['nextActions'] = [ na for na in nas if na[0] <= newTs ] + [( newTs, newAction )] + [ na for na in nas if na[0] > newTs ]\n return newState",
"def get_action(self, state):\n assert np.shape(state) == (self._state_dim,)\n\n ### PROBLEM 2\n ### YOUR CODE HERE\n # raise NotImplementedError\n best_action = self._sess.run(self._best_action,\n feed_dict={self._state_ph: np.atleast_2d(state)})\n assert np.shape(best_action) == (self._action_dim,)\n\n return best_action",
"def update(self, action): \n self.memory.pop(-1) \n self.memory.insert(0, [self.last_state.cpu().numpy(), action.cpu().numpy()])\n\n self.last_action = action",
"def act(self, state):\n # Append the state to the short term memory (ie. History)\n self._history.append(state)\n\n # If policy requires agent to explore, sample random action\n if self._explorer.is_exploring(self._num_actions_taken):\n action = self._explorer(self.nb_actions)\n else:\n # Use the network to output the best action\n env_with_history = self._history.value\n q_values = self._action_value_net.eval(\n # Append batch axis with only one sample to evaluate\n env_with_history.reshape((1,) + env_with_history.shape)\n )\n\n self._episode_q_means.append(np.mean(q_values))\n self._episode_q_stddev.append(np.std(q_values))\n\n # Return the value maximizing the expected reward\n action = q_values.argmax()\n\n # Keep track of interval action counter\n self._num_actions_taken += 1\n return action"
]
| [
"0.65228754",
"0.5933773",
"0.5887716",
"0.58360565",
"0.58174783",
"0.5791254",
"0.57908595",
"0.5762855",
"0.5740069",
"0.5715256",
"0.56917787",
"0.56880116",
"0.5683548",
"0.56832623",
"0.5663208",
"0.56388277",
"0.5631226",
"0.5610196",
"0.56086373",
"0.5602409",
"0.5600761",
"0.5597179",
"0.5596488",
"0.55952996",
"0.5587483",
"0.5584674",
"0.55763793",
"0.55720365",
"0.5556796",
"0.5549642"
]
| 0.6131822 | 1 |
Optimize actions in states during conflict resolution | def optimize_states(old_state, committed_state, new_state):
old = old_state['actions']
committed = committed_state['actions']
new = new_state['actions']
old, new, committed = map(optimize_actions, [old, new, committed])
old_state['actions'] = old
committed_state['actions'] = committed
new_state['actions'] = new | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def optimize_actions(actions):\n result = {}\n\n def donothing(oid, index_oid, action1, action2):\n del result[(oid, index_oid)]\n\n def doadd(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = action1\n\n def dochange(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = ReindexAction(\n action2.index, action2.mode, oid,\n )\n\n def dodefault(oid, index_oid, action1, action2):\n result[(oid, index_oid)] = action2\n\n statefuncs = {\n # txn asked to remove an object that previously it was\n # asked to add, conclusion is to do nothing\n (IndexAction, UnindexAction):donothing,\n # txn asked to change an object that was not previously added,\n # concusion is to just do the add\n (IndexAction, ReindexAction):doadd,\n # txn action asked to remove an object then readd the same\n # object. We translate this to a single change action.\n (UnindexAction, IndexAction):dochange,\n }\n\n for newaction in actions:\n oid = newaction.oid\n index_oid = newaction.index_oid\n oldaction = result.get((oid, index_oid))\n statefunc = statefuncs.get(\n (oldaction.__class__, newaction.__class__),\n dodefault,\n )\n statefunc(oid, index_oid, oldaction, newaction)\n\n result = list(sorted(result.values()))\n return result",
"def state_encod_arch2(self, state, action):",
"def resolve_conflicts(actions):\n\n # organize actions by discriminators\n unique = {}\n output = []\n for i, action in enumerate(actions):\n if not isinstance(action, dict):\n # old-style tuple action\n action = expand_action(*action)\n\n # \"order\" is an integer grouping. Actions in a lower order will be\n # executed before actions in a higher order. Within an order,\n # actions are executed sequentially based on original action ordering\n # (\"i\").\n order = action['order'] or 0\n discriminator = action['discriminator']\n\n # \"ainfo\" is a tuple of (order, i, action) where \"order\" is a\n # user-supplied grouping, \"i\" is an integer expressing the relative\n # position of this action in the action list being resolved, and\n # \"action\" is an action dictionary. The purpose of an ainfo is to\n # associate an \"order\" and an \"i\" with a particular action; \"order\"\n # and \"i\" exist for sorting purposes after conflict resolution.\n ainfo = (order, i, action)\n\n if discriminator is None:\n # The discriminator is None, so this action can never conflict.\n # We can add it directly to the result.\n output.append(ainfo)\n continue\n\n L = unique.setdefault(discriminator, []) # noqa\n L.append(ainfo)\n\n # Check for conflicts\n conflicts = {}\n\n for discriminator, ainfos in unique.items():\n\n # We use (order, i) as a sort key because we need to\n def byorder(ainfo):\n order, i = ainfo[0], ainfo[1]\n return order, i\n\n ainfos.sort(key=byorder)\n ainfo, rest = ainfos[0], ainfos[1:]\n output.append(ainfo)\n _, _, action = ainfo\n order = action['order']\n discriminator = action['discriminator']\n base_module_name = action['module_name']\n base_order = action['order']\n\n for _, _, action in rest:\n if action['order'] <= base_order:\n L = conflicts.setdefault(discriminator, [base_module_name, base_order]) # noqa\n L.append((action['module_name'], action['order']))\n\n if conflicts:\n raise ConfigurationConflictError(conflicts)\n\n # Sort conflict-resolved actions by (order, i) and return them.\n return [x[2] for x in sorted(output, key=operator.itemgetter(0, 1))]",
"def actions(self, state):\n\t\traise NotImplementedError",
"def resolve_conflicts(self, commit=True):\n pass # pragma: no cover",
"def update_action(self):\n self.action = self.automata > self.states\n self.inv_action = self.inv_automata > self.states",
"def choose_action(self, d, name):\n # some initializations\n current_state = d.state\n caution = False\n confidence = False\n buy_rules = [0,0,0,0]\n next_vec = self.transition_matrix[name][int(current_state)]\n num_undesirable_states = (self.trigger_states[name]+1)\n num_desirable_states = (next_vec.size-num_undesirable_states)\n if num_undesirable_states<5:\n left_basket_max = 2\n else:\n left_basket_max = num_undesirable_states//3\n if num_desirable_states<5:\n right_basket_min = next_vec.size-2\n else:\n right_basket_min = next_vec.size-num_undesirable_states//3\n # check if rules are satisfied\n # rule-1\n m1 = np.max(next_vec[:self.trigger_states[name]+1])\n m1_idx = np.argmax(next_vec[:self.trigger_states[name]+1])\n m2 = np.max(next_vec[self.trigger_states[name]+1:])\n m2_idx = np.argmax(next_vec[self.trigger_states[name]+1:])+\\\n next_vec[:self.trigger_states[name]+1].size\n if m2-m1>=0.1: # threshold\n #print('Rule #1 satisfied.')\n buy_rules[0]=1\n # rule-2\n if np.sum(next_vec[self.trigger_states[name]+1:])-\\\n np.sum(next_vec[:self.trigger_states[name]+1])>=0.25: # threshold\n #print('Rule #2 satisfied.')\n buy_rules[1]=1\n # rule-3 \n if m1_idx<left_basket_max: \n if buy_rules[0]!=1:\n caution=True\n #print('Predicted state is very undesirable.')\n # rule-3\n if m2_idx>=right_basket_min:\n if buy_rules[0]==1:\n confidence=True\n #print('Predicted state is very desirable.')\n if d.MACD>d.signal_line:\n #print('Rule #3 satisfied.')\n buy_rules[2] = True\n # sum of k most undesirable vs k most desirable\n temp_1 = np.sort(next_vec[self.trigger_states[name]+1:])\n temp_2 = np.sort(next_vec[:self.trigger_states[name]+1])\n size = 3\n if temp_1.size<size or temp_2.size<size:\n size = min(temp_1.size, temp_2.size)\n k1 = np.sum(temp_1[::-size])\n k2 = np.sum(temp_2[::-size])\n if k1-k2>0.25:\n #print('Rule #4 satisfied.')\n buy_rules[3] = True\n # finally, make a call using the rules\n if confidence or sum(buy_rules)>=3:\n return 'buy'\n elif caution or (buy_rules[0]==0 and sum(buy_rules)<=2 and m1-m2>0.05):\n return 'sell'\n else:\n return 'hold'",
"def which_action(a1, a2):\n def doconflict(a1, a2):\n raise ConflictError\n def dosecond(a1, a2):\n return a2\n def dofirst(a1, a2):\n return a1\n statefuncs = {\n (IndexAction, UnindexAction):doconflict,\n (UnindexAction, IndexAction):doconflict,\n (ReindexAction, UnindexAction):doconflict,\n (UnindexAction, ReindexAction):doconflict,\n (ReindexAction, IndexAction):dosecond,\n }\n return statefuncs.get((a1.__class__, a2.__class__), dofirst)(a1, a2)",
"def take_action(self, state):",
"def actions(self, state):\n raise NotImplementedError # Override this!",
"def choose_action(self, state, task=0):\n pass",
"def _state_actions(self) -> dict:\n return {}",
"def do_post_action_processing(self, i_state, low_level_actions):\n pass",
"def __call__(self, state, action):\n pass",
"def get_state_actions_mapping(self):\n return None",
"def select_action(self, state):",
"def transition_function(state, action):\n results = []\n\n if action.action_type == Action.NOOP:\n results.append((state, 1.0))\n\n elif action.action_type == Action.GRASP:\n # point distribution for success mixed with point distribution for failure\n alpha = 0.8\n\n # success - gripper moves to object position and holds object\n success_state = deepcopy(state)\n obj = utils.get_object(success_state, action.name)\n if obj is None:\n alpha = 0\n else:\n gripper = utils.get_object(success_state, 'gripper')\n if obj.__class__ == Drawer:\n gripper.x = obj.x + (obj.width - 1)/2 + 1\n gripper.y = obj.y\n gripper.z = 2\n else:\n gripper.x = obj.x\n gripper.y = obj.y\n gripper.z = obj.z\n gripper.holding = obj.name\n gripper.closed = True\n\n results.append((success_state, alpha*1.0))\n\n # failure - no change\n results.append((state, (1 - alpha)*1.0))\n\n elif action.action_type == Action.PLACE:\n gripper = utils.get_object(state, 'gripper')\n new_z = utils.ray_trace(action.position.x, action.position.y)\n\n # point distribution for success mixed with point distribution for failure\n alpha = 0.8\n\n # success - gripper moves to place position and releases object\n success_state = deepcopy(state)\n obj = utils.gripper_object(success_state)\n gripper_move = utils.get_object(success_state, 'gripper')\n if obj is not None and obj.__class__ == Drawer:\n alpha = 0\n else:\n if obj is not None:\n if obj.__class__ == Container:\n obj.x = action.position.x + obj.x - gripper_move.x\n obj.y = action.position.y + obj.y - gripper_move.y\n else:\n obj.x = action.position.x\n obj.y = action.position.y\n obj.z = new_z\n gripper_move.x = action.position.x\n gripper_move.y = action.position.y\n gripper_move.z = new_z\n gripper_move.closed = False\n gripper_move.holding = ''\n results.append((success_state, alpha*1.0))\n\n # failure - no change\n results.append((state, (1 - alpha)*1.0))\n\n elif action.action_type == Action.OPEN_GRIPPER:\n gripper = utils.get_object(state, 'gripper')\n if not gripper.closed:\n results.append((state, 1.0))\n else:\n success_state = deepcopy(state)\n gripper = utils.get_object(state, 'gripper')\n gripper.closed = False\n obj = utils.gripper_object(success_state)\n if obj is None:\n results.append((success_state, 1.0))\n else:\n states = [success_state]\n probs = [1.0]\n prob_sum = 0\n decay = 1.0\n for z in range(obj.z - 1, -1, -1):\n decay *= 0.8\n if obj.__class__ == Item:\n for i in range(obj.z - z, obj.z + z + 1):\n for j in range(obj.z - z, obj.z + z + 1):\n states.append(utils.copy_state_move_object(success_state, obj.unique_name, i, j, z - obj.z))\n p = 1.0/(pow(2*(obj.z - z) + 1, 2))\n p *= decay\n probs.append(p)\n prob_sum += p\n elif obj.__class__ == Container:\n for i in range(int((obj.z - z)/2), int((obj.z + z)/2) + 1):\n for j in range(int((obj.z - z)/2), int((obj.z + z)/2) + 1):\n states.append(utils.copy_state_move_object(success_state, obj.unique_name, i, j, z - obj.z))\n p = 1.0/(pow(2*(int((obj.z - z)/2)) + 1, 2))\n p *= decay\n probs.append(p)\n prob_sum += p\n elif obj.__class__ == Lid:\n states.append(utils.copy_state_move_object(success_state, obj, 0, 0, z - obj.z))\n probs.append(decay)\n for i in range(len(probs)):\n probs[i] /= prob_sum\n results.extend(zip(states, probs))\n\n elif action.action_type == Action.CLOSE_GRIPPER:\n gripper = utils.get_object(state, 'gripper')\n if gripper.closed:\n results.append((state, 1.0))\n else:\n success_state = deepcopy(state)\n gripper = utils.get_object(state, 'gripper')\n gripper.closed = True\n if 'gripper_on_apple' 
and 'gripper_level_with_apple':\n gripper.holding = 'apple'\n results.append((success_state, 1.0))\n elif 'gripper_on_batteries' and 'gripper_level_with_batteries':\n gripper.holding = 'batteries'\n results.append((success_state, 1.0))\n elif 'gripper_on_flashlight' and 'gripper_level_with_flashlight':\n gripper.holding = 'flashlight'\n results.append((success_state, 1.0))\n elif 'gripper_on_granola' and 'gripper_level_with_granola':\n gripper.holding = 'granola'\n results.append((success_state, 1.0))\n elif 'gripper_on_knife' and 'gripper_level_with_knife':\n gripper.holding = 'knife'\n results.append((success_state, 1.0))\n elif 'gripper_on_small' and 'gripper_level_with_small':\n gripper.holding = 'small'\n results.append((success_state, 1.0))\n elif 'gripper_on_lid' and 'gripper_level_with_lid':\n failure_state = deepcopy(success_state)\n gripper.holding = 'lid'\n results.append((success_state, 0.1))\n results.append((failure_state, 0.9))\n elif 'gripper_touching_drawer' and 'gripper_right_of_drawer' and 'gripper_level_with_drawer':\n failure_state = deepcopy(success_state)\n gripper.holding = 'drawer'\n results.append((success_state, 0.2))\n results.append((failure_state, 0.8))\n elif 'gripper_on_large' and 'gripper_level_with_large':\n failure_state = deepcopy(success_state)\n gripper.holding = 'large'\n results.append((success_state, 0.875))\n results.append((failure_state, 0.125))\n\n elif action.action_type == Action.MOVE_ARM:\n pass\n\n elif action.action_type == Action.RAISE_ARM:\n alpha = 1.0\n gripper = utils.get_object(state, 'gripper')\n if 'gripper_on_lid' in state.relations and 'gripper_below_lid' in state.relations:\n alpha *= 0.8\n if 'gripper_on_drawer' in state.relations and 'gripper_below_drawer' in state.relations:\n alpha *= 0.8\n if 'gripper_on_stack' in state.relations and 'gripper_below_stack' in state.relations:\n alpha *= 0.8\n if 'gripper_on_small' in state.relations and 'gripper_below_small' in state.relations:\n alpha *= 0.8\n if 'gripper_on_large' in state.relations and 'gripper_below_large' in state.relations:\n alpha *= 0.8\n if gripper.holding in ['lid', 'small', 'large']:\n alpha *= 0.8\n success_state = deepcopy(state)\n gripper = utils.get_object(success_state, 'gripper')\n gripper.z += 1\n if gripper.z > 4:\n gripper.z = 4\n obj = utils.gripper_object(success_state)\n obj.z += 1\n if obj.z > 4:\n obj.z = 4\n results.append((success_state, alpha*1.0))\n\n # failure - no change\n results.append((state, (1 - alpha)*1.0))\n\n elif action.action_type == Action.LOWER_ARM:\n alpha = 1.0\n if 'gripper_on_lid' in state.relations and 'gripper_level_with_lid' in state.relations \\\n or 'gripper_on_small' in state.relations and 'gripper_level_with_small' in state.relations \\\n or 'gripper_on_large' in state.relations and 'gripper_level_with_large' in state.relations:\n alpha = 0\n else:\n gripper = utils.get_object(state, 'gripper')\n if 'gripper_on_drawer' in state.relations and 'gripper_above_drawer' in state.relations:\n alpha *= 0.8\n if 'gripper_on_stack' in state.relations and 'gripper_above_stack' in state.relations:\n alpha *= 0.8\n if gripper.holding in ['lid', 'small', 'large']:\n alpha *= 0.8\n success_state = deepcopy(state)\n gripper = utils.get_object(success_state, 'gripper')\n gripper.z -= 1\n if gripper.z < 0:\n gripper.z = 0\n obj = utils.gripper_object(success_state)\n obj.z -= 1\n if obj.z < 0:\n obj.z = 0\n results.append((success_state, alpha*1.0))\n\n # failure - no change\n results.append((state, (1 - alpha)*1.0))\n\n elif 
action.action_type == Action.RESET_ARM:\n # point distribution for success mixed with point distribution for failure\n alpha = 0.8\n\n # success - gripper moves to object position and holds object\n success_state = deepcopy(state)\n gripper = utils.get_object(success_state, 'gripper')\n gripper.x = 8\n gripper.y = 1\n gripper.z = 2\n\n results.append((success_state, alpha*1.0))\n\n # failure - no change\n results.append((state, (1 - alpha)*1.0))\n\n return results",
"def computeActionFromValues(self, state):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n \n # Code to remove --- from here\n resultingAction = None\n if self.mdp.isTerminal(state):\n return resultingAction\n else:\n bestq = float(\"-inf\")\n actions = self.mdp.getPossibleActions(state)\n for action in actions:\n qvalue = self.computeQValueFromValues(state, action)\n if qvalue > bestq:\n bestq = qvalue\n resultingAction = action\n return resultingAction\n\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"",
"def _successor(self, state: S, action: Action, *args, **kwargs) -> D:\n pass",
"def explore_act(self, states_ns):\n raise NotImplementedError",
"def exploit_act(self, states_ns):\n raise NotImplementedError",
"def act(self, state):\n return",
"def state_inherit(state, options, *, old=None, **sparsity_kwargs):\n\n # model state is inherited anyway, optimizer is conditional.\n optim_state, source_mapper, inheritable = {}, {}, False\n if options[\"snapshot\"] is not None:\n # Cold: parameters and buffers are loaded from some snapshot\n snapshot = load_snapshot(options[\"snapshot\"])\n\n # get the saved state of the optimizer and a name-id map\n saved = snapshot.get(\"optim\", {})\n if saved is not None:\n optim_state = saved.get(\"state\", {})\n source_mapper = snapshot.get(\"mapper\", {})\n\n # see if stored state an state.optim's state are compatible\n inheritable = isinstance(state.optim, get_class(saved.get(\"cls\")))\n\n # overwrite the parameters and buffers of the model\n state.model.load_state_dict(snapshot[\"model\"], strict=True)\n\n del snapshot, saved\n\n elif isinstance(old, State) and old.model is not None:\n # Warm: the previous model provides the parameters\n optim_state, source_mapper = old.optim.state_dict(), old.mapper\n inheritable = isinstance(state.optim, type(old.optim))\n\n # Acquire sparsity masks and non-zero parameters\n state_dict, masks = state_dict_with_masks(old.model, **sparsity_kwargs)\n if options[\"reset\"]:\n # Explicitly instructed to transfer masks only (if available).\n # `reset=True` makes sure that every mask in the receiving\n # model is initialized. A forward pass through a model with\n # an uninitialized mask would raise a RuntimeError.\n deploy_masks(state.model, state_dict=masks, reset=True)\n\n else:\n # Models in each stage are instances of the same underlying\n # architecture just with different traits. Hence only here\n # we allow missing or unexpected parameters when deploying\n # the state\n state.model.load_state_dict(state_dict, strict=False)\n\n del state_dict, masks\n\n else: # snapshot is None and old.model is None\n pass\n\n # check optimizer inheritance and restart flag, and deploy the state\n if (optim_state and inheritable) and not options[\"restart\"]:\n # construct a map of id-s from `source` (left) to `target` (right)\n mapping = join(right=state.mapper, left=source_mapper, how=\"inner\")\n deploy_optimizer(dict(mapping.values()), source=optim_state,\n target=state.optim)\n del mapping\n\n del optim_state, source_mapper\n\n return state",
"def decide_place(self, action):\n pass",
"def _hard_update(self, active, target):\n\n target.load_state_dict(active.state_dict())",
"def actions(self, states, agent_indices):\n return NotImplementedError()",
"def successors(self, state):\n abstract",
"def transact(self):",
"def transact(self):",
"def _setup_action_selection(self, state_ph):\n ### PROBLEM 2\n ### YOUR CODE HERE\n # raise NotImplementedError\n # current_state = tf.reshape(state_ph, (1, tf.size(state_ph)))\n\n current_state = tf.tile(state_ph, (self._num_random_action_selection, 1))\n horizon_rewards = 0\n sample_actions = tf.random_uniform(shape=(self._horizon, self._num_random_action_selection, self._action_dim),\n minval=self._action_space_low, maxval=self._action_space_high)\n\n for i in range(self._horizon):\n next_state_pred = self._dynamics_func(current_state, sample_actions[i], reuse=True)\n horizon_rewards = horizon_rewards + self._reward_func(current_state, sample_actions[i], next_state_pred)\n current_state = next_state_pred\n\n best_action_idx = tf.argmax(horizon_rewards)\n best_action_idx = tf.squeeze(best_action_idx)\n best_action = sample_actions[0][best_action_idx]\n\n return best_action"
]
| [
"0.6161125",
"0.59908533",
"0.590098",
"0.5820195",
"0.5756483",
"0.5716084",
"0.5650024",
"0.55370075",
"0.54980445",
"0.5488844",
"0.5428865",
"0.5424252",
"0.5393492",
"0.5381599",
"0.5360048",
"0.5320444",
"0.5301779",
"0.5292332",
"0.5284766",
"0.5248218",
"0.52251375",
"0.5224877",
"0.5189101",
"0.5189054",
"0.51750416",
"0.5168675",
"0.516195",
"0.513474",
"0.513474",
"0.51226664"
]
| 0.6796256 | 0 |
Announce frames to the API that may be queued for frame capturing later. Should be called after the frame is created. Call startCapture after this method. | def announce(self, payload_size: Optional[int] = None) -> None:
if payload_size is None:
payload_size = self._camera.PayloadSize
else:
if payload_size < self._camera.PayloadSize:
raise ValueError("Specified frame buffer is not large enough!")
    # allocate memory for the frame and keep a reference so it stays alive
self._c_memory = create_string_buffer(payload_size)
address = c_void_p(addressof(self._c_memory))
if address is None:
# this seems to be None if too much memory is requested
raise VimbaException(VimbaException.ERR_FRAME_BUFFER_MEMORY)
# tell the frame about the memory
self.data.buffer = address
self.data.bufferSize = self._camera.PayloadSize
error = vimba_c.vmb_frame_announce(self._camera.handle,
byref(self.data),
sizeof(self.data))
if error:
raise VimbaException(error) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_frame(frame: Frame, delay: Optional[int] = 1) -> None:\n # print('frame {}'.format(frame.data.frameID))\n \n # get a copy of the frame data\n _frames.append(frame)",
"def capture_frame(self):\n if self.capture_limit is None:\n capture_limit_reached = False\n else:\n capture_limit_reached = self.captured_frames >= \\\n self.capture_limit\n\n if not capture_limit_reached:\n for camera in self.get_cameras():\n camera.capture_image()\n self._captured_frames += 1\n else:\n for camera in self.get_cameras():\n camera.tear_down()",
"def update_frame(self, frame):\n\n t = datetime.now()\n delta_t = t - self.dpar.frame_timestamp[0]\n fps = self.dpar.update_fps(1./delta_t.total_seconds())\n\n self.dpar.frame_timestamp[0] = t\n\n if self.config.black_correct:\n cframe = self.ffc.black_correct(frame)\n else:\n cframe = frame\n\n self.dpar.latest_frame = np.copy(cframe)\n \n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(cframe[::4,::4], self.dpar.iwindow[0])\n self.cap_screen.cap_title = self._live_title(fps)\n self.cap_screen.setPixmap(pix)\n else: \n pix, gray = self._get_pixmap(cframe, self.dpar.iwindow[0])\n self.live_screen.live_title = self._live_title(fps)\n self.live_screen.setPixmap(pix)\n\n self.draw_histogram()\n\n\n if self.recording_sequence:\n\n # MRP ToDo update these tags properly.\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n ifi_ms = 1000. / self.camera.actual_frame_rate\n ts_ms = np.int(np.round(ifi_ms * self.seq_frame_num))\n\n self.ifd.update_tags((self.seq_frame_num, 0), et, 0, ts_ms, 99)\n\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n #cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n\n \"\"\"\n Perform the TIFF windowing and then rebinning (compress) according to config file options\n \"\"\"\n x0 = max(0, (cap_image.shape[1] - config.tiff_seq_x_window) // 2)\n x1 = cap_image.shape[1] - x0\n y0 = max(0, (cap_image.shape[0] - config.tiff_seq_y_window) // 2)\n y1 = cap_image.shape[0] - y0\n cap_image = cap_image[y0:y1, x0:x1]\n\n shift_bits = 16 - self.camera.pixel_bits\n if config.tiff_seq_rebin > 1: # not tested for r ne 2\n r = config.tiff_seq_rebin\n cap_image = cap_image.reshape((cap_image.shape[0] // r, r, cap_image.shape[1] // r, -1)).sum(axis=3).sum(axis=1)\n extra_bits = 2 * (r.bit_length() -1)\n shift_bits = max(0, shift_bits - extra_bits)\n\n\n #im = PIL.Image.fromarray(gray)\n im = PIL.Image.fromarray((cap_image << shift_bits).astype(np.uint16))\n\n im.save(self.tiff_out, tiffinfo=self.ifd, compression=TIFF_COMPRESSION)\n self.tiff_out.newFrame()\n self.seq_frame_num += 1\n self.seq_frame_label.setText(str(self.seq_frame_num))\n\n if self.recording_video:\n # cframe is int16\n #f8 = ((cframe >> (self.camera.pixel_bits - 8)) & 0xff).astype(np.uint8)\n #Style 1:\n #fc = np.stack((f8, f8, f8), axis=-1)\n #self.rv_vout.write(fc)\n #Style 2&3:\n self.rv_vout.write(gray)\n self.recorded_video_frame_number += 1\n #Style 4: (16-bit)\n #self.rv_vout.write(cframe)\n\n #if self.recorded_video_frame_number == 20:\n # self.record_video() # turn off",
"def _append_frame(self, observation):\n if self._counter % self._record_every == 0:\n self._frames.append(self._render_frame(observation[self.visual_key]))",
"def dispatch_frame(self, frame):",
"def start(self):\n\t\twhile self.capture_status:\n\t\t\t_, frame = self.cap.read()\n\t\t\tc_frame = frame[self.width / 2 - self.face_width / 2: self.width / 2 + self.face_width / 2,\n\t\t\t self.height / 2 - self.face_width / 2: self.height / 2 + self.face_height / 2, :]\n\t\t\tif not self.in_processing:\n\t\t\t\tself.frame = frame\n\t\t\t\tself.in_processing = True\n\t\t\tsleep(0.2)\n\t\tyield cv2.imdecode('png', c_frame)",
"def add_frame(self, frame: Frame):\n self.add(frame.timestep, frame.position, frame.orientation)",
"def __init__(self, frames):\n self._frames = frames",
"def __init__(self, frames):\n self._frames = frames",
"def __init__(self, frames):\n self._frames = frames",
"def __init__(self, frames):\n self._frames = frames",
"def run(self, frames, motions):\n detected = self._detector.run(frames, motions)\n\n for frame in detected:\n event = self._output_agent.process(frame)\n if event is not None:\n if event.action == EventVideoPolicy.START_RECORDING:\n timestamp = event.content[\"timestamp\"]\n thumbnail_key = self._take_snapshot(timestamp, frame)\n self._output_event(event, thumbnail_key,\n frame.metadata[\"labels\"])\n\n elif event.action == EventVideoPolicy.STOP_RECORDING:\n logging.info(\"End of event video\")",
"def show_frame(self, frame):\n self._i2c_write(_FRAME_REGISTER, frame, bank=_CONFIG_BANK)",
"def queue_for_capture(self, frame_callback: Optional[Callable] = None) -> None:\n self._frame_callback = frame_callback\n\n # define a callback wrapper here so it doesn't bind self\n def frame_callback_wrapper(camera_handle, frame_ptr):\n # call the user's callback with the self bound outside the wrapper\n # ignore the frame pointer since we already know the callback refers to this frame\n self._frame_callback(self)\n\n if self._frame_callback is None:\n self._frame_callback_wrapper_c = None\n else:\n # keep a reference to prevent gc issues\n self._frame_callback_wrapper_c = vimba_c.vmb_frame_callback_func(frame_callback_wrapper)\n\n error = vimba_c.vmb_capture_frame_queue(self._camera.handle,\n byref(self.data),\n self._frame_callback_wrapper_c)\n if error:\n raise VimbaException(error)",
"def capture_start(self):\n pass",
"def display_frame(self, frame=None):\n if frame is None:\n frame = self.get_frame()\n cv2.namedWindow('frame')\n cv2.imshow('frame', frame)\n cv2.waitKey(0)",
"def add_frame(\n self : \"animation\",\n frame : \"matplotlib.figure.Figure\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list([frame], facecolor=facecolor)",
"def stream_frames(video_capture):",
"def capture_video(self):\n while self.capturing:\n nparray = self.source.get_frame()\n self.frame_buffer.put(Frame(nparray, self.frame))\n self.frame += 1\n print \"Stopping Capture\"",
"def append_frames(self, frames):\n if len(frames) == 0:\n return\n new_channel = sppasChannel()\n new_channel.set_frames(self._channel.get_frames() + frames)\n new_channel.set_sampwidth(self._sampwidth)\n new_channel.set_framerate(self._framerate)\n self._channel = new_channel",
"def captureNextFrame(self):\r\n mainls = []\r\n\r\n\r\n ret, readFrame = self.capture.read()\r\n\r\n if (ret == True):\r\n self.currentFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2RGB)\r\n self.faceDetection(self.currentFrame)\r\n self.currentFrame = self.bbFrame",
"def add_frame(self, pngfile, delay=None, delay_denominator=None):\n if self._idx == self.num_frames:\n raise RuntimeError(\"Already reached the declared number of frames\")\n self.delay_numerator = self.default_delay_numerator\n self.delay_denominator = self.default_delay_denominator\n self._actl_written = False\n self._fctl_written = False\n if delay is not None:\n self.delay_numerator = delay\n if delay_denominator is not None:\n self.delay_denominator = delay_denominator\n self._add_png(pngfile)\n self._idx += 1\n if self._idx == self.num_frames:\n self._chunk(b\"IEND\", b\"\")",
"def record(self):\n\t\twhile True:\n\t\t\tif not self.recording:\n\t\t\t\tbreak\n\t\t\t#print('hal')\n\t\t\telapsed = (datetime.datetime.now() - self.timestamps[-1]).total_seconds()\n\t\t\tif elapsed < self.timePerFrame:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t#print(len(self.Video))\n\t\t\t\tret,frame = self.vs.read()\n\t\t\t\tif ret:\n\t\t\t\t\tself.timestamps.append(datetime.datetime.now())\n\t\t\t\t\tself.Video.append(frame)\n\t\t\t\t\tself.FPStracker.update()\n\t\t\t\t\tself.newAvailable = True\n\t\t\t\t\tif not self.threaded:\n\n\t\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tprint('error: camera failed to capture image')\n\t\t\t\t\tprint('canceling recording session')\n\t\t\t\t\tself.stop()\n\t\t#print('\\n recording loop ended, returning to main')\n\t\tself.vs.stop()\n\t\treturn",
"def render(self, frame: Frame):\n\n cv2.imshow(winname=self.title, mat=frame)\n cv2.waitKey(delay=self.delay)\n\n if self.step:\n while cv2.waitKey(delay=0) != self.step_key:\n continue",
"def _device_start_capture(self):\n\n # TODO: we may want to provide an option to flush the SDRam buffer here before capture stops?\n self._start_capture_to_ram()\n self._start_streaming_ram_to_host()",
"def start(self):\n self.log('Start capturing.')\n # ---\n try:\n self.setup()\n # run camera thread\n self._worker = Thread(target=self.run)\n self._worker.start()\n except StopIteration:\n self.log('Exception thrown.')",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def __init__(self, frames):\n self._frames = frames\n self._out = None",
"def __init__(self, frames):\n self._frames = frames\n self._out = None"
]
| [
"0.59467626",
"0.5877279",
"0.5710122",
"0.5584479",
"0.5575277",
"0.55211717",
"0.5515492",
"0.54915524",
"0.54915524",
"0.54915524",
"0.54915524",
"0.5471768",
"0.5418382",
"0.54165924",
"0.5404626",
"0.53927463",
"0.5368608",
"0.5352899",
"0.53498757",
"0.5316282",
"0.5300952",
"0.5297473",
"0.52829635",
"0.52781004",
"0.5274256",
"0.52675295",
"0.5265716",
"0.5265716",
"0.5265716",
"0.5265716"
]
| 0.63604873 | 0 |
Revoke a frame from the API. | def revoke(self) -> None:
error = vimba_c.vmb_frame_revoke(self._camera.handle,
byref(self.data))
if error:
raise VimbaException(error) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def revoke_token():\n return server.create_endpoint_response(RevocationEndpoint.ENDPOINT_NAME)",
"def del_frame(*args):\n return _ida_frame.del_frame(*args)",
"def fusion_api_revoke_certificate(self, name=None, api=None, headers=None):\n return self.ca.revoke(name=name, api=api, headers=headers)",
"def revoke_token():\n json_request = request.json\n refresh_token = json_request.get('refresh_token')\n if not refresh_token:\n return msg.errors.bad_request(\n 'You should provide refresh token for this call')\n RefreshToken.revoke(refresh_token)\n db.session.commit()\n return msg.success('Token is successfully revoked')",
"def revoke(self):\r\n return http.Request('DELETE', self.get_url()), parsers.parse_empty",
"def revoke(self, token):\n client = self.connect(VAULT_TOKEN)\n client.revoke_token(token)",
"def revoke_token(token):\n token.delete_instance()",
"def deauthorize():\n\tPAYLOAD_HEADERS.pop('Authorization', None)",
"async def revoke_token(self, request: Request, token: str) -> None:\n token_record = ...\n token_record.revoked = True\n token_record.save()",
"def revoke_agent(request):\n from .models import Agent\n\n request.agent = Agent.untrusted_agent(request.user)",
"def revoke_certificate(self):\n return self.__query(\"certificateRevoke\", kwargs)",
"def fusion_api_remove_rack(self, name=None, uri=None, api=None, headers=None):\n return self.rack.delete(name, uri, api, headers)",
"def revoke(self, request):\n if not self.interactive:\n raise exceptions.InvalidValue(\n \"Revoke is only enabled under interactive mode.\"\n )\n self._validate_running_mode()\n\n if not _helpers.is_python_3():\n raise exceptions.RefreshError(\n \"Pluggable auth is only supported for python 3.7+\"\n )\n\n # Inject variables\n env = os.environ.copy()\n self._inject_env_variables(env)\n env[\"GOOGLE_EXTERNAL_ACCOUNT_REVOKE\"] = \"1\"\n\n # Run executable\n result = subprocess.run(\n self._credential_source_executable_command.split(),\n timeout=self._credential_source_executable_interactive_timeout_millis\n / 1000,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=env,\n )\n\n if result.returncode != 0:\n raise exceptions.RefreshError(\n \"Auth revoke failed on executable. Exit with non-zero return code {}. Error: {}\".format(\n result.returncode, result.stdout\n )\n )\n\n response = json.loads(result.stdout.decode(\"utf-8\"))\n self._validate_revoke_response(response)",
"def delFrameByName(self, frameName):\n self.data.frames[frameName].destroy()\n del self.data.frames[frameName]",
"def revoke_api_access(application):\n try:\n file = open(PATH + '/../DB/access.json', 'r')\n accessData = json.load(file)\n if (application in accessData):\n accessData.pop(application, None)\n\n with open(PATH + '/../DB/access.json', 'w') as f:\n f.write(json.dumps(accessData, indent=4, sort_keys=True)) \n except:\n raise",
"def unlink_obj(self, ref_frame, obj_name=None, delete=True):\n self.scene.remove_attached_object(ref_frame, obj_name)\n if delete:\n self.remove_obj(obj_name)",
"def revoke(self):\n if self.access_token is None:\n raise InvalidInvocation('no token available to revoke')\n\n self._authenticator.revoke_token(self.access_token, 'access_token')\n self._clear_access_token()",
"def RevokeAccessURI(self) -> None:\n logger.info('Revoking SAS URI for snapshot {0:s}'.format(self.name))\n request = self.compute_client.snapshots.begin_revoke_access(\n self.resource_group_name, self.name)\n request.wait()\n logger.info('SAS URI revoked for snapshot {0:s}'.format(self.name))",
"def deleteModFrame(self,name):\n del self.data.activeMod[name]",
"def kill(self, signum, frame):\n self.delete()\n os.kill(os.getpid(), signum)",
"def fusion_api_remove_license(self, uri=None, api=None, headers=None):\n return self.license.delete(uri=uri, api=api, headers=headers)",
"def revoke_token(self, token, orphan=False, accessor=False):\n if accessor and orphan:\n msg = \"revoke_token does not support 'orphan' and 'accessor' flags together\"\n raise InvalidRequest(msg)\n elif accessor:\n self._post('/v1/auth/token/revoke-accessor/{0}'.format(token))\n elif orphan:\n self._post('/v1/auth/token/revoke-orphan/{0}'.format(token))\n else:\n self._post('/v1/auth/token/revoke/{0}'.format(token))",
"def DeleteVideo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def fusion_api_remove_enclosure(self, name=None, uri=None, param='', api=None, headers=None):\n return self.enclosure.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def test_revoke_inactive(self):\n self.invite.active = False\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_revoke',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 400, msg=response.content)",
"def DismissApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def RemoveFrameObj(self, frameName):\n if self.frame_objects.has_key(frameName):\n self.frame_objects.pop(frameName)",
"def deauth(request):\n\n if(request.token):\n request.token.delete()\n return JsonResponse({'message': 'Your token is revoked'}) \n else:\n return HttpResponseBadRequest('It does not make sense to revoke a token ' +\n 'if no token are supplied to the request')",
"def revoke_access_token(self):\n response = self._telegraph.method('revokeAccessToken')\n\n self._telegraph.access_token = response.get('access_token')\n\n return response",
"def gdisconnect():\n\taccess_token = session.get('access_token')\n\tuname = session.get('username')\n\n\tif not access_token:\n\t\tresponse = make_response(\n\t\t\tjson.dumps('Current user not connected.'), 401)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\treturn response\n\n\turl = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n\th = httplib2.Http()\n\tresult = h.request(url, 'GET')[0]\n\n\tif result['status'] != '200':\n\t\t# For whatever reason, the given token was invalid.\n\t\tresponse = make_response(\n\t\t\tjson.dumps('Failed to revoke token for given user.'), 400)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\t[session.pop(k, None) for k, _ in session.items()]\n\t\treturn response\n\t#Clearing out session data\n\t[session.pop(k, None) for k, _ in session.items()]\n\treturn redirect(request.referrer)"
]
| [
"0.6457462",
"0.63507867",
"0.6242425",
"0.6187559",
"0.6143227",
"0.6090711",
"0.5983966",
"0.5945101",
"0.5867708",
"0.5800802",
"0.5716971",
"0.55593175",
"0.55128205",
"0.5473526",
"0.54718405",
"0.5422895",
"0.54017884",
"0.5385355",
"0.5370714",
"0.53591573",
"0.5344741",
"0.5330144",
"0.5321092",
"0.5265948",
"0.5259794",
"0.52451235",
"0.52430576",
"0.522579",
"0.5218087",
"0.5217631"
]
| 0.7699508 | 0 |
Queue frames that may be filled during frame capturing. Call after announceFrame and startCapture. Callback must accept argument of type frame. Remember to requeue the frame by calling frame.queue_capture() at the end of your callback function. | def queue_for_capture(self, frame_callback: Optional[Callable] = None) -> None:
self._frame_callback = frame_callback
# define a callback wrapper here so it doesn't bind self
def frame_callback_wrapper(camera_handle, frame_ptr):
# call the user's callback with the self bound outside the wrapper
# ignore the frame pointer since we already know the callback refers to this frame
self._frame_callback(self)
if self._frame_callback is None:
self._frame_callback_wrapper_c = None
else:
# keep a reference to prevent gc issues
self._frame_callback_wrapper_c = vimba_c.vmb_frame_callback_func(frame_callback_wrapper)
error = vimba_c.vmb_capture_frame_queue(self._camera.handle,
byref(self.data),
self._frame_callback_wrapper_c)
if error:
raise VimbaException(error) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def capture_frame(self):\n if self.capture_limit is None:\n capture_limit_reached = False\n else:\n capture_limit_reached = self.captured_frames >= \\\n self.capture_limit\n\n if not capture_limit_reached:\n for camera in self.get_cameras():\n camera.capture_image()\n self._captured_frames += 1\n else:\n for camera in self.get_cameras():\n camera.tear_down()",
"def on_queue_declared(frame):\n start_consuming(frame)",
"def callback(indata, frames, time, status):\n if status:\n print(status, flush=True)\n queue.put(indata.copy())",
"def run_capture(video_q):\n cam = cv2.VideoCapture(0)\n print(f\"width: {cam.get(3)}, height: {cam.get(4)}, fps: {cam.get(5)}\")\n while is_running:\n\n if not video_q.full(): \n ok, frame = cam.read()\n if not ok:\n # camera disconnected\n break\n\n video_q.put(frame)\n\n cam.release()\n\n # empty the queue otherwise the main process will hand as the queue feeder\n # thread will not terminate while the queue has items. Empty it here as this\n # is the only place that adds to the queue\n while not video_q.empty():\n video_q.get()\n\n print(\"camera thread exited\")",
"def capture_video(self):\n while self.capturing:\n nparray = self.source.get_frame()\n self.frame_buffer.put(Frame(nparray, self.frame))\n self.frame += 1\n print \"Stopping Capture\"",
"def captureNextFrame(self):\r\n mainls = []\r\n\r\n\r\n ret, readFrame = self.capture.read()\r\n\r\n if (ret == True):\r\n self.currentFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2RGB)\r\n self.faceDetection(self.currentFrame)\r\n self.currentFrame = self.bbFrame",
"def capture(camera=None,\n width=None,\n height=None,\n filename=None,\n start_frame=None,\n end_frame=None,\n frame=None,\n format='qt',\n compression='h264',\n off_screen=False,\n viewer=True,\n isolate=None,\n maintain_aspect_ratio=True,\n overwrite=False,\n raw_frame_numbers=False,\n camera_options=None,\n viewport_options=None,\n display_options=None,\n complete_filename=None):\n\n from maya import cmds\n\n camera = camera or \"persp\"\n\n # Ensure camera exists\n if not cmds.objExists(camera):\n raise RuntimeError(\"Camera does not exist: {0}\".format(camera))\n\n width = width or cmds.getAttr(\"defaultResolution.width\")\n height = height or cmds.getAttr(\"defaultResolution.height\")\n if maintain_aspect_ratio:\n ratio = cmds.getAttr(\"defaultResolution.deviceAspectRatio\")\n height = width / ratio\n\n\n start_frame = start_frame or cmds.playbackOptions(minTime=True, query=True)\n end_frame = end_frame or cmds.playbackOptions(maxTime=True, query=True)\n\n # We need to wrap `completeFilename`, otherwise even when None is provided\n # it will use filename as the exact name. Only when lacking as argument\n # does it function correctly.\n playblast_kwargs = dict()\n if complete_filename:\n playblast_kwargs['completeFilename'] = complete_filename\n if frame:\n playblast_kwargs['frame'] = frame\n\n with _independent_panel(\n width=width+10,\n height=height+10) as panel:\n\n cmds.lookThru(panel, camera)\n cmds.setFocus(panel)\n\n assert panel in cmds.playblast(activeEditor=True)\n\n with _applied_viewport_options(viewport_options, panel):\n with _applied_camera_options(camera_options, panel, camera):\n with _applied_display_options(display_options):\n with _isolated_nodes(isolate, panel):\n output = cmds.playblast(\n compression=compression,\n format=format,\n percent=100,\n quality=100,\n viewer=viewer,\n startTime=start_frame,\n endTime=end_frame,\n offScreen=off_screen,\n forceOverwrite=overwrite,\n filename=filename,\n widthHeight=[width, height],\n rawFrameNumbers=raw_frame_numbers,\n **playblast_kwargs)\n\n return output",
"def start(self):\n\t\twhile self.capture_status:\n\t\t\t_, frame = self.cap.read()\n\t\t\tc_frame = frame[self.width / 2 - self.face_width / 2: self.width / 2 + self.face_width / 2,\n\t\t\t self.height / 2 - self.face_width / 2: self.height / 2 + self.face_height / 2, :]\n\t\t\tif not self.in_processing:\n\t\t\t\tself.frame = frame\n\t\t\t\tself.in_processing = True\n\t\t\tsleep(0.2)\n\t\tyield cv2.imdecode('png', c_frame)",
"def dispatch_frame(self, frame):\n if frame.command == 'RECEIPT':\n self.receipt_queue.put(frame)\n elif frame.command == 'MESSAGE':\n with self.subscription_lock:\n if frame.destination in self.subscribed_destinations:\n enqueue = True\n else:\n enqueue = False\n if self.debug:\n self.log.debug(\"Ignoring frame for unsubscribed destination: %s\" % frame)\n if enqueue:\n self.message_queue.put(frame)\n elif frame.command == 'ERROR':\n self.error_queue.put(frame)\n elif frame.command == 'CONNECTED':\n self.connected_queue.put(frame)\n else:\n self.log.info(\"Ignoring frame from server: %s\" % frame)",
"def stream_frames(video_capture):",
"def dispatch_frame(self, frame):\n if frame.command == 'RECEIPT':\n self.receipt_queue.put(frame)\n elif frame.command == 'MESSAGE':\n with self.subscription_lock:\n if frame.destination in self.subscribed_destinations:\n handler = self.subscribed_destinations[frame.destination]\n else:\n handler = lambda f: None\n if self.debug:\n self.log.debug(\"Ignoring frame for unsubscribed destination: %s\" % frame)\n handler(frame)\n elif frame.command == 'ERROR':\n self.error_queue.put(frame)\n elif frame.command == 'CONNECTED':\n self.connected_queue.put(frame)\n else:\n self.log.info(\"Ignoring frame from server: %s\" % frame)",
"def run_frame_thread(frame_reader_class, camera_id, video_source, queue, fps):\n logging.debug(\"starting frame_reader run loop\")\n try:\n frame_reader = frame_reader_class(camera_id, video_source)\n except Exception as e:\n logger.critical(\"Failed to load CV2FrameReader: %s\" % e)\n raise e\n\n # dump initial frames, as it seems certain cameras\n # flub the first few for some reason:\n for i in range(5):\n frame_reader.get_frame()\n while True:\n try:\n frame = frame_reader.get_frame()\n except queue.Empty:\n continue\n except Exception as e:\n logger.error(\"Failed to instantiate Frame: %s\" % e)\n try:\n queue.put(frame)\n except Exception as e:\n print(\"Failed to put frame onto queue: %s\" % e)\n time.sleep(1.0/fps)",
"def display_frame(frame: Frame, delay: Optional[int] = 1) -> None:\n # print('frame {}'.format(frame.data.frameID))\n \n # get a copy of the frame data\n _frames.append(frame)",
"def __videoThread(self):\n\n self.frameList = []\n\n fpsTimer = FpsTimer(self.fps)\n printf(\"Starting videoStream thread.\")\n while self.running:\n fpsTimer.wait()\n if not fpsTimer.ready(): continue\n if self.setCamera is not None: self.__setNewCamera(self.setCamera)\n if self.paused: continue\n if self.cap is None: continue\n\n\n # Get a new frame\n ret, newFrame = self.cap.read()\n\n if not ret: # If a frame was not successfully returned\n printf(\"ERROR: while reading frame from Cam. Setting camera again...\")\n self.__setNewCamera(self.cameraID)\n cv2.waitKey(1000)\n continue\n\n\n # Do frame related work\n with self.frameLock:\n self.frame = newFrame\n\n # Add a frame to the frameList that records the 5 latest frames for Vision uses\n self.frameList.insert(0, self.frame.copy())\n # print(\"len\", len(self.frameList), \"Curr frames: \", [id(frame) for frame in self.frameList])\n while len(self.frameList) > 10:\n del self.frameList[-1]\n\n # Keep track of new frames by counting them. (100 is an arbitrary number)\n if self.frameCount >= 100:\n self.frameCount = 0\n else:\n self.frameCount += 1\n\n\n # Run any work functions that must be run. Expect no results. Work should be run before filters.\n if len(self.workList) > 0:\n # print(\"Work: \", self.workList)\n with self.workLock:\n for workFunc in self.workList:\n workFunc(self.frame)\n\n\n\n # Run any filters that must be run, save the results in self.filterFrame\n if len(self.filterList) > 0:\n # print(\"Filters: \", self.filterList)\n with self.filterLock:\n filterFrame = self.getFrame()\n for filterFunc in self.filterList:\n filterFrame = filterFunc(filterFrame)\n\n # Draw FPS on the screen\n fps = str(int(round(fpsTimer.currentFPS, 0)))\n cv2.putText(filterFrame, fps, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.25, (255, 255, 255), 2)\n\n self.filterFrame = filterFrame\n\n\n else:\n self.filterFrame = self.frame\n\n printf(\"VideoStream Thread has ended\")",
"def update_frame(self, frame):\n\n t = datetime.now()\n delta_t = t - self.dpar.frame_timestamp[0]\n fps = self.dpar.update_fps(1./delta_t.total_seconds())\n\n self.dpar.frame_timestamp[0] = t\n\n if self.config.black_correct:\n cframe = self.ffc.black_correct(frame)\n else:\n cframe = frame\n\n self.dpar.latest_frame = np.copy(cframe)\n \n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(cframe[::4,::4], self.dpar.iwindow[0])\n self.cap_screen.cap_title = self._live_title(fps)\n self.cap_screen.setPixmap(pix)\n else: \n pix, gray = self._get_pixmap(cframe, self.dpar.iwindow[0])\n self.live_screen.live_title = self._live_title(fps)\n self.live_screen.setPixmap(pix)\n\n self.draw_histogram()\n\n\n if self.recording_sequence:\n\n # MRP ToDo update these tags properly.\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n ifi_ms = 1000. / self.camera.actual_frame_rate\n ts_ms = np.int(np.round(ifi_ms * self.seq_frame_num))\n\n self.ifd.update_tags((self.seq_frame_num, 0), et, 0, ts_ms, 99)\n\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n #cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n\n \"\"\"\n Perform the TIFF windowing and then rebinning (compress) according to config file options\n \"\"\"\n x0 = max(0, (cap_image.shape[1] - config.tiff_seq_x_window) // 2)\n x1 = cap_image.shape[1] - x0\n y0 = max(0, (cap_image.shape[0] - config.tiff_seq_y_window) // 2)\n y1 = cap_image.shape[0] - y0\n cap_image = cap_image[y0:y1, x0:x1]\n\n shift_bits = 16 - self.camera.pixel_bits\n if config.tiff_seq_rebin > 1: # not tested for r ne 2\n r = config.tiff_seq_rebin\n cap_image = cap_image.reshape((cap_image.shape[0] // r, r, cap_image.shape[1] // r, -1)).sum(axis=3).sum(axis=1)\n extra_bits = 2 * (r.bit_length() -1)\n shift_bits = max(0, shift_bits - extra_bits)\n\n\n #im = PIL.Image.fromarray(gray)\n im = PIL.Image.fromarray((cap_image << shift_bits).astype(np.uint16))\n\n im.save(self.tiff_out, tiffinfo=self.ifd, compression=TIFF_COMPRESSION)\n self.tiff_out.newFrame()\n self.seq_frame_num += 1\n self.seq_frame_label.setText(str(self.seq_frame_num))\n\n if self.recording_video:\n # cframe is int16\n #f8 = ((cframe >> (self.camera.pixel_bits - 8)) & 0xff).astype(np.uint8)\n #Style 1:\n #fc = np.stack((f8, f8, f8), axis=-1)\n #self.rv_vout.write(fc)\n #Style 2&3:\n self.rv_vout.write(gray)\n self.recorded_video_frame_number += 1\n #Style 4: (16-bit)\n #self.rv_vout.write(cframe)\n\n #if self.recorded_video_frame_number == 20:\n # self.record_video() # turn off",
"def _frameSaving(self):\n self.mmc.clearCircularBuffer()\n imageCount=0\n self.mmc.startContinuousSequenceAcquisition(1)\n while(imageCount<(self.nbFrames) and self.acqRunning and self.loopRunning):\n if self.mmc.getRemainingImageCount() > 0: #Returns number of image in circular buffer, stop when seq acq finished #Enter this loop BETWEEN acquisition\n #trigImage(labjack) #Generate a pulse, which allows to flag the entry in this code statement with the oscilloscope\n img = self.mmc.popNextImage() #Gets and removes the next image from the circular buffer\n saveFrame(img, self.tiffWriterList, (imageCount), self.maxFrames) # saving frame of previous acquisition\n imageCount +=1\n self.progressSig.emit(imageCount)\n\n\n\n #Stop camera acquisition #Ensure that no more frames are taken\n self.mmc.stopSequenceAcquisition()\n\n #### IF ABORTED acquisition #####\n self._circularBufferCleaning(imageCount)\n\n #Close tiff file open\n tiffWritersClose(self.tiffWriterList)\n print('end of the _frameSavingThread')\n return imageCount",
"def dispatch_frame(self, frame):",
"def dummy_videoframe_handler(frame, userdata=None):\n sys.stdout.write('Got frame %d\\r' % userdata.count())\n sys.stdout.flush()\n userdata.increment()",
"def arm_video(self, nframes, timeout=30):\n pass",
"def on_queue_declared(self, frame):\n\t\tself.channel.basic_qos(prefetch_count=1)\n\t\tself.channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\t\tself.consumer_tag = self.channel.basic_consume(\n\t\t\tself.handle_delivery, \n\t\t\tframe.method.queue\n\t\t)",
"def captureNextFrame(self):\n ret, readFrame=self.capture.read()\n if(ret==True):\n self.currentFrame=cv2.cvtColor(readFrame,cv2.COLOR_BGR2RGB)",
"def animate(self, frames, callback=None):\n if isinstance(frames, numbers.Integral):\n frames = (frames, 30.0)\n\n if isinstance(frames, tuple):\n frame_count, frame_rate = frames\n frames = numpy.linspace(0, frame_count / frame_rate, frame_count + 1, endpoint=True)\n\n for index in range(0, len(frames) - 1):\n frame = AnimationFrame(index, frames[index], frames[index + 1], self._animation)\n if callback:\n callback(frame)\n\n # Record the end-time of the last frame, so backends can calculate frame durations.\n self._animation[frames[-1]]",
"def record(self):\n\t\twhile True:\n\t\t\tif not self.recording:\n\t\t\t\tbreak\n\t\t\t#print('hal')\n\t\t\telapsed = (datetime.datetime.now() - self.timestamps[-1]).total_seconds()\n\t\t\tif elapsed < self.timePerFrame:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t#print(len(self.Video))\n\t\t\t\tret,frame = self.vs.read()\n\t\t\t\tif ret:\n\t\t\t\t\tself.timestamps.append(datetime.datetime.now())\n\t\t\t\t\tself.Video.append(frame)\n\t\t\t\t\tself.FPStracker.update()\n\t\t\t\t\tself.newAvailable = True\n\t\t\t\t\tif not self.threaded:\n\n\t\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tprint('error: camera failed to capture image')\n\t\t\t\t\tprint('canceling recording session')\n\t\t\t\t\tself.stop()\n\t\t#print('\\n recording loop ended, returning to main')\n\t\tself.vs.stop()\n\t\treturn",
"def run(self, frames, motions):\n detected = self._detector.run(frames, motions)\n\n for frame in detected:\n event = self._output_agent.process(frame)\n if event is not None:\n if event.action == EventVideoPolicy.START_RECORDING:\n timestamp = event.content[\"timestamp\"]\n thumbnail_key = self._take_snapshot(timestamp, frame)\n self._output_event(event, thumbnail_key,\n frame.metadata[\"labels\"])\n\n elif event.action == EventVideoPolicy.STOP_RECORDING:\n logging.info(\"End of event video\")",
"def get_frame_by_frame(name=None, fps=4, write_to_disk=False, display_feed=False, on_capture=None):\n\n reset_camera()\n\n if name is None:\n name = \"fbf_\" + str(int(time()))\n \n dname = None\n if write_to_disk:\n chdir(cwd)\n dname = join(dirname(realpath(sys.argv[0])), \"train\", \"data\", name)\n if not exists(dname):\n print(\"Created dir: %s\" % dname)\n mkdir(dname)\n else:\n print(\"Using dir: %s\" % dname)\n else:\n print('Not writing to disk')\n\n def _snap(name, dname, write, display, capture_callback):\n global camera\n s, img = camera.read()\n\n if s and capture_callback:\n img = capture_callback(img)\n\n if s and display:\n cv2.imshow(name, img)\n cv2.waitKey(1) \n\n if write:\n chdir(dname)\n number_of_files = len([item for item in os.listdir(dname) if os.path.isfile(os.path.join(dname, item))])\n path = \"./\" + str(number_of_files + 1) + \".png\"\n if s:\n imwrite(path, img)\n print(\"Saved to \" + dname + \"/\" + str(number_of_files + 1) + \".png\")\n else:\n print(\"Could not read image %d from camera\" % (number_of_files + 1))\n chdir(cwd)\n\n return Timer(1 / fps, _snap, name, dname, write_to_disk, display_feed, on_capture).use_mp()",
"def on_bindok(self, unused_frame):\n logger.info('Queue bound')\n self.setup_error_queue()",
"def callback(indata, frames, time, status):\n if status:\n print(status)\n q.put(indata.copy())",
"def callback(indata, frames, time, status):\n if status:\n print(status)\n q.put(indata.copy())",
"def add_frame(\n self : \"animation\",\n frame : \"matplotlib.figure.Figure\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list([frame], facecolor=facecolor)",
"def analyse(self, frame_queue, analysed_frame_queue, autonomous_states_queue, \\\n commands_queue, car_states_queue):\n current_thread = threading.currentThread()\n self.__command_timer = time.time()\n bln_knn_training_successful = DetectChars.loadKNNDataAndTrainKNN() # attempt KNN training\n if bool(bln_knn_training_successful) is False:\n return\n\n self.__fps_timer = time.time()\n while getattr(current_thread, 'is_running', True):\n string_data = frame_queue.get(True, None)\n frame = numpy.fromstring(string_data, dtype='uint8')\n self.__current_frame = cv2.imdecode(frame, 1)\n\n self.__get_cruise_states_data(car_states_queue)\n\n if getattr(current_thread, 'is_analysing', True):\n self.__car_detection(autonomous_states_queue)\n self.__detect_objects()\n self.__lane_assist()\n\n if getattr(current_thread, 'is_deciding', True):\n self.__take_cruise_decision(commands_queue)\n self.__avoid_detected_objects(commands_queue)\n\n self.__draw_rect_around_plate(self.__current_frame)\n self.__draw_distance_to_car()\n self.__draw_car_cruise_watch_area()\n self.__draw_lane_assist_decision()\n self.__draw_detected_objects()\n\n self.__draw_fps()\n\n result, encrypted_image = \\\n cv2.imencode('.jpg', self.__current_frame, self.__encode_parameter)\n\n if bool(result) is False:\n break\n\n analysed_frame = numpy.array(encrypted_image)\n analysed_frame_queue.put(str(analysed_frame.tostring()), True, None)\n frame_queue.task_done()\n\n self.__fps_counter = self.__fps_counter + 1\n\n if time.time() - self.__fps_timer > 1:\n self.__frame_fps = self.__fps_counter\n self.__fps_counter = 0\n self.__fps_timer = time.time()"
]
| [
"0.6528172",
"0.6219949",
"0.6177433",
"0.60546577",
"0.59445876",
"0.5801518",
"0.5797259",
"0.57847846",
"0.57812697",
"0.5759052",
"0.5702521",
"0.5698096",
"0.56379294",
"0.5625848",
"0.5597318",
"0.5510694",
"0.5507248",
"0.5477359",
"0.54491526",
"0.54260653",
"0.5425414",
"0.54126656",
"0.5408698",
"0.53987163",
"0.53928417",
"0.5384752",
"0.5362047",
"0.5362047",
"0.53516304",
"0.5347635"
]
| 0.809999 | 0 |
Get a copy of the frame's buffer data as a ctypes c_ubyte array. | def buffer_data(self):
# create a ctypes pointer to the buffer
buffer_ptr = cast(self.data.buffer, POINTER(c_ubyte * self.data.bufferSize))
# contents always returns a copy
return buffer_ptr.contents | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toubytes(data, buf_len):\n buf = bytearray(data)\n buffer = (ctypes.c_ubyte * buf_len).from_buffer(buf)\n return buffer",
"def toubyte(data):\n buf = bytearray(data)\n buffer = (ctypes.c_ubyte).from_buffer(buf)\n return buffer",
"def get_data(self):\n oshape = (ctypes.c_uint * 4)()\n ostride = ctypes.c_uint()\n ret = cxnlib.CXNIOGetData(self.handle,\n oshape, ctypes.byref(ostride))\n return ctypes2numpyT(ret, [x for x in oshape], 'float32', ostride.value)",
"def get_raw(self):\r\n check_mixer()\r\n return ffi.buffer(ffi.cast('char*', self.chunk.abuf),\r\n self.chunk.alen)[:]",
"async def _retrieve_frame(self, mode: BufferRetrieveMode) -> RawArray:",
"def buffer(self) -> np.ndarray:\n return np.array(self._image_data, copy=False)",
"def tobytes(data, buf_len):\n buf = bytearray(data)\n buffer = (ctypes.c_byte * buf_len).from_buffer(buf)\n return buffer",
"async def get(self) -> RawArray:\r\n if self.empty():\r\n return None\r\n frame = self.frames[self._read_index]\r\n\r\n self._read_index = (self._read_index + 1) % self.capacity()\r\n self._is_full = False\r\n\r\n return frame",
"def raw_data(self):\n return self._buf[self.data_offset():self.data_offset() + self.size()]",
"def get_buf(self, data_type = \"void\"):\n if self.buf is not None:\n return ffi.cast(data_type + \"*\", self.buf)\n else:\n raise RuntimeError(\"Buffer not created.\")",
"def as_bytearray(self):\n\n if self.index < 7:\n return self.buf + bytearray([self.byte])\n else:\n return self.buf",
"def getBufferedData(self):\n if not self.ringBuffer: # first time when buffer is empty\n return np.zeros((1, self.windowLength, self.sensorChannels)) \n return np.array(self.ringBuffer)",
"def data(self):\n return self._buf[self._offset : self._offset + self._size]",
"def as_numpy_array(self):\n return self.frame",
"def readRaw(self):\r\n count = 2048 #this is as big as the library buffer, so the user doesn't have to poll as often\r\n buf = [] #buffer that will hold the read raw data and be returned to the user\r\n dataPtr = (c_int * count)()\r\n length = c_int()\r\n length.value = count;\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetIR_getRawData(self.handle, dataPtr, byref(length))\r\n except RuntimeError:\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)\r\n \r\n for i in range(length.value):\r\n buf.append(dataPtr[i])\r\n \r\n return buf",
"def rawdata(self):\n data_size = gdef.ULONG()\n winproxy.CM_Get_Res_Des_Data_Size(data_size, self)\n if not self:\n return None\n data_size = data_size.value\n buffer = ctypes.create_string_buffer(data_size)\n winproxy.CM_Get_Res_Des_Data(self, buffer, data_size)\n return bytearray(buffer[:data_size])",
"def buffer_data_numpy(self) -> np.ndarray:\n # mask the last 4 bytes to reduce pixel format to mono/color mode and bit width info\n pixel_format = self.data.pixelFormat & 0xFFFF0000\n try:\n arr_dtype, arr_channels = PIXELFORMAT_TO_DTYPE_CHANNELS[pixel_format]\n except KeyError as ex:\n raise NotImplementedError('Pixel format not supported!') from ex\n\n arr_shape = (self.data.height, self.data.width, arr_channels) if arr_channels > 1 \\\n else (self.data.height, self.data.width)\n\n return np.ndarray(buffer=self.buffer_data(),\n dtype=arr_dtype,\n shape=arr_shape)",
"def read(self) -> np.array:\n return self._stream.read(self._frame_size)",
"def to_numpy(self) -> np.ndarray:\n return self.frame",
"def grab_frame(self):\n with self._buflock:\n if self._buffer is None:\n return None\n buf = self._buffer.tostring()\n return buf",
"def snapshot2(self) -> np.array:\n fbo = self.fbo\n data = fbo.read(components=3, dtype='f4')\n w, h = self.size\n return np.flipud(np.frombuffer(data, dtype='f4').reshape((h, w, 3)))",
"def recv_array(self, flags=0, copy=True, track=False):\n md = self.recv_json(flags=flags)\n msg = self.recv(flags=flags, copy=copy, track=track)\n A = numpy.frombuffer(msg, dtype=md['dtype'])\n return A.reshape(md['shape'])",
"def recv_array(self, flags=0, copy=True, track=False):\n md = self.recv_json(flags=flags)\n msg = self.recv(flags=flags, copy=copy, track=track)\n A = numpy.frombuffer(msg, dtype=md['dtype'])\n return A.reshape(md['shape'])",
"def _make_array(self, c):\n return (c * ctypes.py_object)()",
"def GetDataBuffer(self):\n pass",
"def get_data(self):\n return self.data[self._size:self._size + self._len]",
"def ctypes2numpy(cptr, length, dtype=numpy.float32):\n #assert isinstance(cptr, ctypes.POINTER(ctypes.c_float))\n res = numpy.zeros(length, dtype=dtype)\n if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):\n raise AssertionError('ctypes.memmove failed')\n return res",
"def decode_frame(self, buf):\n import numpy as np\n from cv2 import cvtColor\n\n w, h = self._resolution\n arr = np.fromstring(buf, 'uint8').reshape((h + h / 2, w))\n arr = cvtColor(arr, 93) # NV21 -> BGR\n return arr",
"def read(self):\r\n\t\t# get data from camera\r\n\t\tarray = self.ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)\r\n\t\t# get frame as numpy array\r\n\t\tframe = np.reshape(array,(self.height.value, self.width.value, self.bytes_per_pixel))\r\n\t\t\r\n\t\t\"\"\"\r\n\t\tcamera_matrix = np.array([\r\n\t\t\t[4.5330796457901283e+02, 0., 6.1902229288626302e+02],\r\n\t\t\t[0., 4.5369175559310276e+02, 5.1298362120979994e+02],\r\n\t\t\t[0., 0., 1.]])\r\n\t\t\r\n\t\tdist_coeffs = np.array([\r\n\t\t\t-3.1812973406286371e-01, 9.6396352148682182e-02,\r\n\t\t\t2.9601124432187590e-03, 9.7700591472463412e-04,\r\n\t\t\t-1.1929681608809075e-02\r\n\t\t])\r\n\r\n\t\tframe = cv2.undistort(frame, camera_matrix, dist_coeffs, camera_matrix)\r\n\t\t\"\"\"\r\n\r\n\t\treturn frame",
"def readbytes(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_readbytes(self, *args)"
]
| [
"0.7058171",
"0.67911583",
"0.65706354",
"0.65090454",
"0.64190936",
"0.63389236",
"0.6210867",
"0.6066091",
"0.605125",
"0.6025154",
"0.59680426",
"0.5928322",
"0.5925776",
"0.58375514",
"0.5806147",
"0.57676566",
"0.5759585",
"0.5745099",
"0.5737871",
"0.571216",
"0.56842846",
"0.56719553",
"0.56719553",
"0.56407815",
"0.5590547",
"0.5572849",
"0.55570555",
"0.5551071",
"0.5536644",
"0.55200505"
]
| 0.7441798 | 0 |
Join the gene tables to a single gene table | def join_gene_tables(gene_tables,output,verbose=None):
gene_table_data={}
start_column_id=""
samples=[]
file_basenames=[]
index=0
for gene_table in gene_tables:
if verbose:
print("Reading file: " + gene_table)
lines=util.process_gene_table_with_header(gene_table, allow_for_missing_header=True)
header=next(lines)
# get the basename of the file
file_basename='.'.join(os.path.basename(gene_table).split('.')[:-1])
file_basenames.append(file_basename)
if header:
header_info=header.split(GENE_TABLE_DELIMITER)
if not start_column_id:
start_column_id=header_info[0]
# allow for multiple samples
sample_names=header_info[1:]
else:
# if there is no header in the file then use the file name as the sample name
sample_names=[file_basename]
for line in lines:
data=line.split(GENE_TABLE_DELIMITER)
try:
gene=data[0]
# if the header names multiple samples, merge all samples
# this prevents extra columns from being included in some rows
# this requires files containing multiple samples to include a header
data_points=data[1:len(sample_names)+1]
except IndexError:
gene=""
if gene:
current_data=gene_table_data.get(gene,"")
fill = index - current_data.count(GENE_TABLE_DELIMITER)
if fill > 0:
# fill in zeros for samples without data then add data point
gene_table_data[gene]=current_data + GENE_TABLE_DELIMITER.join(["0"]*fill) + GENE_TABLE_DELIMITER + GENE_TABLE_DELIMITER.join(data_points) + GENE_TABLE_DELIMITER
elif fill < 0:
# add data point to other data point from the same sample
current_data_points=current_data.split(GENE_TABLE_DELIMITER)
for i,point in enumerate(data_points):
store_index=len(data_points)*-1-1+i
current_data_points[store_index]=str(float(current_data_points[store_index])+float(point))
gene_table_data[gene] = GENE_TABLE_DELIMITER.join(current_data_points)
else:
# add data point to end of list
gene_table_data[gene] = current_data + GENE_TABLE_DELIMITER.join(data_points) + GENE_TABLE_DELIMITER
samples+=sample_names
index+=len(sample_names)
# if all of the header names for the files are the same
# then use the file names as headers
if samples.count(samples[0]) == len(samples):
samples=file_basenames
# write the joined gene table
if not start_column_id:
start_column_id="# header "
sample_header=[start_column_id]+samples
total_gene_tables=len(samples)
sorted_gene_list=util.fsort(list(gene_table_data))
try:
file_handle=open(output,"w")
file_handle.write(GENE_TABLE_DELIMITER.join(sample_header)+"\n")
except EnvironmentError:
sys.exit("Unable to write file: " + output)
for gene in sorted_gene_list:
# extend gene data for any gene that is not included in all samples
current_data=gene_table_data[gene]
fill = total_gene_tables - current_data.count(GENE_TABLE_DELIMITER)
if fill:
current_data=current_data + GENE_TABLE_DELIMITER.join(["0"]*fill) + GENE_TABLE_DELIMITER
file_handle.write(gene+GENE_TABLE_DELIMITER+current_data.rstrip(GENE_TABLE_DELIMITER)+"\n")
file_handle.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _join_gene(query, gene_name, gene_symbol, gene_id):\n if gene_name or gene_symbol:\n query = query.join(models.Gene)\n\n if gene_symbol:\n query = query.filter(models.Gene.gene_symbol.like(gene_symbol))\n\n if gene_name:\n query = query.filter(models.Gene.gene_name.like(gene_name))\n\n if gene_id:\n query = query.filter(models.Gene.gene_id.like(gene_id))\n\n return query",
"def join_tables(self) -> DataFrame:\n if not self.source_tables:\n raise ValueError(f\"No source tables were provided to GlueTransformer: {self}\")\n\n if len(self.source_tables) == 1:\n # extract the first value from the dictionary\n source_table = list(self.source_tables.values())[0]\n return source_table.get_dyf_and_apply_mapping().toDF()\n \n print(self.join_map.get_chain())\n table_chain = self.join_map.get_chain()\n \n print(f\"table_chain: {table_chain}\")\n \n master_table_name = table_chain[0]\n master_table = self.source_tables[master_table_name]\n #print(dir(master_table))\n print(\"master_table.get_columns()\")\n print(master_table.get_columns())\n master_df = master_table.get_dyf_and_apply_mapping().toDF()\n print(master_df.columns)\n \n absorbed_table_names = [master_table_name]\n \n for next_table_name in table_chain[1:]:\n next_table = self.source_tables[next_table_name]\n\n '''\n print(\"next_table.get_columns()\")\n print(next_table.get_columns())\n '''\n absorbed_table_names.append(next_table_name)\n \n next_df = next_table.get_dyf_and_apply_mapping().toDF()\n next_table_def = next_table.table_def\n next_table_alias = self.resolver.to_alias(*next_table_def.table_spec)\n \n absorbed_source = None\n master_alias = None\n for absorbed_table_name in absorbed_table_names:\n absorbed_table_def = self.source_tables[absorbed_table_name].table_def\n \n master_alias = self.resolver.to_alias(*absorbed_table_def.table_spec)\n # once we find the relevant JOIN, stop and process it\n if (master_alias, next_table_alias) in self.join_map:\n break\n else:\n raise ValueError(f'No link found for \"{next_alias}\"')\n \n join_spec = self.join_map.get(master_alias, next_table_alias)\n # not in base code\n if join_spec.left.table_spec == next_table_alias:\n '''\n print('\\n'.join([\n 'A',\n f'{join_spec.left.table_spec == next_table_alias}',\n f'{join_spec.left.table_spec}',\n f'{next_table_alias}',\n ]))\n '''\n next_key, master_key = join_spec.left, join_spec.right\n else:\n '''\n print('\\n'.join([\n 'B',\n f'{join_spec.left.table_spec == next_table_alias}',\n f'{join_spec.left.table_spec}',\n f'{next_table_alias}',\n ]))\n '''\n master_key, next_key = join_spec.left, join_spec.right\n\n '''\n # DEBUG\n print(f\"JOINDEBUG1 {join_spec}\")\n print(f\"{master_key}, {next_key}\")\n '''\n\n join_type = join_spec.type\n \n master_df_key = GlueTable.translate_alias(*master_key)\n next_df_key = GlueTable.translate_alias(*next_key)\n \n master_df_key = GlueTable.translate_alias(*master_key)\n next_df_key = GlueTable.translate_alias(*next_key)\n \n ''' \n # DEBUG\n print(f'Master table name : {master_table_name} with key {master_df_key}')\n # print(f'No. of records before join master : {master_df.count()}')\n print(master_df.columns)\n print(f'Next table name : {next_table_name} with key {next_df_key}')\n # print(f'No. of records before join next : {next_df.count()}')\n print(next_df.columns)\n \n print(f\"{master_key}, {next_key}\")\n \n '''\n \n try:\n master_df = master_df.join(\n next_df,\n master_df[master_df_key] == next_df[next_df_key],\n join_type,\n )\n except Exception as exc:\n print(exc)\n \n # DEBUG\n # print(f'No. of records after join : {master_df.count()}')\n \n return master_df",
"def associate_generator_tables(gf, gen, gens):\n stack_gens = stack_generators(\n gens, cat_col='energy_source_code_num', stacked_col='fuel_type')\n\n gen_assoc = (\n pd.merge(\n stack_gens,\n gen,\n on=IDX_GENS,\n how='outer')\n .pipe(remove_retired_generators)\n .merge(\n gf.groupby(by=IDX_PM_FUEL, as_index=False)\n .sum(min_count=1),\n on=IDX_PM_FUEL,\n suffixes=('_g_tbl', '_gf_tbl'),\n how='outer',\n )\n )\n\n gen_assoc = (\n pd.merge(\n gen_assoc,\n gen_assoc.groupby(by=IDX_FUEL)\n [['capacity_mw', 'net_generation_mwh_g_tbl']].sum(min_count=1)\n .add_suffix('_fuel')\n .reset_index(),\n on=IDX_FUEL,\n )\n .pipe(pudl.helpers.convert_cols_dtypes, 'eia')\n .pipe(_associate_unconnected_records)\n .pipe(_associate_fuel_type_only, gf=gf)\n )\n return gen_assoc",
"def link_genes(self, genes: List[Gene]):\n\n # do a double-check to make sure we don't add duplicate genes\n for gene in genes:\n if gene.locus_tag is not None:\n if gene.locus_tag not in [gene.locus_tag for gene in self.genes]:\n self.genes.append(gene)\n gene.link_transcription_unit(self)\n elif gene.id is not None:\n if gene.id not in [gene.id for gene in self.genes]:\n self.genes.append(gene)\n gene.link_transcription_unit(self)",
"def join_experiments_file_table(exp_table, file_table):\n file_table[\"Experiment\"] = \\\n file_table[\"Dataset\"].str.split(\"/\").apply(lambda l: l[2])\n merged = exp_table.merge(\n file_table, left_on=\"Accession\", right_on=\"Experiment\",\n suffixes=(\"_exp\", \"_file\")\n )\n return merged",
"def join(upstream, product):\n a = pd.read_parquet(str(upstream[\"get\"]))\n b = pd.read_parquet(str(upstream[\"features\"]))\n df = a.join(b)\n df.to_parquet(str(product))",
"def _join():\n df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],\n 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})\n other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],\n 'B': ['B0', 'B1', 'B2']})\n print(df.join(other, lsuffix='_caller', rsuffix='_other')) # 为重复 column 添加前缀\n print(df.set_index('key').join(other.set_index('key')))\n print(df.join(other.set_index('key'), on='key', how='right')) # left,right表示以哪边的index为准\n print(df.join(other.set_index('key'), on='key', how='inner')) # inner,outer 表示交集、并集",
"def sjoin(left_df, right_df, how=..., op=..., lsuffix=..., rsuffix=...):\n ...",
"def get_mult_gene_RNA(ensemble, genes, grouping, max_points='10000'):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping:\n\t\treturn None\n\n\tgenes = [gene+\"%\" for gene in genes]\n\n\t# This query is just to fix gene id's missing the ensemble version number.\n\t# Necessary because the table name must match exactly with whats on the MySQL database.\n\t# Ex. ENSMUSG00000026787 is fixed to ENSMUSG00000026787.3\n\tfirst_query = \"SELECT gene_id FROM genes WHERE gene_id LIKE %s\" + \" OR gene_id LIKE %s\" * (len(genes)-1)\n\tresult = db.get_engine(current_app, 'methylation_data').execute(first_query, (genes,)).fetchall()\n\n\tgene_table_names = ['gene_' + gene_id[0].replace('.','_') for gene_id in result]\n\n\tdf_all = pd.DataFrame()\n\n\tfirst = True\n\tfor gene_table_name in gene_table_names:\n\t\tquery = \"SELECT cells.cell_id, cells.cell_name, cells.dataset, \\\n\t\t\t%(ensemble)s.annotation_RNA, %(ensemble)s.cluster_RNA, \\\n\t\t\t%(ensemble)s.tsne_x_RNA, %(ensemble)s.tsne_y_RNA, \\\n\t\t\t%(gene_table_name)s.normalized_counts, \\\n\t\t\tdatasets.target_region \\\n\t\t\tFROM cells \\\n\t\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\t\tLEFT JOIN %(gene_table_name)s ON %(ensemble)s.cell_id = %(gene_table_name)s.cell_id \\\n\t\t\tLEFT JOIN datasets ON cells.dataset = datasets.dataset\" % {'ensemble': ensemble,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'gene_table_name': gene_table_name}\n\t\tif max_points.isdigit():\n\t\t\tquery = query+\" ORDER BY RAND() LIMIT %(max_points)s()\" % {'max_points': max_points}\n\n\t\ttry:\n\t\t\tdf_all = df_all.append(pd.read_sql(query, db.get_engine(current_app, 'RNA_data')))\n\t\texcept exc.ProgrammingError as e:\n\t\t\tnow = datetime.datetime.now()\n\t\t\tprint(\"[{}] ERROR in app(get_mult_gene_RNA): {}\".format(str(now), e))\n\t\t\tsys.stdout.flush()\n\t\t\treturn None\n\n\t\tif first:\n\t\t\tdf_coords = df_all\n\t\tfirst = False\n\n\tif df_all.empty: # If no data in column, return None\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_gene_RNA): No RNA data for {}\".format(str(now), ensemble))\n\t\tsys.stdout.flush()\n\t\treturn None\n\n\tdf_all['normalized_counts'].fillna(0, inplace=True)\n\n\tdf_avg_methylation = df_all.groupby(by='cell_id', as_index=False)['normalized_counts'].mean()\n\tdf_coords.update(df_avg_methylation)\n\n\tif grouping == 'annotation':\n\t\tdf_coords.fillna({'annotation_RNA': 'None'}, inplace=True)\n\t\tdf_coords['annotation_cat'] = pd.Categorical(df_coords['annotation_RNA'], cluster_annotation_order)\n\t\tdf_coords.sort_values(by='annotation_cat', inplace=True)\n\t\tdf_coords.drop('annotation_cat', axis=1, inplace=True)\n\telif grouping == 'cluster':\n\t\tdf_coords.sort_values(by='cluster_RNA', inplace=True)\n\treturn df_coords",
"def to_db(self, df_combined, gene_name):\n\n # (1) Creates database schema\n curs.execute(\"CREATE TABLE IF NOT EXISTS Primers(PrimerId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, \"\n \"Gene TEXT, Exon TEXT, Direction TEXT, Version INTEGER, Primer_Seq TEXT, Chrom TEXT, M13_Tag TEXT\"\n \", Batch TEXT, Project TEXT, Order_date TEXT, Frag_size INTEGER, Anneal_Temp TEXT, Other TEXT, \"\n \"snp_check INTEGER, no_snps INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, ss_proj TEXT, \"\n \"other2 TEXT, action_to_take TEXT, check_by TEXT, start TEXT, end TEXT, name TEXT)\")\n\n curs.execute(\"CREATE TABLE IF NOT EXISTS SNPs(SNP_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT, \"\n \"Exon TEXT, Direction TEXT, snp_check INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, \"\n \"ss_proj TEXT, other2 TEXT, action_to_take TEXT, check_by TEXT, name TEXT)\")\n\n curs.execute(\"CREATE TABLE IF NOT EXISTS Genes(Gene_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT)\")\n\n # (2) Drops unnecessary columns to make two tables and removes duplicates.\n primertable_cols_to_drop = ['snp_check', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',\n 'check_by']\n snptable_cols_to_drop = ['Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag', 'Batch', 'project',\n 'Order_date', 'Frag_size', 'anneal_temp', 'Other', 'no_snps', 'start', 'end']\n\n df_primertable = df_combined.drop(primertable_cols_to_drop, axis=1)\n df_primertable = df_primertable.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))\n df_snptable = df_combined.drop(snptable_cols_to_drop, axis=1)\n\n # (3) Performs data checks using CheckPrimers and CheckSNPs classes.\n check = CheckPrimers(df_primertable, df_snptable)\n total_errors, error_details = check.check_all()\n\n # (4) Checks if gene data already in database.\n uni_gene = '(u\\'%s\\',)' % gene_name\n gene = self.check_in_db(gene_name) # this outputs a unicode string\n\n # (5) Adds to database if no errors. 
Overrides data if already present.\n archived_filename = None\n if total_errors == 0:\n if str(uni_gene) == str(gene):\n # Add query to data frame then save to excel.\n get_old_query = \"SELECT p.Gene, p.Exon, p.Direction, p.Version, p.Primer_seq, p.Chrom, p.M13_Tag, \" \\\n \"p.Batch, p.Project, p.Order_date, p.Frag_size, p.Anneal_Temp, p.Other, s.snp_check, \" \\\n \"p.no_snps, s.rs, s.hgvs, s.freq, s.ss, s.ss_proj, s.other2, s.action_to_take, \" \\\n \"s.check_by FROM SNPs s LEFT JOIN Primers p ON s.name = p.name WHERE p.Gene='%s'\" % \\\n gene_name\n today_date = datetime.datetime.now().strftime(\"%d-%m-%Y_%H%M\")\n df_sql = pd.read_sql_query(get_old_query, con=con)\n archived_filename = '%s_%s' % (gene_name, today_date)\n writer = ExcelWriter('%s.xlsx' % archived_filename)\n df_sql.to_excel(writer, '%s' % today_date, index=False)\n writer.save()\n os.system(\"mv /home/cuser/PycharmProjects/django_apps/mysite/%s.xlsx \"\n \"/home/cuser/PycharmProjects/django_apps/mysite/primerdb/archived_files/\" % archived_filename)\n\n curs.execute(\"DELETE FROM Primers WHERE Gene='%s'\" % gene_name)\n curs.execute(\"DELETE FROM Genes WHERE Gene='%s'\" % gene_name)\n curs.execute(\"DELETE FROM SNPs WHERE Gene='%s'\" % gene_name)\n\n info = \"Data updated.\"\n\n else:\n info = \"New gene added.\"\n\n # Insert new data into SQL tables.\n curs.execute(\"INSERT INTO Genes (Gene) VALUES (?)\", (gene_name,))\n df_primertable.to_sql('Primers', con, if_exists='append', index=False)\n df_snptable.to_sql('SNPs', con, if_exists='append', index=False)\n\n print \"Primers successfully added to database.\"\n else:\n info = error_details\n\n con.commit()\n return info, archived_filename",
"def link_gene(self, gene: Gene):\n self.gene = gene\n gene.link_attenuator(self)",
"def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t 
gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models",
"def link_genes(self, genes: List[Gene]):\n for gene in genes:\n if gene.locus_tag not in [gene.locus_tag for gene in self.genes]:\n self.genes.append(gene)\n gene.link_i_modulon(self)",
"def link_gene(self, gene: Gene):\n if self.gene is None:\n self.gene = gene",
"def link_gene(self, gene: Gene):\n if self.gene is None:\n self.gene = gene",
"def join(self, geometry, coarser, rangeTable, continuous = True):\r\n \r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n cursor.execute('SELECT table_name FROM all_tables WHERE table_name = :1',[rangeTable.upper(),])\r\n length = len(cursor.fetchall())\r\n connection.close()\r\n if length:\r\n return 0, 0, 0, 0\r\n else:\r\n start1 = time.time()\r\n if isinstance(geometry, list):\r\n _, data1, _, Levels = self.structure.getMortonRanges(geometry[0], coarser, \r\n continuous = False,\r\n maxRanges = self.maxRanges,\r\n numLevels = self.numLevels)\r\n _, data2, _, Levels = self.structure.getMortonRanges(geometry[1], coarser, \r\n continuous = False,\r\n maxRanges = self.maxRanges,\r\n numLevels = self.numLevels)\r\n morPrep = time.time() - start1\r\n ranges = len(data1) + len(data2)\r\n start2 = time.time()\r\n if len(data1) == 0 and len(data2) == 0:\r\n print 'None morton range in specified extent!'\r\n self.mortonJoinWhere = ''\r\n elif len(data1) or len(data2):\r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n ora.createIOT(cursor, rangeTable, self.joinColumns, 'low', True)\r\n if len(data1):\r\n sqlldrCommand = self.sqlldr(rangeTable, ['LOW', 'UPPER'], format_lst(data1)) #TODO: separate time between formatting and performing the loading\r\n os.system(sqlldrCommand) \r\n if len(data2):\r\n sqlldrCommand = self.sqlldr(rangeTable, ['LOW', 'UPPER'], format_lst(data2))\r\n os.system(sqlldrCommand)\r\n else:\r\n _, data, _, Levels = self.structure.getMortonRanges(geometry, coarser, \r\n continuous, \r\n maxRanges = self.maxRanges,\r\n numLevels = self.numLevels)\r\n morPrep = time.time() - start1\r\n start2 = time.time()\r\n if len(data) == 0:\r\n print 'None morton range in specified extent!'\r\n ranges = 0\r\n else:\r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n ora.createIOT(cursor, rangeTable, self.joinColumns, 'low', True)\r\n sqlldrCommand = self.sqlldr(rangeTable, ['LOW', 'UPPER'], format_lst(data))\r\n os.system(sqlldrCommand)\r\n ranges = len(data)\r\n insert = time.time() - start2\r\n return ranges, morPrep, insert, Levels",
"def crossover(self, gene2):\r\n assert self.key == gene2.key\r\n\r\n \r\n new_gene = self.__class__(self.key)\r\n for a in self._gene_attributes:\r\n if random() > 0.5:\r\n setattr(new_gene, a.name, getattr(self, a.name))\r\n else:\r\n setattr(new_gene, a.name, getattr(gene2, a.name))\r\n\r\n return new_genes",
"def get_genera_sequences_from(self, table):\n log.info(\"Joining the sequences of all the scaffolds with the same genus\")\n if not self.table_exists(table):\n raise ValueError(\"The database does not have table {0}\".format(table))\n # Get all the scaffolds assigned\n sql_command = \"\"\"SELECT {0}.scaffold, {0}.genus, {1}.sequence\n FROM {0}\n INNER JOIN {1}\n WHERE {0}.scaffold={1}.scaffold\n \"\"\".format(table, self.ScaffoldsTable)\n genus2sequence_dict = dict() # dictionary of sequences indexed by genus\n assigned_scaffolds = set()\n cursor = self.execute(sql_command)\n record = cursor.fetchone()\n while record:\n genus = record[\"genus\"]\n if not genus in genus2sequence_dict:\n genus2sequence_dict[genus] = [record[\"sequence\"]]\n else:\n genus2sequence_dict[genus].append(record[\"sequence\"])\n assigned_scaffolds.add(record[\"scaffold\"])\n record = cursor.fetchone()\n # join all sequences\n for genus in genus2sequence_dict:\n genus2sequence_dict[genus] = \"\".join(genus2sequence_dict[genus])\n return genus2sequence_dict, assigned_scaffolds",
"def merge_orf_and_funtax( orf_file, funtax_file ):\n orf_df = pd.read_table(orf_file, header=None, names=orf_names, index_col='ORF_ID', usecols=orf_names, engine='python', encoding=\"ISO-8859-1\", quoting=3)\n funtax_df = pd.read_table(funtax_file, index_col='ORF_ID', engine='python', encoding=\"ISO-8859-1\", quoting=3)\n funtax_df[['COG','KO']] = orf_df[['COG','KO']]\n funtax_df['taxonId'] = funtax_df['taxonomy'].replace(r'.+\\(([0-9]+)\\)', value=r'\\1', regex=True)\n genes = funtax_df.reset_index()\n genes['gene'] = genes['ORF_ID']\n return genes.set_index('gene')",
"def create_new_genes_file(old_genes_file, df, new_genes_file):\n genes = pd.read_table(\n old_genes_file, names=['gene_name'], index_col=0\n )\n genes.ix[df.index].to_csv(\n new_genes_file,\n index=True, header=False, sep='\\t'\n )",
"def gene2generelate(gene1,gene2):\n server = \"https://string-db.org/api/json/network?identifiers=\" +\\\n gene1 + \"%0D\" + gene2\n r = requests.get(server)\n if not r.ok:\n r.raise_for_status()\n sys.exit()\n decoded = r.content.decode()\n dict_decoded = json.loads(decoded)\n if len(dict_decoded):\n ID1 = dict_decoded[0]['stringId_A']\n ID2 = dict_decoded[0]['stringId_B']\n score = dict_decoded[0]['score']\n ns = dict_decoded[0]['nscore']\n fs = dict_decoded[0]['fscore']\n ps = dict_decoded[0]['pscore']\n ass = dict_decoded[0]['ascore']\n es = dict_decoded[0]['escore']\n ds = dict_decoded[0]['dscore']\n ts = dict_decoded[0]['tscore']\n print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\t{9}\".\\\n format(ID1,ID2,score,ns,fs,ps,ass,es,ds,ts),file=Output)\n else:\n print(\"The String Database: None Results\",file=Output)",
"def add_entrez_ids(lookup: pd.DataFrame) -> pd.DataFrame:\n \n celeg_ens2entrez = get_species_ens_entrez_lookup(CELEGANS_DATASET_NAME)\n celeg_ens2entrez.columns = ['celeg_ensembl_id',\n 'celeg_entrez_id']\n \n droso_ens2entrez = get_species_ens_entrez_lookup(DROSO_DATASET_NAME)\n droso_ens2entrez.columns = ['dmelanogaster_ensembl_id',\n 'dmelanogaster_entrez_id']\n \n lookup_with_entrez = pd.merge(lookup, celeg_ens2entrez,\n left_on=\"Gene stable ID\",\n right_on=\"celeg_ensembl_id\",\n how=\"left\")\n \n lookup_with_entrez = pd.merge(lookup_with_entrez, droso_ens2entrez,\n left_on=\"Drosophila melanogaster gene stable ID\",\n right_on=\"dmelanogaster_ensembl_id\",\n how=\"left\")\n \n lookup_with_entrez.to_csv(LOOKUP_FILENAME, header=True, index=False)\n return lookup_with_entrez",
"def join(self, other, on):\n\t\t# check for correct join\n\t\tif not (on in self.headers or on in other.headers):\n\t\t\tprint \"Error: header '{0}' not found in both collections\".format(on)\n\t\t\treturn None\n\n\t\t# create new dataset\n\t\tjoined = Dataset()\n\t\t\n\t\t# fill new dataset with combined data\n\t\tmappedHeaders = joinHeaders(self, other, joined, on)\n\t\tmergeRows(self, other, joined, on, mappedHeaders)\n\t\tjoined.ensureFilled()\n\n\t\t# return newly created dataset\n\t\treturn joined",
"def mergelots(bigdict, tblstojoin, joincol, how='outer'):\n for tbl in tblstojoin:\n if tbl == tblstojoin[0]:\n bigtbl = bigdict[tbl].copy()\n else:\n bigtbl = bigtbl.merge(bigdict[tbl], how=how, on=joincol)\n return bigtbl",
"def merge_embedding_with_GO_labels(emb_df, GO_df):\n # get df with gene_symbols and entrez_ids from fetal data (more updated than adult probes data)\n all_genes = pd.read_csv(\n './data/raw/allen_human_fetal_brain/lmd_matrix_12566/rows_metadata.csv')\n all_genes = all_genes[~((all_genes.gene_symbol.str.startswith('A_')) | (\n all_genes.gene_symbol.str.startswith('CUST_')))].gene_symbol.drop_duplicates()\n all_genes_w_entrez = utils.genesymbols_2_entrezids(all_genes)\n\n emb_df = emb_df.add_prefix('emb_')\n df = emb_df.merge(all_genes_w_entrez, left_index=True,\n right_on='gene_symbol')\n df = df.merge(GO_df, left_on='entrez_id', right_index=True)\n\n return df.set_index(['entrez_id', 'gene_symbol'])",
"def dump_probeset2gene(db):\n\n f = \"_\".join((\"probeset2gene\",db[\"database\"], db[\"host\"], db[\"port\"],\".txt\"))\n if not os.path.exists(f):\n cmd = \"\"\"mysql -h %s -P%s -u ensadmin -pensembl \\\n -e \"select dbprimary_acc, stable_id from xref x, object_xref ox, transcript t, gene_stable_id gsi \\\n where %s and x.xref_id=ox.xref_id and t.transcript_id=ensembl_id \\\n and ox.ensembl_object_type='Transcript' \\\n and gsi.gene_id=t.gene_id group by stable_id, dbprimary_acc \" %s > %s\"\"\" % (db[\"host\"],\n db[\"port\"],\n\tAFFY_XREF_FILTER_CLAUSE,\n db[\"database\"],\n f)\n\n exec_command(cmd)\n return f",
"def get_genes_and_gpr(model,gene_outfile,gpr_outfile):\n model_dict = model_to_dict(model, sort=False)\n genes = pd.DataFrame(model_dict['genes']).set_index(['id'])\n genes.to_csv(gene_outfile)\n all_gpr = pd.DataFrame(model_dict['reactions']).set_index(['id'])\n all_gpr.to_csv(gpr_outfile)\n return [genes, all_gpr]",
"def merge_tables():\r\n\r\n # get sql connection\r\n conn = get_sql_conn()\r\n\r\n # get all info from materials table\r\n query_mat = 'Select * from material_procurement'\r\n df_mat = pd.read_sql_query(query_mat, con=conn)\r\n df_mat = df_mat.drop(['uid'], axis=1)\r\n df_mat = df_mat.pivot(index='ball_milling_uid',\r\n columns='material_name',\r\n values='mass_fraction')\r\n df_mat = df_mat.reset_index()\r\n df_mat = df_mat.add_prefix('MT-')\r\n\r\n # get all info from ball mill table\r\n query_ball = 'Select * from ball_milling'\r\n df_ball = pd.read_sql_query(query_ball, con=conn)\r\n\r\n # added prefix to distinctly identify a column\r\n df_ball = df_ball.add_prefix('BM-')\r\n\r\n # get all info from hot process\r\n query_hot = 'Select * from hot_press'\r\n df_hot = pd.read_sql_query(query_hot, con=conn)\r\n\r\n # added prefix to distinctly identify a column\r\n df_hot = df_hot.add_prefix('HP-')\r\n\r\n # get all info from hall measurements table\r\n query_hall = 'Select * from hall_measurement'\r\n df_hall = pd.read_sql_query(query_hall, con=conn)\r\n\r\n # get all info from icp measurements table\r\n query_icp = 'Select * from icp_measurement'\r\n df_icp = pd.read_sql_query(query_icp, con=conn)\r\n\r\n # Left merge tables in database starting from materials area to lab reports\r\n df_com = df_ball.merge(df_mat, how='left', left_on='BM-uid',\r\n right_on='MT-ball_milling_uid')\r\n df_com = df_com.merge(df_hot, how='left', left_on='BM-hot_press_uid'\r\n , right_on='HP-uid')\r\n df_com = df_com.merge(df_hall.add_prefix('BM-HA-'), how='left',\r\n left_on='BM-output_material_uid',\r\n right_on='BM-HA-material_uid')\r\n df_com = df_com.merge(df_icp.add_prefix('BM-ICP-'), how='left',\r\n left_on='BM-output_material_uid',\r\n right_on='BM-ICP-material_uid')\r\n df_com = df_com.merge(df_hall.add_prefix('HP-HA-'), how='left',\r\n left_on='HP-output_material_uid',\r\n right_on='HP-HA-material_uid')\r\n df_com = df_com.merge(df_icp.add_prefix('HP-ICP-'), how='left',\r\n left_on='HP-output_material_uid',\r\n right_on='HP-ICP-material_uid')\r\n\r\n # close connection\r\n conn.close()\r\n\r\n # return complete db tables\r\n return df_com",
"def link_gene(self, gene: Gene, reference_sequence: Seq = None):\n self.gene = gene\n self.add_strand(gene.location.strand, reference_sequence=reference_sequence)\n gene.link_shine_dalgarno(self)",
"def get_gene_from_mysql(ensemble, gene_table_name, methylation_type, clustering, tsne_type, grouping='cluster', max_points='10000'):\n\n\tcontext = methylation_type[1:]\n\tif grouping in ['annotation','cluster']:\n\t\tgroupingu = ensemble+\".\"+grouping+\"_\"+clustering\n\telif grouping in ['NeuN']:\n\t\tgroupingu = \"CONCAT('NeuN',cells.\"+grouping+\")\"\n\telif grouping in ['dataset','sex','brain_region','target_region']:\n\t\tgroupingu = \"datasets.\"+grouping\n\telif grouping in ['broad_brain_region']:\n\t\tgroupingu = \"ABA_regions.ABA_broad_acronym\"\n\telse:\n\t\tgroupingu = \"cells.\"+grouping\n\n\n\tt0=datetime.datetime.now()\n\t# print(' Running get_gene_from_mysql for '+gene_table_name+' : '+str(t0)+'; ', file=open(log_file,'a'))# EAM - Profiling SQL\n\tif tsne_type=='noTSNE':\n\t\tquery = \"SELECT %(gene_table_name)s.%(methylation_type)s, %(gene_table_name)s.%(context)s \\\n\t\t\tFROM %(ensemble)s \\\n\t\t\tLEFT JOIN %(gene_table_name)s ON %(ensemble)s.cell_id = %(gene_table_name)s.cell_id\" % {'ensemble': ensemble,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'gene_table_name': gene_table_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'methylation_type': methylation_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'context': context,}\n\telif 'ndim2' in tsne_type:\n\t\tquery = \"SELECT cells.cell_id, cells.dataset, %(ensemble)s.cluster_%(clustering)s, datasets.target_region, \\\n\t\t\t%(ensemble)s.annotation_%(clustering)s, %(gene_table_name)s.%(methylation_type)s, \\\n\t\t\tcells.global_%(methylation_type)s, %(groupingu)s as grouping, \\\n\t\t\t%(ensemble)s.tsne_x_%(tsne_type)s, %(ensemble)s.tsne_y_%(tsne_type)s, \\\n\t\t\t%(gene_table_name)s.%(context)s, datasets.sex \\\n\t\t\tFROM cells \\\n\t\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\t\tLEFT JOIN %(gene_table_name)s ON %(ensemble)s.cell_id = %(gene_table_name)s.cell_id \\\n\t\t\tLEFT JOIN datasets ON cells.dataset = datasets.dataset \\\n\t\t\tLEFT JOIN ABA_regions ON datasets.brain_region=ABA_regions.ABA_acronym \" % {'ensemble': ensemble, 'groupingu': groupingu,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'gene_table_name': gene_table_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'tsne_type': tsne_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'methylation_type': methylation_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'context': context,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'clustering': clustering,}\n\telse: # 3D tSNE\n\t\tquery = \"SELECT cells.cell_id, cells.cell_name, cells.dataset, cells.global_%(methylation_type)s, \\\n\t\t\t%(ensemble)s.annotation_%(clustering)s, %(ensemble)s.cluster_%(clustering)s, \\\n\t\t\t%(ensemble)s.tsne_x_%(tsne_type)s, %(ensemble)s.tsne_y_%(tsne_type)s, %(ensemble)s.tsne_z_%(tsne_type)s, \\\n\t\t\t%(gene_table_name)s.%(methylation_type)s, %(gene_table_name)s.%(context)s, \\\n\t\t\tdatasets.target_region, datasets.sex \\\n\t\t\tFROM cells \\\n\t\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\t\tLEFT JOIN %(gene_table_name)s ON %(ensemble)s.cell_id = %(gene_table_name)s.cell_id \" % {'ensemble': ensemble,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'gene_table_name': gene_table_name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'tsne_type': tsne_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'methylation_type': methylation_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'context': context,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'clustering': clustering,}\n\tif max_points.isdigit():\n\t\tquery = query+\" ORDER BY RAND() LIMIT %(max_points)s\" % {'max_points': max_points}\n\n\ttry:\n\t\tdf = 
pd.read_sql(query, db.get_engine(current_app, 'methylation_data'))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_mult_gene_methylation): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\treturn None\n\n\treturn df"
]
| [
"0.6119691",
"0.58555377",
"0.5789409",
"0.572956",
"0.556574",
"0.5557195",
"0.54311585",
"0.5387678",
"0.53696585",
"0.5367406",
"0.5350316",
"0.5341653",
"0.53085417",
"0.5301041",
"0.5301041",
"0.52331257",
"0.5225479",
"0.5223402",
"0.520401",
"0.5197744",
"0.51367563",
"0.5135594",
"0.51329935",
"0.51292205",
"0.5127918",
"0.5111448",
"0.5102298",
"0.50873876",
"0.5038895",
"0.5024259"
]
| 0.69409186 | 0 |
Build a test model for children testing | def __build_test_model_children_tree_1(self) -> Model:
self.model_builder.clear()
r_a = SystemFile("a", 1024, True)
r_aa = SystemFile("aa", 512, False)
r_a.add_child(r_aa)
r_ab = SystemFile("ab", 512, False)
r_a.add_child(r_ab)
r_b = SystemFile("b", 3090, True)
r_ba = SystemFile("ba", 2048, True)
r_b.add_child(r_ba)
r_baa = SystemFile("baa", 2048, False)
r_ba.add_child(r_baa)
r_bb = SystemFile("bb", 42, True) # only in remote
r_b.add_child(r_bb)
r_bba = SystemFile("bba", 42, False) # only in remote
r_bb.add_child(r_bba)
r_bd = SystemFile("bd", 1000, False)
r_b.add_child(r_bd)
r_c = SystemFile("c", 1234, False) # only in remote
r_d = SystemFile("d", 5678, True) # only in remote
r_da = SystemFile("da", 5678, False) # only in remote
r_d.add_child(r_da)
l_a = SystemFile("a", 1024, True)
l_aa = SystemFile("aa", 512, False)
l_a.add_child(l_aa)
l_ab = SystemFile("ab", 512, False)
l_a.add_child(l_ab)
l_b = SystemFile("b", 1611, True)
l_ba = SystemFile("ba", 512, True)
l_b.add_child(l_ba)
l_baa = SystemFile("baa", 512, False)
l_ba.add_child(l_baa)
l_bc = SystemFile("bc", 99, True) # only in local
l_b.add_child(l_bc)
l_bca = SystemFile("bca", 99, False) # only in local
l_bc.add_child(l_bca)
l_bd = SystemFile("bd", 1000, False)
l_b.add_child(l_bd)
s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, "b", "")
s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)
s_b.add_active_file_transfer_state("ba/baa", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))
s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, "c", "")
s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, "d", "")
self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])
self.model_builder.set_local_files([l_a, l_b])
self.model_builder.set_lftp_statuses([s_b, s_c, s_d])
return self.model_builder.build_model() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_build_model(arguments):\n ...",
"def create_model(self):\n self.skipTest(\"Base module should not be tested.\")",
"def _child_build_new_model(self):\n self._build_new_gp()",
"def test_constructor(self):\n # Record the model types of all the models to be created\n all_model_types = model_type_to_display_name.keys()\n\n # Record the attribute / value pairs that are common to all models.\n common_attr_value_dict = {\"data\": self.fake_df,\n \"name_spec\": self.fake_names,\n \"design\": self.fake_design,\n \"ind_var_names\": self.fake_names[\"x\"],\n \"alt_id_col\": self.alt_id_col,\n \"obs_id_col\": self.obs_id_col,\n \"choice_col\": self.choice_col,\n \"specification\": self.fake_specification,\n \"alt_IDs\": self.fake_df[\"alt_id\"].values,\n \"choices\": self.fake_df[\"choice\"].values}\n\n # Create a shape name dictionary to relate the various models to the\n # names of their shape parameters.\n shape_name_dict = {\"MNL\": None,\n \"Asym\": self.fake_shape_names[:2],\n \"Cloglog\": None,\n \"Scobit\": self.fake_shape_names,\n \"Uneven\": self.fake_shape_names,\n \"Nested Logit\": None,\n \"Mixed Logit\": None}\n\n # Create a shape reference position dictionary to relate the various\n # models to their shape reference positions.\n shape_ref_dict = {}\n for key in shape_name_dict:\n shape_ref_dict[key] = (None if key != \"Asym\" else\n self.fake_shape_ref_pos)\n\n # Create an intercept_names and intercept_ref_position dictionary to\n # relate the various models to their respective kwargs.\n intercept_names_dict = {}\n intercept_ref_dict = {}\n for key in shape_name_dict:\n if key in [\"MNL\", \"Nested Logit\", \"Mixed Logit\"]:\n intercept_names_dict[key] = None\n intercept_ref_dict[key] = None\n else:\n intercept_names_dict[key] = self.fake_intercept_names\n intercept_ref_dict[key] = self.fake_intercept_ref_pos\n\n # Create a nest_names dictionary to relate the various models to their\n # nest_name attributes\n nest_name_dict = {}\n nest_spec_dict = {}\n for key in shape_name_dict:\n if key != \"Nested Logit\":\n nest_name_dict[key] = None\n nest_spec_dict[key] = None\n else:\n nest_name_dict[key] = list(self.fake_nest_spec.keys())\n nest_spec_dict[key] = self.fake_nest_spec\n\n # Create dictionaries for the mixing_id_col, mixing_vars, and\n # mixing_pos attributes\n mixing_id_col_dict = {}\n mixing_vars_dict = {}\n mixing_pos_dict = {}\n\n for key in shape_name_dict:\n if key != \"Mixed Logit\":\n mixing_id_col_dict[key] = None\n mixing_vars_dict[key] = None\n mixing_pos_dict[key] = None\n else:\n mixing_id_col_dict[key] = self.obs_id_col\n mixing_vars_dict[key] = self.fake_names[\"x\"]\n mixing_pos_dict[key] = [0]\n\n # Record the attribute / value pairs that vary across models\n varying_attr_value_dict = {\"model_type\": model_type_to_display_name,\n \"intercept_names\": intercept_names_dict,\n \"intercept_ref_position\":\n intercept_ref_dict,\n \"shape_names\": shape_name_dict,\n \"shape_ref_position\": shape_ref_dict,\n \"nest_names\": nest_name_dict,\n \"nest_spec\": nest_spec_dict,\n \"mixing_id_col\": mixing_id_col_dict,\n \"mixing_vars\": mixing_vars_dict,\n \"mixing_pos\": mixing_pos_dict}\n\n # Set up the keyword arguments that are needed for each of the model\n # types\n variable_kwargs = {}\n for model_name in all_model_types:\n variable_kwargs[model_name] = {}\n variable_kwargs[model_name][\"intercept_names\"] =\\\n intercept_names_dict[model_name]\n variable_kwargs[model_name][\"intercept_ref_pos\"] =\\\n intercept_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_ref_pos\"] =\\\n shape_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_names\"] =\\\n shape_name_dict[model_name]\n variable_kwargs[model_name][\"nest_spec\"] =\\\n 
nest_spec_dict[model_name]\n variable_kwargs[model_name][\"mixing_id_col\"] =\\\n mixing_id_col_dict[model_name]\n variable_kwargs[model_name][\"mixing_vars\"] =\\\n mixing_vars_dict[model_name]\n\n # Execute the test for each model type\n for model_name in all_model_types:\n # Update the model type in the list of constructor args\n self.constructor_args[-1] = model_name\n\n # Use this specific model's keyword arguments\n self.constructor_kwargs.update(variable_kwargs[model_name])\n\n # Construct the model object\n model_obj = pylogit.create_choice_model(*self.constructor_args,\n **self.constructor_kwargs)\n\n # Make sure that the constructor has all of the required attributes\n for attr in common_attr_value_dict:\n value = common_attr_value_dict[attr]\n if isinstance(value, pd.DataFrame):\n self.assertTrue(value.equals(model_obj.data))\n elif isinstance(value, np.ndarray):\n npt.assert_allclose(value,\n model_obj.__getattribute__(attr))\n else:\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n for attr in varying_attr_value_dict:\n value = varying_attr_value_dict[attr][model_name]\n\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n return None",
"def build_model():",
"def gen_model(children, tree_structure):\n referenced = False\n extended = False\n for child in children:\n #print child.arg\n node = dict()\n extended = False\n if hasattr(child, 'substmts'):\n for attribute in child.substmts:\n # process the 'type' attribute:\n # Currently integer, enumeration and string are supported.\n if attribute.keyword == 'type':\n if len(attribute.arg.split(':'))>1:\n attribute.arg = attribute.arg.split(':')[-1]\n # Firstly, it is checked if the attribute type has been previously define in typedefs.\n if attribute.arg in TYPEDEFS:\n if TYPEDEFS[attribute.arg]['type'][:3] == 'int':\n node['type'] = 'integer'\n node['format'] = TYPEDEFS[attribute.arg]['format']\n elif TYPEDEFS[attribute.arg]['type'] == 'enumeration':\n node['type'] = 'string'\n node['enum'] = [e\n for e in TYPEDEFS[attribute.arg]['enum']]\n # map all other types to string\n else:\n node['type'] = 'string'\n elif attribute.arg[:3] == 'int':\n node['type'] = 'integer'\n node['format'] = attribute.arg\n elif attribute.arg == 'decimal64':\n node['type'] = 'number'\n node['format'] = 'double'\n elif attribute.arg == 'boolean':\n node['type'] = attribute.arg\n elif attribute.arg == 'enumeration':\n node['type'] = 'string'\n node['enum'] = [e[0]\n for e in attribute.i_type_spec.enums]\n # map all other types to string\n else:\n node['type'] = 'string'\n elif attribute.keyword == 'mandatory':\n parent_model = to_upper_camelcase(child.parent.arg)\n if parent_model not in PARENT_MODELS.keys():\n PARENT_MODELS[parent_model] = {'models':[],'discriminator':to_lower_camelcase(child.arg)}\n # Process the reference to another model.\n # We differentiate between single and array references.\n elif attribute.keyword == 'uses':\n if len(attribute.arg.split(':'))>1:\n attribute.arg = attribute.arg.split(':')[-1]\n\n ref = to_upper_camelcase(attribute.arg)\n ref = '#/definitions/' + ref\n if str(child.keyword) == 'list':\n node['items'] = {'$ref': ref}\n node['type'] = 'array'\n for attribute in child.substmts:\n if attribute.keyword == 'key':\n listkey = to_lower_camelcase(attribute.arg)\n if listkey:\n node['x-key'] = listkey\n referenced = True\n elif str(child.keyword) == 'grouping':\n ref = to_upper_camelcase(attribute.arg)\n if ref in tree_structure:\n PARENT_MODELS[ref]['models'].append(child.arg)\n list_properties = [item for item in tree_structure[ref]['properties']]\n ref = '#/definitions/' + ref\n node['allOf'] = []\n node['allOf'].append({'$ref': ref})\n index = 0\n for i in range(0, len(child.i_children)):\n #print len(child.i_children)\n if to_lower_camelcase(child.i_children[index].arg) in list_properties:\n del child.i_children[index]\n else:\n index+=1\n extended = True\n else:\n pending_models.append(child)\n else:\n node['$ref'] = ref\n referenced = True\n\n # When a node contains a referenced model as an attribute the algorithm\n # does not go deeper into the sub-tree of the referenced model.\n if not referenced :\n if not extended:\n node = gen_model_node(child, node)\n else:\n node_ext = dict()\n node_ext = gen_model_node(child, node_ext)\n node['allOf'].append( node_ext)\n extended = False\n\n # Leaf-lists need to create arrays.\n # Copy the 'node' content to 'items' and change the reference\n if child.keyword == 'leaf-list':\n ll_node = {'type': 'array', 'items': node}\n node = ll_node\n # Groupings are class names and upper camelcase.\n # All the others are variables and lower camelcase.\n if child.keyword == 'grouping':\n tree_structure[to_upper_camelcase(child.arg)] = node\n else:\n 
tree_structure[to_lower_camelcase(child.arg)] = node\n # TODO: do we really need this return value? We are working on the\n # reference anyhow.\n return tree_structure",
"def build_model(self):\n pass",
"def build_model(self):\n pass",
"def test_append_children_category(self):\n category = Category(catname='olympic games')\n category1 = Category(catname='Tennis')\n category.parents.append(category1)\n category.save()\n assert category.parents",
"def make_test_object(self):\n return self.orm_cls.testing_create()",
"def test_BuildModel3(self):\n print(\"\\nTest 7: Building a more complicated Model\")\n builder = StaticBuilder(\"BreakIt\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3)\n enc2 = builder.addInner(5, num_islots=2)\n out1 = builder.addOutput()\n out2 = builder.addOutput()\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(in2, enc2, islot=0)\n builder.addDirectedLink(enc1, enc2, islot=1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc2, out2)\n \n builder.build()",
"def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )",
"def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )",
"def setUp(self):\n\n pages = {}\n\n pages['A'] = Page.objects.create(title='A')\n pages['B'] = Page.objects.create(title='B', parent=pages['A'])\n pages['C'] = Page.objects.create(title='C', parent=pages['B'])\n pages['D'] = Page.objects.create(title='D', parent=pages['C'])\n pages['E'] = Page.objects.create(title='E', parent=pages['C'])\n pages['F'] = Page.objects.create(title='F', parent=pages['B'])\n pages['G'] = Page.objects.create(title='G', parent=pages['B'])\n pages['H'] = Page.objects.create(title='H', parent=pages['G'])\n pages['I'] = Page.objects.create(title='I', parent=pages['A'])\n pages['J'] = Page.objects.create(title='J')\n\n self.pages = pages",
"def test_Tree():",
"def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()",
"def create_child(self, **kw):\n m = self.model_class.create(**kw)\n self.add(m)\n return m",
"def test_create_parent_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def create_models( self ):",
"def buildHierarchy(self, test_input):\n for entry in test_input:\n if entry['manager']not in self.relations:\n self.relations[entry['manager']] = Node(entry['manager'], entry['name'])\n else:\n self.relations[entry['manager']].employees.append(entry['name'])",
"def _build_model(self):\n raise NotImplementedError()",
"def setUp(self):\n self.org_234 = Organization(\n '234',\n display_name='Organization 234',\n full_name='organization/234/',\n data='fake_org_data_234')\n\n self.folder_56 = Folder(\n '56',\n display_name='Folder 56',\n full_name='folder/56',\n data='fake_folder_data456456')\n\n self.proj_1 = Project(\n 'proj-1',\n project_number=11223344,\n display_name='My project 1',\n parent=self.org_234,\n full_name='organization/234/project/proj-1/',\n data='fake_project_data_2341')",
"def build_model(self):\n raise NotImplementedError",
"def setUp(self):\n self.test1 = BaseModel()\n self.test1json = self.test1.to_dict()\n self.newtest1 = BaseModel(self.test1json)",
"def test_father(self):\n review1 = Review()\n self.assertTrue(issubclass(review1.__class__, BaseModel))",
"def test_create_parent(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())",
"def _build_model(self, **kwargs):\n pass",
"def test_build_children_state_default_extra(self):\n l_a = SystemFile(\"a\", 150, True)\n l_aa = SystemFile(\"aa\", 50, True)\n l_a.add_child(l_aa)\n l_aaa = SystemFile(\"aaa\", 50, False)\n l_aa.add_child(l_aaa)\n l_ab = SystemFile(\"ab\", 100, False)\n l_a.add_child(l_ab)\n\n self.model_builder.set_local_files([l_a])\n model = self.model_builder.build_model()\n m_a = model.get_file(\"a\")\n self.assertEqual(ModelFile.State.DEFAULT, m_a.state)\n m_a_ch = {m.name: m for m in model.get_file(\"a\").get_children()}\n m_aa = m_a_ch[\"aa\"]\n self.assertEqual(ModelFile.State.DEFAULT, m_aa.state)\n m_aaa = m_aa.get_children()[0]\n self.assertEqual(ModelFile.State.DEFAULT, m_aaa.state)\n m_ab = m_a_ch[\"ab\"]\n self.assertEqual(ModelFile.State.DEFAULT, m_ab.state)",
"def testMotherChild(self):\n attr = self.session.create_visit_attr()\n\n self.util.stringTypeTest(self, attr, \"mother_child\")\n\n self.util.stringPropertyTest(self, attr, \"mother_child\")",
"def test_pod_valid_parent(self):\n session = self.login_to_apic()\n parent = PhysicalModel()\n pod = Pod.get(session, parent)\n children = parent.get_children()\n self.assertEqual(pod, children)"
]
| [
"0.6332726",
"0.61563665",
"0.61315244",
"0.6126765",
"0.61122686",
"0.59221065",
"0.5898088",
"0.5898088",
"0.5894476",
"0.58900094",
"0.5854989",
"0.58097064",
"0.58097064",
"0.5794394",
"0.5790386",
"0.5776492",
"0.5752513",
"0.5751743",
"0.5749239",
"0.5743566",
"0.57105345",
"0.57010037",
"0.569723",
"0.56856006",
"0.563859",
"0.5629025",
"0.559797",
"0.55952424",
"0.55821764",
"0.5574605"
]
| 0.7178186 | 0 |
Mismatching is_dir in a child raises error | def test_build_children_mismatch_is_dir(self):
r_a = SystemFile("a", 0, True)
r_aa = SystemFile("aa", 0, True)
r_a.add_child(r_aa)
l_a = SystemFile("a", 0, True)
l_aa = SystemFile("aa", 0, False)
l_a.add_child(l_aa)
self.model_builder.set_remote_files([r_a])
self.model_builder.set_local_files([l_a])
with self.assertRaises(ModelError) as context:
self.model_builder.build_model()
self.assertTrue(str(context.exception).startswith("Mismatch in is_dir between child")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_dir(self, path):",
"def is_dir(self, path: PathLike):",
"def isdir (self, path):\r\n pass",
"def is_valid_directory(parser, arg):",
"def test_isdir(self, remote_mock_dir):\n\n with HdfsHook() as hook:\n assert hook.isdir(posixpath.join(remote_mock_dir, \"subdir\"))\n assert not hook.isdir(posixpath.join(remote_mock_dir, \"test.txt\"))",
"def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR",
"def ChildOrMatch(self, other):\n return self._dir == other or other.startswith(self._dir + \"/\")",
"def IsADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.EISDIR",
"def testIsDir(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingDirPath=P(self.nonExistingDirPathStr)\r\n existingDirNoTrailingSlashPath=P(self.existingDirPathStr[:-1])\r\n existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingInvalidDirSymlinkPath=P(self.existingInvalidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n\r\n # 1\r\n self.assertEquals(existingDirPath.isDir(),True,\r\n '%r is a dir'%str(existingDirPath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingDirPath.isDir(),False,\r\n '%r does not exist'%str(nonExistingDirPath))\r\n\r\n # 3\r\n self.assertEquals(existingDirNoTrailingSlashPath.isDir(),True,\r\n '%r is a dir'%str(existingDirNoTrailingSlashPath))\r\n\r\n # 4\r\n self.assertEquals(existingValidDirSymlinkPath.isDir(),True,\r\n '%r is a dir'%str(existingValidDirSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingInvalidDirSymlinkPath.isDir(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidDirSymlinkPath))\r\n\r\n # 6\r\n self.assertEquals(existingFilePath.isDir(),False,\r\n '%r is a file'%str(existingFilePath))",
"def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False",
"def test_valid_dir_raises():\n with pytest.raises(ValueError):\n assert cli._valid_dir(__file__)",
"def __is_dir(path):\n if path[-2:] == \"..\":\n return False\n try:\n os.listdir(path)\n return True\n except OSError:\n return False",
"def test_ensure_dir_exists(self):\n pass",
"def test_ls_no_shareddir():\n\n with bad_fixture() as root:\n assert next(pipeline.ls(root=root), None) is None",
"def check_is_dir(path):\n if not os.path.isdir(path):\n raise DirectoryNotFoundError(path)",
"def is_directory(self):\n return self._security_class == \"dir\"",
"def isdir(self):\n return self.path.endswith(self.sep)",
"def is_directory(self):\n return all(isinstance(child, PyModule) for child in self._children())",
"def ParentOrMatch(self, other):\n return self._dir == other or self._dir.startswith(other + \"/\")",
"def is_dir(value):\n if not (type(value) is str and os.path.isdir(value)):\n return False\n else:\n return True",
"def _is_directory(input_data) -> bool:\n # TODO(cezequiel): Implement in phase 2.\n _ = input_data\n return False",
"def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False",
"def _check_directories(self, dist, component):\n path = join(self.repository, 'dists', dist, component, 'source')\n\n if not isdir(path):\n makedirs(path)",
"def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False",
"def ensure_dir(self, *args):\n return self.ensure(*args, **{\"dir\": True})",
"def assert_is_dir_and_exists(self):\n if not self.is_dir():\n msg = \"'%s' is not a file or doesn't exists!\" % self\n raise EnvironmentError(msg)",
"def hisdir(file_path: str) -> bool:\n return os.path.isdir(file_path)",
"def is_dir(argstr):\n arg = Path(argstr)\n return arg.exists() and arg.is_dir()",
"def _is_dir(path: str)->bool:\n if _is_s3(path):\n return path.endswith(\"/\")\n else:\n return os.path.isdir(os.path.abspath(path))",
"def _is_child(self, parent, child): # type: (str, str) -> bool\n return child != parent and child.startswith(parent + \".\")"
]
| [
"0.73621666",
"0.70751095",
"0.70362127",
"0.68978703",
"0.66969264",
"0.66785115",
"0.66231143",
"0.6613813",
"0.65570325",
"0.65134746",
"0.6488475",
"0.6424379",
"0.635782",
"0.6347883",
"0.634357",
"0.6342583",
"0.63372755",
"0.63299614",
"0.63158935",
"0.6306682",
"0.6299065",
"0.6272218",
"0.624947",
"0.62392694",
"0.62162405",
"0.6211405",
"0.6160278",
"0.6159876",
"0.6159272",
"0.6158954"
]
| 0.77840406 | 0 |