field            type              range / values
query            stringlengths     9 to 9.05k
document         stringlengths     10 to 222k
metadata         dict
negatives        sequencelengths   30 to 30
negative_scores  sequencelengths   30 to 30
document_score   stringlengths     4 to 10
document_rank    stringclasses     2 values
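The fields above can be inspected directly once the dataset is loaded. A minimal sketch using the `datasets` library is shown below; the dataset path is a placeholder, not the actual repository name.

```python
# Minimal sketch, assuming the `datasets` library is installed.
# "org/code-search-triplets" is a placeholder path, not the real repository name.
from datasets import load_dataset

ds = load_dataset("org/code-search-triplets", split="train")
row = ds[0]

print(row["query"])                # natural-language query, e.g. "append name with postfix"
print(row["document"][:200])       # positive code snippet paired with the query
print(len(row["negatives"]))       # 30 hard-negative code snippets
print(row["negative_scores"][:3])  # similarity scores for the first few negatives
print(row["document_score"], row["document_rank"])
```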
append name with postfix
def append_name(name, postfix):
    if name is None:
        ret = None
    elif name == '':
        ret = postfix
    else:
        ret = '%s_%s' % (name, postfix)
    return ret
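A quick illustrative check of the function's three branches (hypothetical calls, not part of the dataset record):

```python
# Illustrative only: exercising the three branches of append_name.
assert append_name(None, "suffix") is None       # name missing -> None
assert append_name("", "suffix") == "suffix"     # empty name -> just the postfix
assert append_name("model", "v2") == "model_v2"  # normal case -> joined with "_"
```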
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_name(name):\r\n\r\n\r\n return name + \"-apple\"", "def add_name(self, node):\n if 'name' in self.options:\n name = nodes.fully_normalize_name(self.options.pop('name'))\n if 'name' in node:\n del(node['name'])\n node['names'].append(name)\n self.state.document.note_explicit_target(node, node)", "def add_suffix(name: str, suffix: str):\n return f'{name}_{suffix}'", "def append_to_path(path, name):\n if path[-1] == '/' or path[-1] == ':':\n return path + name\n else:\n return str(path) + str('/') + str(name)", "def create_policy_name(self, role_name, postfix):\n return '{}-{}-{}'.format(role_name, 'policy', postfix)", "def _add_name(self, msg, name):\n try:\n names = self.get_local(msg, \"names\")\n except KeyError:\n names = set()\n names.add(name)\n self.set_local(msg, \"names\", names)", "def addAlias(self, name):\r\n self._otherNames.append(name.strip().lower())", "def __add__(self, new_name: Tuple[str, str]) -> None:\n self.formal_names.update({new_name[0]: new_name[1]})", "def get_name(self, old_name):\n if old_name not in self.record:\n self.record[old_name] = [self.PLACEHOLDER]\n suffix = \"\"\n else:\n self.record[old_name].append(self.PLACEHOLDER)\n suffix = f\"{len(self.record[old_name]) - 1}\"\n\n new_name = f\"{old_name}{suffix}\"\n self.topo_order.append(new_name)\n\n return new_name", "def record_name(self, name: str) -> None:\n if self.is_top_level():\n self._toplevel_names.append(name)", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def add_prefix(self, name, uri):\n\n self.prefixes.append('%s: %s' % (name, uri))", "def new_name(self,new_name):\n self.name = new_name", "def _name (self, incAggr = True):\n\t\taggrName = \"@%s\" % self.aggr if self.aggr and incAggr else \"\"\n\t\ttag = \".%s\" % self.tag if self.tag != \"notag\" else \"\"\n\t\treturn \"%s%s%s\" % (self.id, tag, aggrName)", "def updateName(g):\n try:\n n = int(g.group(2))\n except TypeError:\n n = 0\n\n return \"%s-%d\" % (g.group(1), n + 1)", "def add_name(self, name):\n self.name = name", "def namePush(ctxt, value):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.namePush(ctxt__o, value)\n return ret", "def named_back_reference(name:str) -> str:\n # TODO error handling \n return f\"\\\\k<{name}>\"", "def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last", "def fullname(self, name):\n f, l = name.split(' ')\n self.first = f\n self.last = l", "def add_nickname(self, nickname):\n if 'Nicknames' not in self.properties:\n self.properties['Nicknames'] = []\n if (len(self.properties['Nicknames']) == 1 and self.properties['Nicknames'][0].startswith('Temp')):\n self.properties['Nicknames'][0] = nickname.title()\n else:\n self.properties['Nicknames'].append(nickname.title())", "def print_name(nome, sobrenome):\r\n return nome + \" \" + sobrenome", "def _generate_node_name(\n self,\n prefix,\n middle,\n suffix,\n ):\n name = ''\n if prefix:\n name += prefix + '-'\n name += middle\n if suffix:\n name += '-' + suffix\n\n return name", "def increment_name(base, existing):\r\n if not base in existing:\r\n return base\r\n n = 1\r\n make_name = lambda: base + str(n)\r\n while make_name() in existing:\r\n n += 1\r\n return 
make_name()", "def append_service_to_name(self, data, **kwargs):\n\n data['name'] = f'{data.get(\"name\").upper()}_SERVICE'\n return data", "def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest", "def _Name(self, t):\n self.write(t.id)", "def _prefix_name(self, name: str) -> str:\n name_to_check: str = name.split(\".\", 1)[0]\n if self.prefix == \"auto\":\n return f\"-{name}\" if len(name_to_check) <= 1 else f\"--{name}\"\n return f\"{self.prefix}{name}\"" ]
[ "0.6697877", "0.662184", "0.6614897", "0.6473923", "0.6254014", "0.6248267", "0.6226696", "0.6212746", "0.62123245", "0.60669625", "0.6044965", "0.6044965", "0.6044435", "0.60033596", "0.5903342", "0.5898558", "0.588601", "0.5874177", "0.5855147", "0.58351374", "0.5803622", "0.5779463", "0.57719445", "0.57650536", "0.5749083", "0.57302135", "0.57257324", "0.57250845", "0.5724131", "0.5718624" ]
0.8279311
0
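One plausible reading of `document_rank` is the positive document's position when its score is compared against the 30 negative scores; this is an interpretation, not something documented in the record itself. A small sketch of that reading:

```python
# Sketch of one plausible interpretation of document_rank: the number of
# negatives whose score exceeds the positive document's score. In this record,
# 0.8279311 beats every negative, giving rank 0.
negative_scores = [0.6697877, 0.662184, 0.6614897]  # truncated list from the record
document_score = 0.8279311
rank = sum(1 for s in negative_scores if s > document_score)
print(rank)  # 0
```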
Lands the rover and makes it part of the grid. Throws an exception if: a rover with that name already existed; the rover being landed has a bad direction; the rover's coordinates are off the grid; or a rover already exists on the grid at the rover's coordinates.
def land_rover(self, rover):
    if self.rovers.get(rover.name):
        raise RoverException(ExceptionMessages.ROVER_ALREADY_LANDED)
    if not Rover.valid_direction(rover.direction):
        raise RoverException(ExceptionMessages.BAD_DIRECTION)
    if not self._is_coordinate_in_the_grid(rover.coordinate):
        raise RoverException(ExceptionMessages.OFF_GRID)
    if self._is_coordinate_occupied(rover.coordinate):
        raise RoverException(ExceptionMessages.ROVER_COLLISION)
    self.rovers[rover.name] = rover
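A minimal self-contained sketch of how the landing checks play out. The `Grid`, `Rover`, `RoverException`, and `ExceptionMessages` classes below are stand-ins invented for illustration, based only on what `land_rover` references; they are not the dataset's actual implementation.

```python
# Every class here is a stand-in; only land_rover is copied from the record above.
class RoverException(Exception):
    pass

class ExceptionMessages:
    ROVER_ALREADY_LANDED = "rover already landed"
    BAD_DIRECTION = "bad direction"
    OFF_GRID = "off grid"
    ROVER_COLLISION = "rover collision"

class Rover:
    VALID_DIRECTIONS = {"N", "E", "S", "W"}

    def __init__(self, name, direction, coordinate):
        self.name, self.direction, self.coordinate = name, direction, coordinate

    @staticmethod
    def valid_direction(direction):
        return direction in Rover.VALID_DIRECTIONS

class Grid:
    def __init__(self, width, height):
        self.width, self.height = width, height
        self.rovers = {}

    def _is_coordinate_in_the_grid(self, coordinate):
        x, y = coordinate
        return 0 <= x <= self.width and 0 <= y <= self.height

    def _is_coordinate_occupied(self, coordinate):
        return any(r.coordinate == coordinate for r in self.rovers.values())

    # land_rover copied verbatim from the document field above
    def land_rover(self, rover):
        if self.rovers.get(rover.name):
            raise RoverException(ExceptionMessages.ROVER_ALREADY_LANDED)
        if not Rover.valid_direction(rover.direction):
            raise RoverException(ExceptionMessages.BAD_DIRECTION)
        if not self._is_coordinate_in_the_grid(rover.coordinate):
            raise RoverException(ExceptionMessages.OFF_GRID)
        if self._is_coordinate_occupied(rover.coordinate):
            raise RoverException(ExceptionMessages.ROVER_COLLISION)
        self.rovers[rover.name] = rover

grid = Grid(5, 5)
grid.land_rover(Rover("curiosity", "N", (1, 2)))      # lands successfully
try:
    grid.land_rover(Rover("curiosity", "E", (3, 3)))  # duplicate name
except RoverException as exc:
    print(exc)  # -> rover already landed (checked before any other condition)
```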
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n self.game.mark_complete(winner=self.owner)\n\n # Switch player turn\n self.game.switch_turn()\n\n # Let the game know about the move and result\n self.send_game_update()", "def move_of_king_and_rook(self, from_row, from_col, to_row, to_col): \n #provjere da li su kraljevi ili topovi inicirali pomijeranje\n if(from_row == 7 and from_col == 0):\n self.wrl_moved = True\n elif(from_row == 7 and from_col == 7):\n self.wrr_moved = True\n elif(from_row == 7 and from_col == 4):\n self.wk_moved = True\n elif(from_row == 0 and from_col == 0):\n self.brl_moved = True\n elif(from_row == 0 and from_col == 7):\n self.brr_moved = True\n elif(from_row == 0 and from_col == 4):\n self.bk_moved = True\n \n #provjera da li je neko pojeo topove\n if(to_row == 7 and to_col == 0):\n self.wrl_moved = True\n elif(to_row == 7 and to_col == 7):\n self.wrr_moved = True\n elif(to_row == 0 and to_col == 0):\n self.brl_moved = True\n elif(to_row == 0 and to_col == 7):\n self.brr_moved = True", "def make_move(self, playername, coordinates, direction):\n\n pass", "def place_pillar_e(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_e = x, y\r\n if self.pillar_e_room() == self.pillar_a_room() or \\\r\n self.pillar_e_room() == self.pillar_i_room() or \\\r\n self.pillar_e_room() == self.pillar_p_room() or \\\r\n self.pillar_e_room() == self.entrance_room() or \\\r\n self.pillar_e_room() == self.exit_room():\r\n return self.place_pillar_e()\r\n self.__maze[x][y].set_pillar_e(True)", "def place_road(self, road):\n\n # Check if space is empty\n if not self.environment.grid.is_cell_empty(road.pos):\n return False\n\n # Place Road\n self.environment.grid.place_agent(agent=road, pos=road.pos)\n\n # Add road to environment's road list\n self.environment.agents['roads'].append(road)\n\n # Update the list of cells where other things can be built\n self.update_available_cells(road)", "def process_rover(grid, start_at, instructions, name='rover'):\n plateu = None\n try:\n if isinstance(grid, str):\n x_end, y_end = grid.split(' ')\n x_end = int(x_end)\n y_end = int(y_end)\n plateu = Plateu(x_end, y_end, name)\n\n elif isinstance(grid, Plateu):\n plateu = grid\n\n else:\n raise ValueError(\"'grid' must be of type str or Plateu.\")\n\n except Exception as e:\n # Error handling code here for plateu here.\n print(e.message)\n return e # Should be re-raises and handled by API, CLI, etc.\n\n try:\n x, y, f = start_at.split(' ')\n x = int(x)\n y = int(y)\n rover = Rover(x, y, f, plateu, name)\n for i in range(len(instructions)):\n rover.position_rover(instructions[i])\n # Leaving this in comments for later debugging.\n # print(instructions[i] +\n # repr(rover.position_rover(instructions[i])))\n\n except Exception as e:\n # Error handling code here for rover here.\n print(e.message)\n return e # Should be re-raises and handled by API, CLI, etc.\n\n print(rover.get_position())\n return rover", "def path(self):\n\n path_direction = random.randrange(1, 6)\n 
# print(path_direction)\n\n # if current room is on the edge of the level (column 0 or column 4 and we did not drop to that room\n # because of hitting an edge in the previous assignment, assign the current room to be of type 2 and the\n # new room above it to be of type 3 so that the rooms connect\n if self.current_room_y in (0, 4) and self.edge_row_jump is False:\n self.room_type[self.current_room_x][self.current_room_y] = 3\n self.current_room_x += 1\n # if we are at the bottom of level and attempt to go down again, we will have found our start room. In this\n # we save the parameter and exit the loop\n if self.current_room_x > 4:\n self.room_type[self.current_room_x - 1][self.current_room_y] = 4\n self.start_room['row'] = self.current_room_x - 1\n self.start_room['column'] = self.current_room_y\n return True\n self.room_type[self.current_room_x][self.current_room_y] = 2\n # this is set to true so that we don't continue jumping up the side of the level\n self.edge_row_jump = True\n self.number_of_rooms += 1\n\n # if random number is 1 or 2 we move the path left and give that new room left/right exits\n elif path_direction in (1, 2):\n\n # if we are on the left edge of level then we shouldn't move left any further\n # if cell we are moving to has already been assigned then we should not move there either\n if self.current_room_y > 0 and self.room_type[self.current_room_x][self.current_room_y - 1] is 0:\n # we now have a new direction without jumping rows because of hitting an edge\n self.edge_row_jump = False\n # move current room to the left\n self.current_room_y -= 1\n # assign that room with a left/right exit\n self.room_type[self.current_room_x][self.current_room_y] = 1\n self.number_of_rooms += 1\n\n # if random number is 3 or 4 we move right and give that new room left/right exits\n elif path_direction in (3, 4):\n # check if the room we are moving to has already been assigned or is off the screen\n if self.current_room_y < 4 and self.room_type[self.current_room_x][self.current_room_y + 1] == 0:\n # we now have a new direction without jumping rows because of hitting an edge\n self.edge_row_jump = False\n # move current room to the right\n self.current_room_y += 1\n # assign that room with a left/right exit\n self.room_type[self.current_room_x][self.current_room_y] = 1\n self.number_of_rooms += 1\n\n # if random number is 5 then we are moving down\n elif self.number_of_rooms != 0 and path_direction is 5:\n self.edge_row_jump = False\n self.room_type[self.current_room_x][self.current_room_y] = 3\n # print cell to screen\n self.current_room_x += 1\n # if we are at bottom of level and attempt to go down again, we will have found our start room. 
In this\n # we save the parameter and exit the loop\n if self.current_room_x > 4:\n self.room_type[self.current_room_x - 1][self.current_room_y] = 4\n self.start_room['row'] = self.current_room_x - 1\n self.start_room['column'] = self.current_room_y\n return True\n self.room_type[self.current_room_x][self.current_room_y] = 2\n self.number_of_rooms += 1\n\n # print array to see if movements are correct\n # for row in self.room_type:\n # print(row)\n return False", "def test_human_cannot_move_through_grid_wall(mock_random):\n mock_random.randint.return_value = 0\n human = Human()\n\n coordinates = [0, 0]\n dimensions = [4, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n assert new_coordinates == [0, 0]", "def place_entrance(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__current_room = x, y # places adventurer in dungeon at start of game\r\n self.__entrance_room = x, y\r\n self.__maze[x][y].set_entrance(True)", "def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n self.assertEqual(rover.get_heading, 'W')", "def make_move(self): \n if self.counter == 0:\n #AI makes a random move to start\n ai_move = random.randrange(0,((self.size[0] * self.size[1]) - 1)) \n \n #Number to coordinate conversion\n row = ai_move % self.size[0]\n column = ai_move % self.size[0]\n self.start_game((row, column))\n self.counter = 1\n\n if (self.board[(row, column)] == 'm'):\n #print() \"\\n\", \"First move RIP!, what are the odds...\"\n self.found_mine()\n self.gameover = 1\n \n else:\n row, column = self.find_move()\n \n #0.25 second wait \n #time.sleep(0.25)\n\n #Prints out to the terminal the move and type of move\n print(row, \",\", column)\n\n #Updates the GUI\n root.update()\n \n if (self.board[(row, column)] == 'm'):\n print(\"RIP!\") \n self.found_mine() \n self.gameover = 1\n \n elif self.board[(row, column)] == '0':\n print(\"No mines in sight\") \n self.found_space((row, column))\n\n elif self.board[(row, column)] == '1':\n print(\"There is 1 mine next to this spot\") \n self.found_border((row, column))\n else:\n print(\"There are\", self.board[(row, column)], \"mines next to this spot\") \n self.found_border((row, column))", "def roof(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None, makeroof=True, makeceiling=True):\r\n global wallnum\r\n\r\n roof = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, y+height+self.ceilingthickness / 2, z), 0)\r\n self.walls.append(roof)\r\n roofmodel = Plane(w=length, h=width, name=name+str(wallnum))\r\n mergeshape.add(roofmodel,x,y+height+self.ceilingthickness,z,rx=90.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def __init__(self, _filename):\n # -- open text file containing maze\n self.file = open(_filename, 'r')\n self._grid = []\n # -- initialize line_list and append into list\n line_list = []\n lines = self.file.readlines()\n for line in lines:\n line = line.strip('\\n')\n line_list = [char for char in line]\n self._grid.append(line_list)\n # -- placing the player at the very start\n self._player = Player(1,2)\n self._grid[self._player._x][self._player._y] = POINT_OF_PLAYER\n self._grid[3][-1] = POINT_OF_EXIT\n \n \n\n # --- Rename the check method to can_move_to\n \"\"\" \n :return: return False if the location is a wall, otherwise return True\n 
:rtype: bool\n \"\"\"", "def create_neighborhood(self):\n if len(self.available_building_cells) == 0:\n return False\n # Pick cell\n shuffle(self.available_building_cells)\n\n neighborhood_origin = self.available_building_cells[0]\n if not self.creates_valid_building(neighborhood_origin):\n # If not a valid placement, remove location from list\n self.available_building_cells.remove(neighborhood_origin)\n # Retry!\n self.create_neighborhood()\n return True # Exit after neighborhood is created\n\n final_cells = [neighborhood_origin]\n self.available_building_cells.remove(neighborhood_origin)\n\n # Place building on origin\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_origin, attractiveness=random()))\n neighborhood_cells = self.environment.grid.get_neighborhood(neighborhood_origin, moore=True, include_center=True)\n\n # Create a random number of residence buildings in this neighborhood\n number_of_residences = randrange(2,6)\n for i in range(number_of_residences):\n while len(neighborhood_cells) > 0:\n shuffle(neighborhood_cells)\n # Only place building if space is empty\n if self.environment.grid.is_cell_empty(neighborhood_cells[0]):\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_cells[0], attractiveness=random()))\n final_cells.append(neighborhood_cells[0])\n try:\n # If this space was available before, remove it from list\n self.available_building_cells.remove(neighborhood_cells[0])\n except:\n pass\n\n continue\n\n # Remove cell from list\n neighborhood_cells.remove(neighborhood_cells[0])\n\n # Fill surrounding space around buildings with roads!\n for building_location in final_cells:\n for surrounding_cell in self.environment.grid.get_neighborhood(building_location, moore=True):\n if self.environment.grid.is_cell_empty(surrounding_cell):\n self.place_road(Road(surrounding_cell))\n\n return True", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def make_swivelknife_move(self):\n offset = self.shape.parentLayer.getToolRadius()\n drag_angle = self.shape.drag_angle\n\n startnorm = offset*Point(1, 0) # TODO make knife direction a config setting\n prvend, prvnorm = Point(), Point()\n first = True\n\n for geo in self.shape.geos.abs_iter():\n if isinstance(geo, LineGeo):\n geo_b = deepcopy(geo)\n if first:\n first = False\n prvend = geo_b.Ps + startnorm\n prvnorm = startnorm\n norm = offset * (geo_b.Pe - geo_b.Ps).unit_vector()\n geo_b.Ps += norm\n geo_b.Pe += norm\n if not prvnorm == norm:\n direction = prvnorm.to3D().cross_product(norm.to3D()).z\n swivel = ArcGeo(Ps=prvend, Pe=geo_b.Ps, r=offset, direction=direction)\n swivel.drag = drag_angle < abs(swivel.ext)\n self.append(swivel)\n self.append(geo_b)\n\n prvend = geo_b.Pe\n prvnorm = norm\n elif isinstance(geo, ArcGeo):\n geo_b = deepcopy(geo)\n if first:\n first = False\n prvend = geo_b.Ps + startnorm\n prvnorm = startnorm\n if geo_b.ext > 0.0:\n norma = offset*Point(cos(geo_b.s_ang+pi/2), sin(geo_b.s_ang+pi/2))\n norme = Point(cos(geo_b.e_ang+pi/2), 
sin(geo_b.e_ang+pi/2))\n else:\n norma = offset*Point(cos(geo_b.s_ang-pi/2), sin(geo_b.s_ang-pi/2))\n norme = Point(cos(geo_b.e_ang-pi/2), sin(geo_b.e_ang-pi/2))\n geo_b.Ps += norma\n if norme.x > 0:\n geo_b.Pe = Point(geo_b.Pe.x+offset/(sqrt(1+(norme.y/norme.x)**2)),\n geo_b.Pe.y+(offset*norme.y/norme.x)/(sqrt(1+(norme.y/norme.x)**2)))\n elif norme.x == 0:\n geo_b.Pe = Point(geo_b.Pe.x,\n geo_b.Pe.y)\n else:\n geo_b.Pe = Point(geo_b.Pe.x-offset/(sqrt(1+(norme.y/norme.x)**2)),\n geo_b.Pe.y-(offset*norme.y/norme.x)/(sqrt(1+(norme.y/norme.x)**2)))\n if prvnorm != norma:\n direction = prvnorm.to3D().cross_product(norma.to3D()).z\n swivel = ArcGeo(Ps=prvend, Pe=geo_b.Ps, r=offset, direction=direction)\n swivel.drag = drag_angle < abs(swivel.ext)\n self.append(swivel)\n prvend = geo_b.Pe\n prvnorm = offset*norme\n if -pi < geo_b.ext < pi:\n self.append(ArcGeo(Ps=geo_b.Ps, Pe=geo_b.Pe, r=sqrt(geo_b.r**2+offset**2), direction=geo_b.ext))\n else:\n geo_b = ArcGeo(Ps=geo_b.Ps, Pe=geo_b.Pe, r=sqrt(geo_b.r**2+offset**2), direction=-geo_b.ext)\n geo_b.ext = -geo_b.ext\n self.append(geo_b)\n # TODO support different geos, or disable them in the GUI\n # else:\n # self.append(copy(geo))\n if not prvnorm == startnorm:\n direction = prvnorm.to3D().cross_product(startnorm.to3D()).z\n self.append(ArcGeo(Ps=prvend, Pe=prvend-prvnorm+startnorm, r=offset, direction=direction))\n\n self.geos.insert(0, RapidPos(self.geos.abs_el(0).Ps))\n self.geos[0].make_abs_geo()", "async def land(self, msg, distance):\n\t\tself.tile[self.p] += distance\n\t\tif self.tile[self.p] >= 40: #past go\n\t\t\tself.tile[self.p] -= 40\n\t\t\tdoDoubleGo = await self.cog.config.guild(self.ctx.guild).doDoubleGo()\n\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\tif self.tile[self.p] == 0 and doDoubleGo:\n\t\t\t\tadd = goValue * 2\n\t\t\telse:\n\t\t\t\tadd = goValue\n\t\t\tself.bal[self.p] += add\n\t\t\tmsg += (\n\t\t\t\tf'You {\"landed on\" if self.tile[self.p] == 0 else \"passed\"} go, +${add}! '\n\t\t\t\tf'You now have ${self.bal[self.p]}.\\n'\n\t\t\t)\n\t\tmsg += f'You landed at {TILENAME[self.tile[self.p]]}.\\n'\n\t\tif self.ownedby[self.tile[self.p]] == self.p: #player is owner\n\t\t\tmsg += 'You own this property already.\\n'\n\t\telif self.ismortgaged[self.tile[self.p]] == 1: #mortgaged\n\t\t\tmsg += 'It is currently mortgaged. No rent is due.\\n'\n\t\telif self.ownedby[self.tile[self.p]] == -2: #unownable\n\t\t\tif self.tile[self.p] == 0: #go\n\t\t\t\tpass #already handled when moving\n\t\t\telif self.tile[self.p] == 10: #jail\n\t\t\t\tmsg += 'Just visiting!\\n'\n\t\t\telif self.tile[self.p] == 20: #free parking\n\t\t\t\tfreeParkingValue = await self.cog.config.guild(self.ctx.guild).freeParkingValue()\n\t\t\t\tif freeParkingValue is None: #no reward\n\t\t\t\t\tpass\n\t\t\t\telif freeParkingValue == 'tax': #tax reward\n\t\t\t\t\tself.bal[self.p] += self.freeparkingsum\n\t\t\t\t\tmsg += (\n\t\t\t\t\t\tf'You earned ${self.freeparkingsum}. You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\t)\n\t\t\t\t\tself.freeparkingsum = 0\n\t\t\t\telse: #hard coded reward\n\t\t\t\t\tself.bal[self.p] += freeParkingValue\n\t\t\t\t\tmsg += f'You earned ${freeParkingValue}. 
You now have ${self.bal[self.p]}.\\n'\n\t\t\telif self.tile[self.p] == 30: #go to jail\n\t\t\t\tself.injail[self.p] = True\n\t\t\t\tself.tile[self.p] = 10\n\t\t\t\tself.was_doubles = False\n\t\t\t\tmsg += 'You are now in jail!\\n'\n\t\t\telif self.tile[self.p] in (2, 17, 33): #cc\n\t\t\t\tcard = self.ccorder[self.ccn]\n\t\t\t\tmsg += f'Your card reads:\\n{CCNAME[card]}\\n'\n\t\t\t\tif card == 0:\n\t\t\t\t\tself.tile[self.p] = 0\n\t\t\t\t\tdoDoubleGo = await self.cog.config.guild(self.ctx.guild).doDoubleGo()\n\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\tif doDoubleGo:\n\t\t\t\t\t\tself.bal[self.p] += goValue * 2\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 1:\n\t\t\t\t\tself.bal[self.p] += 200\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 2:\n\t\t\t\t\tself.bal[self.p] -= 50\n\t\t\t\t\tself.freeparkingsum += 50\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 3:\n\t\t\t\t\tself.bal[self.p] += 50\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 4:\n\t\t\t\t\tself.goojf[self.p] += 1\n\t\t\t\t\tif self.goojf[self.p] == 1:\n\t\t\t\t\t\tmsg += 'You now have 1 get out of jail free card.\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg += f'You now have {self.goojf[self.p]} get out of jail free cards.\\n'\n\t\t\t\telif card == 5:\n\t\t\t\t\tself.tile[self.p] = 10\n\t\t\t\t\tself.injail[self.p] = True\n\t\t\t\t\tself.was_doubles = False\n\t\t\t\telif card == 6:\n\t\t\t\t\tself.bal[self.p] += 50 * (self.numalive - 1)\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tfor i in range(self.num):\n\t\t\t\t\t\tif self.isalive[i] and not i == self.p:\n\t\t\t\t\t\t\tmem = await self.get_member(self.uid[i])\n\t\t\t\t\t\t\tself.bal[i] -= 50\n\t\t\t\t\t\t\tmsg += f'{mem.display_name} now has ${self.bal[i]}.\\n'\n\t\t\t\telif card in (7, 10, 16):\n\t\t\t\t\tself.bal[self.p] += 100\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 8:\n\t\t\t\t\tself.bal[self.p] += 20\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card in (9, 15):\n\t\t\t\t\tself.bal[self.p] += 10\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 11:\n\t\t\t\t\tself.bal[self.p] -= 100\n\t\t\t\t\tself.freeparkingsum += 100\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 12:\n\t\t\t\t\tself.bal[self.p] -= 150\n\t\t\t\t\tself.freeparkingsum += 150\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 13:\n\t\t\t\t\tself.bal[self.p] += 25\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 14:\n\t\t\t\t\tpay = 0\n\t\t\t\t\tfor i in range(40):\n\t\t\t\t\t\tif self.ownedby[i] == self.p:\n\t\t\t\t\t\t\tif self.numhouse[i] == 0 or self.numhouse[i] == -1:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\telif self.numhouse[i] == 5:\n\t\t\t\t\t\t\t\tpay += 115\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpay += 40 * self.numhouse[i]\n\t\t\t\t\tself.bal[self.p] -= pay\n\t\t\t\t\tmsg += f'You paid ${pay} in repairs. 
You now have ${self.bal[self.p]}.\\n'\n\t\t\t\tself.ccn += 1\n\t\t\t\tif self.ccn > 16:\n\t\t\t\t\tshuffle(self.ccorder)\n\t\t\t\t\tself.ccn = 0\n\t\t\telif self.tile[self.p] in (7, 22, 36): #chance\n\t\t\t\tcard = self.chanceorder[self.chancen]\n\t\t\t\tmsg += f'Your card reads:\\n{CHANCENAME[card]}\\n'\n\t\t\t\tif card == 0:\n\t\t\t\t\tself.tile[self.p] = 0\n\t\t\t\t\tdoDoubleGo = await self.cog.config.guild(self.ctx.guild).doDoubleGo()\n\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\tif doDoubleGo:\n\t\t\t\t\t\tself.bal[self.p] += goValue * 2\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 1:\n\t\t\t\t\tif self.tile[self.p] > 24:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tself.tile[self.p] = 24\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 2:\n\t\t\t\t\tif self.tile[self.p] > 11:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tself.tile[self.p] = 11\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 3:\n\t\t\t\t\tif self.tile[self.p] <= 12:\n\t\t\t\t\t\tself.tile[self.p] = 12\n\t\t\t\t\telif 12 < self.tile[self.p] <= 28:\n\t\t\t\t\t\tself.tile[self.p] = 28\n\t\t\t\t\telse:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\t\tself.tile[self.p] = 12\n\t\t\t\t\t#must pay 10x rent if owned\n\t\t\t\t\tif (\n\t\t\t\t\t\tself.ownedby[self.tile[self.p]] != self.p\n\t\t\t\t\t\tand self.ownedby[self.tile[self.p]] >= 0\n\t\t\t\t\t\tand self.ismortgaged[self.tile[self.p]] != 1\n\t\t\t\t\t):\n\t\t\t\t\t\tmemown = await self.get_member(\n\t\t\t\t\t\t\tself.uid[self.ownedby[self.tile[self.p]]]\n\t\t\t\t\t\t)\n\t\t\t\t\t\tself.bal[self.p] -= distance * 10\n\t\t\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += distance * 10\n\t\t\t\t\t\tmsg += (\n\t\t\t\t\t\t\tf'You paid ${distance * 10} of rent to {memown.display_name}. '\n\t\t\t\t\t\t\tf'You now have ${self.bal[self.p]}. 
{memown.display_name} now has '\n\t\t\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 4:\n\t\t\t\t\tif self.tile[self.p] <= 5:\n\t\t\t\t\t\tself.tile[self.p] = 5\n\t\t\t\t\telif self.tile[self.p] <= 15:\n\t\t\t\t\t\tself.tile[self.p] = 15\n\t\t\t\t\telif self.tile[self.p] <= 25:\n\t\t\t\t\t\tself.tile[self.p] = 25\n\t\t\t\t\telif self.tile[self.p] <= 35:\n\t\t\t\t\t\tself.tile[self.p] = 35\n\t\t\t\t\telse:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\t\tself.tile[self.p] = 5\n\t\t\t\t\t#must pay 2x rent if owned\n\t\t\t\t\tif (\n\t\t\t\t\t\tself.ownedby[self.tile[self.p]] != self.p\n\t\t\t\t\t\tand self.ownedby[self.tile[self.p]] >= 0\n\t\t\t\t\t\tand self.ismortgaged[self.tile[self.p]] != 1\n\t\t\t\t\t):\n\t\t\t\t\t\tmemown = await self.get_member(\n\t\t\t\t\t\t\tself.uid[self.ownedby[self.tile[self.p]]]\n\t\t\t\t\t\t)\n\t\t\t\t\t\trrcount = 0\n\t\t\t\t\t\tif self.ownedby[5] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\t\t\trrcount += 1\n\t\t\t\t\t\tif self.ownedby[15] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\t\t\trrcount += 1\n\t\t\t\t\t\tif self.ownedby[25] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\t\t\trrcount += 1\n\t\t\t\t\t\tif self.ownedby[35] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\t\t\trrcount += 1\n\t\t\t\t\t\tself.bal[self.p] -= RRPRICE[rrcount] * 2\n\t\t\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += RRPRICE[rrcount] * 2\n\t\t\t\t\t\tmsg += (\n\t\t\t\t\t\t\tf'You paid ${RRPRICE[rrcount] * 2} of rent to {memown.display_name}. '\n\t\t\t\t\t\t\tf'You now have ${self.bal[self.p]}. {memown.display_name} now has '\n\t\t\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 5:\n\t\t\t\t\tself.bal[self.p] += 50\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 6:\n\t\t\t\t\tself.goojf[self.p] += 1\n\t\t\t\t\tif self.goojf[self.p] == 1:\n\t\t\t\t\t\tmsg += 'You now have 1 get out of jail free card.\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg += f'You now have {self.goojf[self.p]} get out of jail free cards.\\n'\n\t\t\t\telif card == 7:\n\t\t\t\t\tself.tile[self.p] -= 3\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 8:\n\t\t\t\t\tself.tile[self.p] = 10\n\t\t\t\t\tself.injail[self.p] = True\n\t\t\t\t\tself.was_doubles = False\n\t\t\t\telif card == 9:\n\t\t\t\t\tpay = 0\n\t\t\t\t\tfor i in range(40):\n\t\t\t\t\t\tif self.ownedby[i] == self.p:\n\t\t\t\t\t\t\tif self.numhouse[i] == 0 or self.numhouse[i] == -1:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\telif self.numhouse[i] == 5:\n\t\t\t\t\t\t\t\tpay += 100\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpay += 25 * self.numhouse[i]\n\t\t\t\t\tself.bal[self.p] -= pay\n\t\t\t\t\tmsg += f'You paid ${pay} in repairs. 
You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 10:\n\t\t\t\t\tself.bal[self.p] -= 15\n\t\t\t\t\tself.freeparkingsum += 15\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 11:\n\t\t\t\t\tif self.tile[self.p] > 5:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tself.tile[self.p] = 5\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 12:\n\t\t\t\t\tself.tile[self.p] = 39\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 13:\n\t\t\t\t\tself.bal[self.p] -= 50 * (self.numalive - 1)\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tfor i in range(self.num):\n\t\t\t\t\t\tif self.isalive[i] and not i == self.p:\n\t\t\t\t\t\t\tmem = await self.get_member(self.uid[i])\n\t\t\t\t\t\t\tself.bal[i] += 50\n\t\t\t\t\t\t\tmsg += f'{mem.display_name} now has ${self.bal[i]}.\\n'\n\t\t\t\telif card == 14:\n\t\t\t\t\tself.bal[self.p] += 150\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 15:\n\t\t\t\t\tself.bal[self.p] += 100\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\tself.chancen += 1\n\t\t\t\tif self.chancen > 15:\n\t\t\t\t\tshuffle(self.chanceorder)\n\t\t\t\t\tself.chancen = 0\n\t\t\telif self.tile[self.p] == 4: #income tax\n\t\t\t\tincomeValue = await self.cog.config.guild(self.ctx.guild).incomeValue()\n\t\t\t\tself.bal[self.p] -= incomeValue\n\t\t\t\tself.freeparkingsum += incomeValue\n\t\t\t\tmsg += (\n\t\t\t\t\tf'You paid ${incomeValue} of Income Tax. You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t)\n\t\t\telif self.tile[self.p] == 38: #luxury tax\n\t\t\t\tluxuryValue = await self.cog.config.guild(self.ctx.guild).luxuryValue()\n\t\t\t\tself.bal[self.p] -= luxuryValue\n\t\t\t\tself.freeparkingsum += luxuryValue\n\t\t\t\tmsg += (\n\t\t\t\t\tf'You paid ${luxuryValue} of Luxury Tax. You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t)\n\t\telif self.ownedby[self.tile[self.p]] == -1: #unowned and ownable\n\t\t\tif self.bal[self.p] >= PRICEBUY[self.tile[self.p]]: #can afford\n\t\t\t\tmsg += (\n\t\t\t\t\tf'Would you like to buy {TILENAME[self.tile[self.p]]} '\n\t\t\t\t\tf'for ${PRICEBUY[self.tile[self.p]]}? 
(y/n) You have ${self.bal[self.p]}.'\n\t\t\t\t)\n\t\t\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\t\t\tawait self.ctx.send(msg)\n\t\t\t\tchoice = await self.bot.wait_for(\n\t\t\t\t\t'message',\n\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t\tand m.content.lower() in ('y', 'yes', 'n', 'no')\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\tchoice = choice.content[0].lower()\n\t\t\t\tif choice == 'y': #buy property\n\t\t\t\t\tself.bal[self.p] -= PRICEBUY[self.tile[self.p]]\n\t\t\t\t\tself.ownedby[self.tile[self.p]] = self.p\n\t\t\t\t\tmsg = (\n\t\t\t\t\t\tf'You now own {TILENAME[self.tile[self.p]]}!\\n'\n\t\t\t\t\t\tf'You have ${self.bal[self.p]} remaining.\\n'\n\t\t\t\t\t)\n\t\t\t\telse: #pass on property\n\t\t\t\t\tmsg = ''\n\t\t\t\t\tdoAuction = await self.cog.config.guild(self.ctx.guild).doAuction()\n\t\t\t\t\tif doAuction:\n\t\t\t\t\t\tmsg = await self.auction(msg)\n\t\t\telse: #cannot afford\n\t\t\t\tmsg += (\n\t\t\t\t\tf'You cannot afford to buy {TILENAME[self.tile[self.p]]}, '\n\t\t\t\t\tf'you only have ${self.bal[self.p]} of ${PRICEBUY[self.tile[self.p]]}.\\n'\n\t\t\t\t)\n\t\t\t\tdoAuction = await self.cog.config.guild(self.ctx.guild).doAuction()\n\t\t\t\tif doAuction:\n\t\t\t\t\tmsg = await self.auction(msg)\n\t\telif RENTPRICE[self.tile[self.p]*6] == -1: #pay rr/util rent\n\t\t\tmemown = await self.get_member(self.uid[self.ownedby[self.tile[self.p]]])\n\t\t\tif self.tile[self.p] in (12, 28): #utility\n\t\t\t\tif self.ownedby[12] == self.ownedby[28]: #own both\n\t\t\t\t\tself.bal[self.p] -= distance * 10\n\t\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += distance * 10\n\t\t\t\t\tmsg += (\n\t\t\t\t\t\tf'You paid ${distance * 10} of rent to {memown.display_name}. '\n\t\t\t\t\t\tf'You now have ${self.bal[self.p]}. {memown.display_name} now has '\n\t\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t\t)\n\t\t\t\telse: #own only one\n\t\t\t\t\tself.bal[self.p] -= distance * 4\n\t\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += distance * 4\n\t\t\t\t\tmsg += (\n\t\t\t\t\t\tf'You paid ${distance * 4} of rent to {memown.display_name}. '\n\t\t\t\t\t\tf'You now have ${self.bal[self.p]}. {memown.display_name} now has '\n\t\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t\t) \n\t\t\telif self.tile[self.p] in (5, 15, 25, 35): #railroad\n\t\t\t\trrcount = 0\n\t\t\t\tif self.ownedby[5] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\trrcount += 1\n\t\t\t\tif self.ownedby[15] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\trrcount += 1\n\t\t\t\tif self.ownedby[25] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\trrcount += 1\n\t\t\t\tif self.ownedby[35] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\trrcount += 1\n\t\t\t\tself.bal[self.p] -= RRPRICE[rrcount]\n\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += RRPRICE[rrcount]\n\t\t\t\tmsg += (\n\t\t\t\t\tf'You paid ${RRPRICE[rrcount]} of rent to {memown.display_name}. '\n\t\t\t\t\tf'You now have ${self.bal[self.p]}. 
{memown.display_name} now has '\n\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t)\n\t\telse: #pay normal rent\n\t\t\tmemown = await self.get_member(self.uid[self.ownedby[self.tile[self.p]]])\n\t\t\tisMonopoly = False\n\t\t\tfor group in PROPGROUPS:\n\t\t\t\tif self.tile[self.p] in group:\n\t\t\t\t\tif all(\n\t\t\t\t\t\t[self.ownedby[self.tile[self.p]] == self.ownedby[prop] for prop in group]\n\t\t\t\t\t):\n\t\t\t\t\t\tisMonopoly = True\n\t\t\t\t\tbreak\n\t\t\tif isMonopoly and self.numhouse[self.tile[self.p]] == 0: #2x rent\n\t\t\t\trent = 2 * RENTPRICE[self.tile[self.p] * 6]\n\t\t\telse: #normal rent\n\t\t\t\trent = RENTPRICE[(self.tile[self.p] * 6) + self.numhouse[self.tile[self.p]]]\n\t\t\tself.bal[self.p] -= rent\n\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += rent\n\t\t\tmsg += (\n\t\t\t\tf'You paid ${rent} of rent to {memown.display_name}. '\n\t\t\t\tf'You now have ${self.bal[self.p]}. '\n\t\t\t\tf'{memown.display_name} now has ${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t)\n\t\tif self.bal[self.p] < 0:\n\t\t\tmsg = await self.debt(msg)\n\t\treturn msg", "def repairWall(self, game_state):\n first_row = [[0, 13], [1, 13],[2, 13],[3, 13],[4, 13],[5, 13],[6, 13],[7, 13],[8, 13],[9, 13],[10, 13],[11, 13],[12, 13],[13, 13],[15, 13],[16, 13],[17, 13],[18, 13],[19, 13],[20, 13],[21, 13],[22, 13],[23, 13],[24, 13],[25, 13],[26, 13],[27, 13]]\n destructor_loc1 = [[12,11], [16,11]]\n second_row = [[13, 12],[15, 12],[12, 12],[16, 12],[11, 12],[17, 12],[1, 12],[2, 12],[3, 12],[4, 12],[5, 12],[6, 12],[7, 12],[8, 12],[9, 12],[10, 12],[18, 12],[19, 12],[20, 12],[21, 12],[22, 12],[23, 12],[24, 12],[25, 12],[26, 12]]\n destructor_loc2 = [[8,11], [20,11]]\n encryptor_loc1 = [[13,11], [15,11]]\n destructor_loc3 = [[4,11], [24,11]]\n encryptor_row1 = [[13,10], [15,10]]\n destructor_row1 = [[12,10], [16,10]]\n encryptor_row2 = [[13,9], [15,9]]\n destructor_row2 = [[12,9], [16,9]]\n encryptor_row3 = [[13,8], [15,8]]\n destructor_row3 = [[12,8], [16,8]]\n\n for location in first_row:\n if game_state.can_spawn(FILTER, location):\n game_state.attempt_spawn(FILTER, location)\n\n for location in destructor_loc1:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in second_row:\n if game_state.can_spawn(FILTER, location):\n game_state.attempt_spawn(FILTER, location)\n\n for location in destructor_loc2:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_loc1:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_loc3:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row1:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row1:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row2:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row2:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row3:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row3:\n if game_state.can_spawn(DESTRUCTOR, location):\n 
game_state.attempt_spawn(DESTRUCTOR, location)", "def rop():\n return", "def make_move(self):\n\n # If the agent is starting a game, make an \n # initial move\n if self.get_play_status() == False: \n self.initial_move()\n return\n\n # for speeds sake, allow the reflex agent to respond to manual\n # input. comment out for automatic running.\n x = int(input('hotwire x:'))\n y = int(input('hotwire y:'))\n return self.get_game_space().set_tile(x,y,self.get_affinity())\n\n # Check wheather the the agent side is going to \n # win by making one move, make the move\n # OR\n # Check if the oponent has a compromising move \n best_move = self.victory_check()\n if best_move is None: best_move = self.counter_opponent_win()\n if best_move is None: best_move = self.counter_opponent_adv()\n if best_move is None: best_move = self.best_last_option()\n if best_move != None: \n x = best_move[0]\n y = best_move[1]\n return self.get_game_space().set_tile(x,y,self.get_affinity())", "def new_move(self, grid_x, grid_y, player):\n #duplication /!\\\n if player == self.X:\n self.draw_X(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.X\n\n elif player == self.O:\n self.draw_O(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.O", "def place_pillar_a(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_a = x, y\r\n if self.pillar_a_room() == self.pillar_e_room() or \\\r\n self.pillar_a_room() == self.pillar_i_room() or \\\r\n self.pillar_a_room() == self.pillar_p_room() or \\\r\n self.pillar_a_room() == self.entrance_room() or \\\r\n self.pillar_a_room() == self.exit_room():\r\n return self.place_pillar_a()\r\n self.__maze[x][y].set_pillar_a(True)", "def navigate_rover(self, name, instruction_str):\n\n rover = self.rovers.get(name)\n if not rover:\n raise RoverException(ExceptionMessages.BAD_NAME)\n\n coordinate = copy.deepcopy(rover.coordinate)\n direction = rover.direction\n\n for instruction in instruction_str:\n\n if instruction == 'L' or instruction == 'R':\n direction = self._direction_after_turning(direction, instruction)\n elif instruction == 'M':\n coordinate = self._coordinate_after_moving(direction, coordinate)\n else:\n raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)\n\n # This means we have processed all the instructions without exception\n # assign new direction and coordinates to rover\n rover.direction = direction\n rover.coordinate = coordinate", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = 
deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def move_repeatedly():\n check = check50.run(run_command)\n check.stdin(\"WEST\").stdout(room_2_description)\n check.stdin(\"EAST\").stdout(room_1_name)\n check.stdin(\"WEST\").stdout(room_2_name)", "def move_marble(self, coordinates, direction):\n pass", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move_buildings(self):" ]
[ "0.62105554", "0.620282", "0.59880394", "0.5978847", "0.5967129", "0.5945851", "0.5943394", "0.59243786", "0.5899155", "0.5898589", "0.5803447", "0.5776576", "0.5759452", "0.5755554", "0.5738934", "0.57336867", "0.5733618", "0.5726307", "0.57162607", "0.5699025", "0.5692675", "0.5677311", "0.56651026", "0.56284827", "0.56274664", "0.56126356", "0.56100196", "0.56078", "0.56078", "0.55932343" ]
0.68636113
0
Tries to navigate and reposition the rover on the grid. Throws an exception if: it cannot find that rover on the grid; a bad instruction is passed; or executing the instruction string would cause a collision with another rover on the grid.
def navigate_rover(self, name, instruction_str):
    rover = self.rovers.get(name)
    if not rover:
        raise RoverException(ExceptionMessages.BAD_NAME)

    coordinate = copy.deepcopy(rover.coordinate)
    direction = rover.direction

    for instruction in instruction_str:
        if instruction == 'L' or instruction == 'R':
            direction = self._direction_after_turning(direction, instruction)
        elif instruction == 'M':
            coordinate = self._coordinate_after_moving(direction, coordinate)
        else:
            raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)

    # This means we have processed all the instructions without exception
    # assign new direction and coordinates to rover
    rover.direction = direction
    rover.coordinate = coordinate
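A short, self-contained sketch of the L/R/M instruction handling this method relies on. The turning table and movement deltas below are assumptions about the `_direction_after_turning` and `_coordinate_after_moving` helpers, which are not included in the record.

```python
# Illustrative stand-ins for the two helpers navigate_rover calls; the real
# implementations are not part of the record, so these are assumptions.
LEFT_OF = {'N': 'W', 'W': 'S', 'S': 'E', 'E': 'N'}
RIGHT_OF = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}
MOVE_DELTA = {'N': (0, 1), 'E': (1, 0), 'S': (0, -1), 'W': (-1, 0)}

def direction_after_turning(direction, instruction):
    return LEFT_OF[direction] if instruction == 'L' else RIGHT_OF[direction]

def coordinate_after_moving(direction, coordinate):
    dx, dy = MOVE_DELTA[direction]
    return (coordinate[0] + dx, coordinate[1] + dy)

# Walking the classic "LMLMLMLMM" instruction string from (1, 2) facing N.
direction, coordinate = 'N', (1, 2)
for instruction in "LMLMLMLMM":
    if instruction in ('L', 'R'):
        direction = direction_after_turning(direction, instruction)
    elif instruction == 'M':
        coordinate = coordinate_after_moving(direction, coordinate)

print(coordinate, direction)  # -> (1, 3) N
```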
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_rover(grid, start_at, instructions, name='rover'):\n plateu = None\n try:\n if isinstance(grid, str):\n x_end, y_end = grid.split(' ')\n x_end = int(x_end)\n y_end = int(y_end)\n plateu = Plateu(x_end, y_end, name)\n\n elif isinstance(grid, Plateu):\n plateu = grid\n\n else:\n raise ValueError(\"'grid' must be of type str or Plateu.\")\n\n except Exception as e:\n # Error handling code here for plateu here.\n print(e.message)\n return e # Should be re-raises and handled by API, CLI, etc.\n\n try:\n x, y, f = start_at.split(' ')\n x = int(x)\n y = int(y)\n rover = Rover(x, y, f, plateu, name)\n for i in range(len(instructions)):\n rover.position_rover(instructions[i])\n # Leaving this in comments for later debugging.\n # print(instructions[i] +\n # repr(rover.position_rover(instructions[i])))\n\n except Exception as e:\n # Error handling code here for rover here.\n print(e.message)\n return e # Should be re-raises and handled by API, CLI, etc.\n\n print(rover.get_position())\n return rover", "def test_7_replay_4(self):\n self._execute_replay_nr(4)\n\n self.grid.add_pawn(5, 'H')\n self.grid.add_pawn(3, 'B')\n self.grid.add_pawn(2, 'H')\n self.grid.add_pawn(1, 'B')\n self.grid.add_pawn(1, 'H')\n\n # self.grid.print_grid()\n # print(self.minmaxBot_7.choose_move(self.grid))", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move_right():\n return __maze.move_right()", "def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n self.assertEqual(rover.get_heading, 'W')", "def nextmove(x,y,xr,yr,count,xsgn=1,ysgn=1,redo=False,redo_fail=False,back=False,\n noback=False,backret=True,wander=True,silent=False):\n\n global BTRACK, GSTRUC, NPIX\n\n endflag = False\n \n\n # This is the very end \n if (x==xr[1] and y==yr[1]):\n endflag = True\n return None,None,None,None,None,False,False,endflag\n \n\n # If back, redo and BACKRET=1 then return to pre-redo position\n #=============================================================\n # This is done separately from the normal algorithm \n if backret and back and redo: \n back = False\n lastcount = BTRACK['count']\n newx = BTRACK['data'][-1]['lastx']\n newy = BTRACK['data'][-1]['lasty']\n lastx = BTRACK['data'][-1]['x']\n lasty = BTRACK['data'][-1]['y']\n par0 = BTRACK['data'][-1]['par'] # parameters from the current position\n\n # p0 is the redo position, p5 is the pre-redo position \n p0,res0 = gfind(lastx,lasty,xr=xr,yr=yr)\n p5,res5 = gfind(newx,newy,xr=xr,yr=yr)\n\n 
b,dbic = gbetter(res0,res5)\n redo = gredo(newx,newy,lastx,lasty,par0) \n\n # back position better, redo pre-redo position \n if (dbic<0) and redo: \n # Getting the guess\n guesspar,guessx,guessy = gguess(x,y,xr,yr,xsgn,ysgn)\n redo = True\n skip = False\n # back position worse, or can't redo pre-position, skip \n else:\n redo = False\n skip = True\n guesspar,guessx,guessy = None,None,None\n \n return newx,newy,guessx,guessy,guesspar,back,redo,skip,endflag\n\n\n # Redo Failed! Return to original position\n # If we went back and backret=1 then return to pre-redo position\n # if we went forward then don't do anything, should continue forward \n if redo and redo_fail and back: \n # Go back to pre-redo position and skip\n newx = BTRACK[-1]['data']['lastx']\n newy = BTRACK[-1]['data']['lasty']\n skip = True\n return newx,newy,None,None,None,False,False,skip,endflag\n \n\n # Some default values\n skip = False\n newx,newy = None,None\n guesspar,guessx,guessy = None,None,None\n\n \n # Positions\n #\n # ^ Y P2 \n # | P3 P0 P1\n # | P4\n # --------> X\n \n # Get the positions, THIS IS THE PROPER WAY TO DO IT!!!!! \n x1,y1 = gincrement(x,y,xr,yr,xsgn=xsgn,ysgn=ysgn)\n x2,y2 = gincrement(x,y,xr,yr,xsgn=xsgn,ysgn=ysgn,p2=True) \n x3,y3 = gincrement(x,y,xr,yr,xsgn=-xsgn,ysgn=-ysgn)\n x4,y4 = gincrement(x,y,xr,yr,xsgn=xsgn,ysgn=-ysgn,p2=True)\n\n # Have they been visited before?\n # ps are 0 or 1\n p0,res0 = gfind(x,y,xr=xr,yr=yr)\n par0 = res0['par']\n p1,res1 = gfind(x1,y1,xr=xr,yr=yr)\n p2,res2 = gfind(x2,y2,xr=xr,yr=yr)\n p3,res3 = gfind(x3,y3,xr=xr,yr=yr)\n p4,res4 = gfind(x4,y4,xr=xr,yr=yr)\n\n # Comparing the solutions at neighboring positions\n # bs are 0, 1 or -1\n b1,dbic1 = gbetter(res0,res1)\n res1['better'] = b1\n res1['dbic'] = dbic1\n b2,dbic2 = gbetter(res0,res2)\n res2['better'] = b2\n res2['dbic'] = dbic2\n b3,dbic3 = gbetter(res0,res3)\n res3['better'] = b3\n res3['dbic'] = dbic3\n b4,dbic4 = gbetter(res0,res4)\n res4['better'] = b4\n res4['dbic'] = dbic4 \n \n # Do we need to redo?\n red1,red2,red3,red4 = False,False,False,False\n if (p1==1) and (b1==0): \n red1 = True\n if (p2==1) and (b2==0): \n red2 = True\n if (p3==1) and (b3==0): \n red3 = True\n if (p4==1) and (b4==0): \n red4 = True\n\n xx = [x1,x2,x3,x4]\n yy = [y1,y2,y3,y4]\n pp = [p1,p2,p3,p4]\n bb = [b1,b2,b3,b4]\n rr = [red1,red2,red3,red4]\n \n # Printing out the info\n if silent==False:\n if count>0:\n print(' ')\n print('Count = %d' % count)\n print('Last/Current Position = (%d,%d)' %(x,y))\n print('Neighbors (position) visited better redo')\n for i in range(4):\n if xx[i] is not None:\n strx = '%5d' % xx[i]\n else:\n strx = '-----'\n if yy[i] is not None:\n stry = '%5d' % yy[i]\n else:\n stry = '-----' \n print('P%1d (%5s,%5s) %7d %7d %7s' % (i+1,strx,stry,pp[i],bb[i],str(rr[i]))) \n print('')\n\n \n # If P3 or P4 worse than P0 then move back to worst decomp \n # If P3 and P4 better than P0 then move forward,\n # -if both have been visited before then do the worst decomp \n # -if neither has been visited before then move to P1. 
\n\n \n\n # Starting Normal Algorithm\n # (not redo+back+backred)\n #==========================\n\n # More generic algorithm, checks all 4 positions and possible redos\n newscheme = True\n if noback==False and newscheme:\n endflag = False\n res1['redo'] = False\n if res1['visited']==True:\n res1['redo'] = gredo(res1['x'],res1['y'],x,y,par0)\n res2['redo'] = False\n if res2['visited']==True:\n res2['redo'] = gredo(res2['x'],res2['y'],x,y,par0) \n res3['redo'] = False\n if res3['visited']==True:\n res3['redo'] = gredo(res3['x'],res3['y'],x,y,par0) \n res4['redo'] = False\n if res4['visited']==True:\n res4['redo'] = gredo(res4['x'],res4['y'],x,y,par0)\n res = [res1,res2,res3,res4]\n redos = [res1['redo'],res2['redo'],res3['redo'],res4['redo']]\n dbic = np.array([res1['dbic'],res2['dbic'],res3['dbic'],res4['dbic']])\n toredo, = np.where((np.array(redos)==True) & (dbic<0))\n # Some redos\n if len(toredo)>0:\n # Find the one with the worst solution\n if len(toredo)>1:\n best1 = np.argmin(dbic[toredo])\n best = toredo[best1]\n else:\n best = toredo[0]\n if best>=2:\n back = True\n else:\n back = False\n newres = res[best]\n newx,newy = newres['x'],newres['y']\n guessx,guessy,guesspar = x,y,par0\n redo = True\n # No redos, more foward to P1\n else:\n redo = False\n back = False\n newx,newy = x1,y1\n guessx,guessy,guesspar = x,y,par0\n # if we already visited P1, then skip\n if res1['visited']:\n skip = True\n else:\n skip = False\n \n return newx,newy,guessx,guessy,guesspar,back,redo,skip,endflag\n \n # check if the position is a \"valid\" one\n # check if it was previously visited\n # check if it CAN be redone\n # for all that can be redone, which one has the largest negative dbic (worse solution)\n \n \n\n #============================== \n #---- CHECKING BACKWARDS ----\n #============================== \n if ((p3==1) or (p4==1)) and (noback==False): \n\n # Only P3 visited before\n #=======================\n if (p3==1) and (p4==0): \n # Can this position be redone\n redo = gredo(x3,y3,x,y,par0)\n # P3 worse than P0, moving back\n #------------------------------\n if (b3==0) and redo: \n newx,newy = x3,y3\n back = True # moving backwards\n guessx,guessy,guesspar = x,y,par0\n else: \n back = False\n redo = False\n\n # Only P4 visited before\n #=======================\n elif (p3==0) and (p4==1): \n # Can this position be redone\n redo = gredo(x4,y4,x,y,par0)\n # P4 worse than P0, moving back\n #------------------------------\n if (b4==0) and redo: \n newx,newy = x4,y4\n back = True # moving backwards\n guessx,guessy,guesspar = x,y,par0\n else: \n back = False\n redo = False\n\n # Both visited before\n #====================\n elif (p3==1) and (p4==1): \n redo = False # not redo unless proven otherwise \n # Can these positions be redone\n redo3 = gredo(x3,y3,x,y,par0) \n redo4 = gredo(x4,y4,x,y,par0) \n\n # P3 worse than P0, but P4 better than P0 (or no gauss) (b3==0 and b4!=0)\n #----------------------------------------\n if (b3==0) and (b4!=0): \n # We can redo it, moving back to P3 \n if redo3:\n redo = True\n newx,newy = x3,y3\n # Can't redo, move forward \n else:\n redo = False\n back = False\n\n # P4 worse than P0, but P3 better than P0 (or no gauss) (b3!=0 and b4==0)\n #----------------------------------------\n elif (b3!=0) and (b4==0): \n # We can redo it, moving back to P4 \n if redo4:\n redo = True\n newx,newy = x4,y4\n # Can't redo, move forward \n else: \n redo = False\n back = False\n\n # Both P3 and P4 are worse than P0\n #---------------------------------\n elif (b3==0) and (b4==0): 
\n # Can redo either one, redo the one with the worse solution\n if redo3 and redo4:\n redo = True\n b34,dbic34 = gbetter(res3,res4)\n # Moving back to P3 (P3 worse than P4) \n if (b34==1): # to P3 \n newx,newy = x3,y3\n # Moving back to P4 (P4 worse than P3)\n if (b34==0): # to P4 \n newx,newy = x4,y4\n # Can't redo P4, go to P3 \n if redo3 and (redo4==False):\n redo = True\n newx,newy = x3,y3 # to P3\n # Can't redo P3, go to P4 \n if (redo3==False) and redo4:\n redo = True\n newx,newy = x4,y4 # to P4\n # Can't do either, move forward \n if (redo3==False) and (redo4==False): \n redo = False \n back = False\n\n # Both are better than P0 or both no Gaussians, move forward\n #-----------------------------------------------------------\n elif (b3!=0) and (b4!=0):\n back = False\n redo = False\n\n # Shouldn't ever happen\n else:\n import pdb; pdb.set_trace()\n \n # One is worse than P0\n #---------------------\n if redo: \n back = True # moving backwards \n guessx,guessy,guesspar = x,y,par0\n\n # Neither visited before, backwards not possible\n # p3==0 and p4==0\n else:\n back = False\n\n\n #==============================\n # ---- CHECKING FORWARD ----\n #==============================\n if ((p3==0) and (p4==0)) or (back==False) or noback: \n\n # This is the very end \n if (x1 is None) or (x==xr[1] and y==yr[1]):\n endflag = True\n return None,None,None,None,None,False,False,False,endflag\n\n back = False # moving forward \n\n # Only P1 has been visited before\n #================================\n if (p1==1) and (p2==0): \n redo = True\n # Can this position be redone\n redo1 = gredo(x1,y1,x,y,par0) \n # Moving to P1 (P1 worse than P0) \n if (b1==0) and redo1: \n newx,newy = x1,y1\n # Can't redo P1, or P1 better than P0, move another step ahead \n else: \n newx,newy = x1,y1\n redo = False\n skip = True # don't fit this one \n\n # Only P2 has been visited before, THIS SHOULD NEVER HAPPEN\n #================================ \n elif (p1==0) and (p2==1): \n print('This should never happen!!')\n import pdb; pdb.set_trace() \n\n # Both have been visited before\n #============================== \n elif (p1==1) and (p2==1):\n # Can this position be redone \n redo1 = gredo(x1,y1,x,y,par0) \n redo2 = gredo(x2,y2,x,y,par0) \n if (redo1==False) and (redo2==False): # no redo \n redo = False\n\n # P1 worse than P0, and P2 better than P0 (or no gauss) (b1==0 and b2!=0)\n #------------------------------------------------------\n if (b1==0) and (b2!=0): \n # Can redo, moving to P1 \n if redo1: \n newx,newy = x1,y1\n redo = True\n # Can't redo, increment to P1 and skip \n else: \n newx,newy = x1,y1 # to P1 \n redo = False\n skip = True\n\n # P2 worse than P0, and P1 better than P0 (or no gauss) (b1==1 and b2==0)\n #------------------------------------------------------\n elif (b1!=0) and (b2==0): \n # Can redo, moving to P2 \n if redo2: \n newx,newy = x2,y2\n redo = True\n # Can't redo, increment to P1 and skip \n else: \n newx,newy = x1,y1 # to P1 \n redo = False\n skip = True\n\n # Both worse than P0\n #-------------------\n elif (b1==0) and (b2==0): # both bad, find worst \n # Can redo either one, move to the one with the worse solution\n if redo1 and redo2:\n redo = True\n b12,dbic12 = gbetter(res1,res2)\n # Moving to P1 (P1 worse than P2) \n if (b12==1): # to P1 \n newx,newy = x1,y1\n # Moving to P2 (P2 worse than P1) \n if (b12==0): # to P2\n newx,newy = x2,y2\n\n # Can't redo P2, go to P1 \n if redo1 and (redo2==False):\n redo = True\n newx,newy = x1,y1 # to P1 \n # Can't redo P1, go to P2 \n if 
(redo1==False) and redo2:\n redo = True\n newx,newy = x2,y2 # to P2 \n # Can't do either, increment to P1 and skip \n if (redo1==False) and (redo2==False): \n newx,newy = x1,y1 # to P1 \n redo = False\n skip = True \n\n # Both better than P0 or both no Gaussians, increment to P1 and skip\n #-------------------------------------------------------------------\n elif (b1!=0) and (b2!=0):\n newx,newy = x1,y1 # to P1 \n redo = False\n skip = True \n\n # Shouldn't ever happen\n else:\n print('Should not happen 1')\n import pdb; pdb.set_trace()\n \n\n # Neither has been visited before, increment to P1\n #=================================================\n elif (p1==0) and (p2==0): \n # Increment to P1\n newx,newy = x1,y1\n\n # Should never happen\n else:\n print('Should not happen 2')\n import pdb; pdb.set_trace() \n \n\n # No new position determined yet, move forward to P1\n if newx is None or newy is None:\n # Increment to P1\n newx,newy = x1,y1\n\n # Getting guess\n if newx is not None and newy is not None and guesspar is None:\n guesspar,guessx,guessy = gguess(newx,newy,xr,yr,xsgn,ysgn)\n \n try:\n dumx,dumy = newx,newy\n except:\n print('problem')\n import pdb; pdb.set_trace()\n\n \n return newx,newy,guessx,guessy,guesspar,back,redo,skip,endflag", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def move(self, usercmd):\n newPosX = self.robot.posX\n newPosY = self.robot.posY\n logging.info(\"Avant action :: newPosX={} / newPosY={}\".\\\n format(newPosX, newPosY))\n step = 1\n cmd = usercmd[0:1]\n if (len(usercmd) != 1):\n stpStr = usercmd[1:]\n if (stpStr.isdigit()):\n step = int(stpStr)\n else:\n step = 0\n if cmd.startswith(\"E\"):\n newPosX = newPosX + step\n elif cmd.startswith(\"W\"):\n newPosX = newPosX - step\n elif cmd.startswith(\"N\"):\n newPosY = newPosY - step\n elif cmd.startswith(\"S\"):\n newPosY = newPosY + step\n elif (cmd == \"Q\"):\n #quit\n print(\"Quit\")\n return False\n logging.info(\"newPosX={} / newPosY={}\".format(newPosX, newPosY))\n oldCar = \"\"\n newCar = \"\"\n if (self.canMove(cmd, self.robot, newPosX, newPosY)):\n oldCar = self.map[newPosY][newPosX]\n logging.info(\"originalMap[{}] : {}\".format(self.robot.posY, \\\n self.originalMap[self.robot.posY]))\n if (self.originalMap[self.robot.posY][self.robot.posX] == \".\"):\n self.map[self.robot.posY][self.robot.posX] = \".\"\n else:\n self.map[self.robot.posY][self.robot.posX] = \" \"\n self.robot.posX = newPosX\n self.robot.posY = newPosY\n self.map[newPosY][newPosX] = \"X\"\n logging.info(\"self.map[{}]={}\".format(newPosY, self.map[newPosY]))\n newCar = self.map[newPosY][newPosX]\n #print(oldCar, newCar)\n if (oldCar == \"U\" and newCar == \"X\"):\n print(\"Bravo, vous avez gagné !!!!!\")\n #Quit\n return False\n return True", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, 
locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def move(self, direction):\n command = self.DIRECTIONS[direction][\"command\"]\n mem, out = self.cpu.run_program(inputs=[command])\n status = out.pop()\n if status in (1, 2):\n self.position = Point(\n self.position.x + self.DIRECTIONS[direction][\"mask\"][0],\n self.position.y + self.DIRECTIONS[direction][\"mask\"][1]\n )\n if self.display:\n self.draw_grid()\n sleep(self.delay)\n return status", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def Recharge_Method2(r, c):\n if state.loc[r] != state.pos[c] and state.pos[c] != r:\n if state.pos[c] in rv.LOCATIONS:\n alg.do_task('moveTo', r, state.pos[c])\n else:\n robot = state.pos[c]\n alg.do_command(put, robot, c)\n alg.do_task('moveTo', r, state.pos[c])\n alg.do_command(charge, r, c)", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not 
finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= 
len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def move(self, board, player_mark='o'):\n # First things first, let's check if the board is full first before we\n # make a move\n full = 1\n for location in board.keys():\n if board[location] == '-':\n full = 0\n\n if not full:\n # Storm Spirit is a dumb yet aggressive AI, so he does not need to\n # check whether the opponent has created a line.\n\n # Initialize a move variable that determines the location that the\n # AI will mark.\n move = ''\n\n # Let's see if there are any potential lines that we can form,\n # then mark the location that would finish that line.\n print('Searching for potential lines...')\n move = self.find_line_attempt(board, 'x')\n\n if(move == ''):\n print('No potential lines found. Marking random location.')\n # Initialize a boolean variable that tracks whether we have\n # marked a location or not.\n marked = 0\n while not marked:\n location = random.randint(1,9)\n\n # The location will have to be empty\n if(location == 1 and board['topleft'] == '-'):\n marked = 1\n print('Marking topleft location\\n')\n elif(location == 2 and board['topcenter'] == '-'):\n marked = 1\n print('Marking topcenter location\\n')\n elif(location == 3 and board['topright'] == '-'):\n marked = 1\n print('Marking topright location\\n')\n elif(location == 4 and board['middleleft'] == '-'):\n marked = 1\n print('Marking middleleft location\\n')\n elif(location == 5 and board['middlecenter'] == '-'):\n marked = 1\n print('Marking middlecenter location\\n')\n elif(location == 6 and board['middleright'] == '-'):\n marked = 1\n print('Marking middleright location\\n')\n elif(location == 7 and board['bottomleft'] == '-'):\n marked = 1\n print('Marking bottomleft location\\n')\n elif(location == 8 and board['bottomcenter'] == '-'):\n marked = 1\n print('Marking bottomcenter location\\n')\n elif(location == 9 and board['bottomright'] == '-'):\n marked = 1\n print('Marking bottomright location\\n')\n else:\n # There are no more locations to mark, but set marked to\n # true anyway\n print('No empty spaces found! 
Re-rolling')\n # Mark the location chosen\n if(location == 1):\n board['topleft'] = self.mark\n elif(location == 2):\n board['topcenter'] = self.mark\n elif(location == 3):\n board['topright'] = self.mark\n elif(location == 4):\n board['middleleft'] = self.mark\n elif(location == 5):\n board['middlecenter'] = self.mark\n elif(location == 6):\n board['middleright'] = self.mark\n elif(location == 7):\n board['bottomleft'] = self.mark\n elif(location == 8):\n board['bottomcenter'] = self.mark\n elif(location == 9):\n board['bottomright'] = self.mark\n else:\n # We found a line attempt, let's mark the finishing location\n board[move] = self.mark\n print('Marked location at ' + move)", "def move(x, y, direction, board):\n\n piece_at_xy = starter.get_piece(x, y, board); # Getting necessary pieces\n\n assert piece_at_xy != '*', \"Error in swipe logic\"; # Logical debug case\n valid_direction = (direction == \"left\" or\n direction == \"right\" or\n direction == \"up\" or\n direction == \"down\");\n assert valid_direction, \"Invalid direction passed in\"; # Logical debug case\n\n # The new x and y for the current piece (adjacent's current position) are stored alongside adjacent (fewer ifs + redundant code)\n if direction == \"left\":\n adjacent = (starter.get_piece(x - 1, y, board), x - 1, y);\n elif direction == \"right\":\n adjacent = (starter.get_piece(x + 1, y, board), x + 1, y);\n elif direction == \"up\":\n adjacent = (starter.get_piece(x, y - 1, board), x, y - 1);\n elif direction == \"down\":\n adjacent = (starter.get_piece(x, y + 1, board), x, y + 1);\n\n if adjacent[0] == None: # Edge of the board case (no action taken)\n return False;\n\n elif piece_at_xy != adjacent[0] and adjacent[0] != '*': # Can't combine two numbers case (no action taken)\n return False;\n\n elif adjacent[0] == '*': # Empty spot adjacent case (recursive movement in direction)\n starter.place_piece('*', x, y, board);\n starter.place_piece(piece_at_xy, adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n elif piece_at_xy == adjacent[0]: # Adjacent same numbers case (combine them)\n starter.place_piece('*', x, y, board);\n starter.place_piece(str(int(adjacent[0]) * 2), adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n else:\n # Logical debug case\n assert False, \"No way you should be in here. 
Error in move logic\";\n\n return False;", "def move(argument, player):\n current_tile = world.tile_exists(player.location_x, player.location_y)\n if argument == \"north\":\n if world.tile_exists(player.location_x, player.location_y-1):\n new_tile = world.tile_exists(player.location_x, player.location_y-1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y-1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"south\":\n if world.tile_exists(player.location_x, player.location_y+1):\n new_tile = world.tile_exists(player.location_x, player.location_y+1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y+1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"east\":\n if world.tile_exists(player.location_x+1, player.location_y):\n new_tile = world.tile_exists(player.location_x + 1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x+1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"west\":\n if world.tile_exists(player.location_x-1, player.location_y):\n new_tile = world.tile_exists(player.location_x-1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x-1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"Movement not recognized. 
Specify a cardinal direction.\")\n return", "def make_move(self): \n if self.counter == 0:\n #AI makes a random move to start\n ai_move = random.randrange(0,((self.size[0] * self.size[1]) - 1)) \n \n #Number to coordinate conversion\n row = ai_move % self.size[0]\n column = ai_move % self.size[0]\n self.start_game((row, column))\n self.counter = 1\n\n if (self.board[(row, column)] == 'm'):\n #print() \"\\n\", \"First move RIP!, what are the odds...\"\n self.found_mine()\n self.gameover = 1\n \n else:\n row, column = self.find_move()\n \n #0.25 second wait \n #time.sleep(0.25)\n\n #Prints out to the terminal the move and type of move\n print(row, \",\", column)\n\n #Updates the GUI\n root.update()\n \n if (self.board[(row, column)] == 'm'):\n print(\"RIP!\") \n self.found_mine() \n self.gameover = 1\n \n elif self.board[(row, column)] == '0':\n print(\"No mines in sight\") \n self.found_space((row, column))\n\n elif self.board[(row, column)] == '1':\n print(\"There is 1 mine next to this spot\") \n self.found_border((row, column))\n else:\n print(\"There are\", self.board[(row, column)], \"mines next to this spot\") \n self.found_border((row, column))", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n 
### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def execute_solution(offset_x, offset_y, moves):\n\n\t# Offsets for approximately where everything is given 1600x900 game window size\n\tbase_x = 46\n\tbase_y = 238\n\tfreecell_x = 314\n\tfreecell_y = 24\n\twidth = 128\n\theight = 30\n\tmodifier_x = 40\n\tmodifier_y = 19\n\n\t# Correct for retina display (change to 1 on conventional monitor)\n\tres_scale = 
0.5\n\n\t# First, click the window\n\tpyautogui.mouseDown((offset_x + 100) * res_scale, (offset_y + 100) * res_scale, button=\"left\")\n\ttime.sleep(0.5)\n\tpyautogui.mouseUp()\n\ttime.sleep(1)\n\n\t# Now, replay the moves one by one\n\tfor move in moves:\n\t\t# which stack, how many cards down -> which stack, how many cards down\n\t\tx_pre, y_pre, x_post, y_post = move\n\n\t\t# If it's a regular stack, move to the offset\n\t\tif x_pre < 8:\n\t\t\tx_pre_final = offset_x + base_x + (width * x_pre) + modifier_x\n\t\t\ty_pre_final = offset_y + base_y + (height * y_pre) + modifier_y\n\t\t# Separate offsets for freecell\n\t\telse:\n\t\t\tx_pre_final = offset_x + freecell_x + (width * (x_pre - 8)) + modifier_x\n\t\t\ty_pre_final = offset_y + freecell_y + modifier_y\n\n\t\tif x_post < 8:\n\t\t\tx_post_final = offset_x + base_x + (width * x_post) + modifier_x\n\t\t\ty_post_final = offset_y + base_y + (height * y_post) + modifier_y\n\t\telse:\n\t\t\tx_post_final = offset_x + freecell_x + (width * (x_post - 8)) + modifier_x\n\t\t\ty_post_final = offset_y + freecell_y + modifier_y\n\n\t\tprint(\"Mouse to %d, %d -> drag to %d, %d\" % (x_pre_final, y_pre_final, x_post_final, y_post_final))\n\n\t\t# Move the mouse to the beginning place\n\t\tpyautogui.moveTo(x_pre_final * res_scale, y_pre_final * res_scale, duration = 0.25)\n\n\t\t# Click and drag to the end\n\t\tpyautogui.dragTo(x_post_final * res_scale, y_post_final * res_scale, duration = 0.25, button = \"left\")\n\n\t\t# Wait for a while\n\t\ttime.sleep(0.25)", "def moveToNext(self):\n\t\tif self.G.debug:\n\t\t\ttic=time.clock()\n\t\tself.debugPrint('looks for new spot')\n\t\texceeds=self.m.exceedsAngleLim\t#function\n\t\tinside=self.m.isWithinPlantingBorders\t#function\n\t\tcart=self.m.getCartesian\n\t\tauto=self.m.automatic\n\t\tt=self.m.times\n\t\tcommands=[]\n\t\tif self.autoMoved:\n\t\t\topt=self.pos\n\t\t\tself.autoMoved=False #if this search is unsuccessfull, automove is enabled to next ideal pos.\n\t\telse:\n\t\t\topt=self.getNextOptimal()\n\t\tmoveTo=opt #for so long..\n\t\trTemp=0.1\n\t\tthTemp=0\n\t\tb=0.05 #constant for the spiral\n\t\ta=0.1\n\t\tplant=True #we will plant in this step...\n\t\td2=self.m.plantMinDist**2 #dist^2\n\t\tpossible = False #for so long\n\t\twhile not possible:\n\t\t\ttic=time.clock()\n\t\t\tpossible=True\n\t\t\tobstList=self.G.terrain.GetVisibleObstacles(moveTo, R=self.radius)\n\t\t\ttreeList=self.G.terrain.GetTrees(moveTo, R=self.radius+self.m.plantMinDist)\n\t\t\tobstList+=[tr for tr in treeList if not tr in obstList] #this procedure minimizes R in Getobst\n\t\t\t#[p1, p2]=self.getPHCoord(moveTo)\n\t\t\tphPos=self.getPHCoord(moveTo)\n\t\t\tplantSpots=self.getPlantingCoord(moveTo)\n\t\t\t#[f1,f2]=self.getPlantingCoord(moveTo)\n\t\t\tif self.otherDevice is not None:\n\t\t\t\totherDevPlantCor=self.otherDevice.getPlantingCoord(self.otherDevice.pos)\n\t\t\t\t#check for colissions and similar related to other device\n\t\t\t\tif collide(self, self.otherDevice, o1pos=moveTo): \n\t\t\t\t\tpossible=False\n\t\t\t\telse:\n\t\t\t\t\tfor o in otherDevPlantCor:\n\t\t\t\t\t\tfor f in plantSpots:\n\t\t\t\t\t\t\t#if getDistanceSq(f1, o)<d2 or getDistanceSq(f2, o)<d2:\n\t\t\t\t\t\t\tif getDistanceSq(f,o)<d2:#plantingspot of device is closer than allowed to other Device's plantingspot\n\t\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t\tbreak\t\t\n\t\t\tif possible:\t#either 1a or angle OK and above check OK\n\t\t\t\tfor obst in obstList:\n\t\t\t\t\t#tic=time.clock()\n\t\t\t\t\tif isinstance(obst, Tree):\n\t\t\t\t\t\t#other demands, 
more than 1.5 m from plantingspot.\n\t\t\t \t\t\tfor f in plantSpots:\n\t\t\t\t\t\t\t#if getDistanceSq(f1, o)<d2 or getDistanceSq(f2, o)<d2:\n\t\t\t\t\t\t\tif getDistanceSq(f, obst.pos)<d2 or collide(self, obst, o1pos=moveTo):\n\t\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif isinstance(obst, Hole): #hole can be in beetween plantheads... Plantpos can be in hole.\n\t\t\t\t\t\tif len(self.plantHeads)==1: #bracke\n\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif collide(self.plantHeads[0], obst, o1pos=phPos[0]) or collide(self.plantHeads[1], obst, o1pos=phPos[1]):\n\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t#PlantingDevice.timesProf[0]+=time.clock()-tic\t\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif collide(self, obst, o1pos=moveTo):\n\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t#PlantingDevice.timesProf[0]+=time.clock()-tic\t\n\t\t\t\t\t\tbreak\n\t\t\t\tif possible and self.otherDevice is not None and exceeds(self, moveTo, self.otherDevice):\n\t\t\t\t\tpossible=False\t#angle is too big to the other device\n\t\t\t#at this point, all test for \"possibility\" are performed.\n\t\t\tPlantingDevice.timesProf[0]+=time.clock()-tic\n\t\t\tdthini=pi/50.\n\t\t\tif not possible:\n\t\t\t\t#move in a spiral outwards\n\t\t\t\trTemp=a+b*thTemp\n\t\t\t\tdth=(pi/25.)/(rTemp/2.)\n\t\t\t\tthTemp+=dth\n\t\t\t\tthInit=thTemp #used to avoid infinite loop\n\t\t\t\tmoveTo=cart([rTemp,thTemp],opt)\n\t\t\t\twhile not inside(moveTo) or (self.otherDevice is not None and exceeds(self, moveTo, self.otherDevice)):\n\t\t\t\t\t#outside borders or too big angle.. make above expression shorter..\n\t\t\t\t\t#self.pSpots.append(self.m.getCartesian([rTemp,thTemp], opt))\n\t\t\t\t\trTemp=a+b*thTemp\n\t\t\t\t\tthTemp+=(pi/25.)/(rTemp/2.)\t\t\t\t\t\n\t\t\t\t\t#if abs(thTemp-thInit)>2*pi: #if radius is too big..\n\t\t\t\t\tif abs(thInit-thTemp)>2*pi:\n\t\t\t\t\t\tplant=False #we will not plant this time.\n\t\t\t\t\t\t#move to make it easier for the other head:\n\t\t\t\t\t\tif self.otherDevice is not None and self.lastPos==self.pos and self.struckLastTime:\t\t\t\t\t\t\n\t\t\t\t\t\t\tthIni=self.posCyl[1]-dthini\n\t\t\t\t\t\t\tthTemp=thIni\n\t\t\t\t\t\t\t\"\"\"if exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\tnp=cart([self.posCyl[0],thTemp])\"\"\" #old stuff... should be removed, right?\n\t\t\t\t\t\t\twhile inside(cart([self.posCyl[0],thTemp])) and not exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\tthTemp-=dthini #moves in order to make more space\n\t\t\t\t\t\t\tif thTemp==thIni: #it wasnt inside or exceeded\n\t\t\t\t\t\t\t\tcommands.extend(self.releaseDriver()) #releases driver, if he is used\n\t\t\t\t\t\t\t\tif exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\t\t#we are struck! 
Wait for other device to move.\n\t\t\t\t\t\t\t\t\tself.m.stopControl() #we could have reached the end here.\n\t\t\t\t\t\t\t\t\tcommands.append((waitevent, self, self.otherDevice.moveEvent))\n\t\t\t\t\t\t\t\telse: #not inside, we have reached the end of the half circle\n\t\t\t\t\t\t\t\t\tself.debugPrint(\"end of pattern reached, passivates %s device\"%self.mountPoint)\n\t\t\t\t\t\t\t\t\tself.noMoreSpots=True\n\t\t\t\t\t\t\t\t\tself.m.stopControl() #we could have reached the end here.\n\t\t\t\t\t\t\t\t\tcommands.append((passivate, self))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmoveTo=cart([self.posCyl[0],thTemp+dthini])\n\t\t\t\t\t\t\t\ttraveltime=self.setPos(moveTo)\n\t\t\t\t\t\t\t\tself.debugPrint('clears for other head')\n\t\t\t\t\t\t\t\tcommands=self.cmnd(commands, traveltime,auto=auto['clearForOtherHead'])\n\t\t\t\t\t\tif plant:\n\t\t\t\t\t\t\tcommands=self.cmnd(commands, t['searchTime'],auto=auto['micrositeSelection'])\n\t\t\t\t\t\t\tself.m.timeConsumption['searchTime']+=t['searchTime']\n\t\t\t\t\t\treturn (commands,plant)\n\t\t\t\t\tmoveTo=cart([rTemp,thTemp],opt)\n\t\ttravelTime=self.setPos(moveTo)\n\t\tself.debugPrint('traveltime: %f'%travelTime)\n\t\tif plant: #this timeconsumption is only for succesfull...\n\t\t\tcommands=self.cmnd(commands, t['searchTime'],auto=auto['micrositeSelection'])\n\t\t\tself.m.timeConsumption['searchTime']+=t['searchTime']\t\t\n\t\tcommands=self.cmnd(commands, travelTime,auto=auto['moveToMicro'])\n\t\treturn (commands,plant)", "def move(self, direction):\n\n # Check if there are empty tiles available\n for row in self._grid:\n if row.count(0) != 0:\n self._game_over = False\n break\n else:\n self._game_over = True\n\n # If empty tiles are not available, game over\n if self._game_over == True:\n print \"Sorry Game Over, Board Full\"\n print self.__str__()\n return None\n\n # New tiles won't be needed for illegal moves\n new_tiles_needed = False\n\n for tile in self._initial_tiles[direction]:\n old_tiles = self.traverse_grid(tile, OFFSETS[direction], self._steps[direction])\n tiles = merge(old_tiles)\n if old_tiles != tiles:\n # The old row and the new row are different after the merge\n # New tile will be needed\n new_tiles_needed = True\n self.set_grid(tile, OFFSETS[direction], tiles)\n\n if new_tiles_needed == True:\n self.new_tile()", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n 
elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def motion(self):\n priority = {\"north\": [-1, 0], \"south\": [1, 0],\n \"east\": [0, 1], \"west\": [0, -1]}\n\n priority_list = [\"north\", \"south\", \"east\", \"west\"]\n\n critical_point = False\n while critical_point is False:\n row = self.curr_cell.row\n column = self.curr_cell.col\n\n if self.allow_to_move(priority_list[0],\n row + priority[priority_list[0]][0],\n column + priority[priority_list[0]][1]):\n\n self.move(priority_list[0])\n\n elif self.allow_to_move(priority_list[1],\n row + priority[priority_list[1]][0],\n column + priority[priority_list[1]][1]):\n\n self.move(priority_list[1])\n\n elif self.allow_to_move(priority_list[2],\n row + priority[priority_list[2]][0],\n column + priority[priority_list[2]][1]):\n\n self.move(priority_list[2])\n\n elif self.allow_to_move(priority_list[3],\n row + priority[priority_list[3]][0],\n column + priority[priority_list[3]][1]):\n\n self.move(priority_list[3])\n\n else:\n # Robot isolated\n critical_point = True\n\n return self.curr_cell, self.path", "def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == \"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = \"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n 
new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def move_to_position2(self):" ]
[ "0.6312876", "0.60473263", "0.5894628", "0.5894628", "0.5836864", "0.5819794", "0.581422", "0.5812412", "0.5812412", "0.58097583", "0.58019125", "0.5792342", "0.57617646", "0.5753274", "0.57401913", "0.5738901", "0.5717236", "0.5713577", "0.57098556", "0.57092357", "0.56941414", "0.5677833", "0.5670717", "0.56610096", "0.5658108", "0.56133103", "0.56106454", "0.56064165", "0.5574207", "0.55502754" ]
0.6905907
0
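The document above selects the next grid position by comparing the decomposition at the current cell (P0) against its four neighbors (P1-P4): a neighbor is revisited only if it was visited before, gredo allows it, and its existing solution is worse (negative dbic); among several candidates the one with the most negative dbic wins, and indices 2-3 (P3/P4) imply moving backwards. A minimal sketch of that selection step under those assumptions; the helper name pick_worst_redo is hypothetical, but the 'redo'/'dbic' keys match the res dicts in the original:

import numpy as np

def pick_worst_redo(neighbors):
    # neighbors: [res1, res2, res3, res4], each a dict with a boolean 'redo'
    # and a float 'dbic' (dbic < 0 means that neighbor's solution is worse).
    redos = np.array([n['redo'] for n in neighbors])
    dbic = np.array([n['dbic'] for n in neighbors])
    toredo, = np.where(redos & (dbic < 0))
    if len(toredo) == 0:
        return None                        # nothing to redo: move forward to P1
    return toredo[np.argmin(dbic[toredo])]  # worst (most negative) dbic wins

A returned index of 2 or 3 corresponds to P3/P4, i.e. the back=True branch in the original.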
Basically a state machine. Given an instruction ('R' or 'L') and a direction ('N', 'S', 'E', or 'W'), returns the new direction. Throws an exception in case of a bad instruction.
def _direction_after_turning(self, direction, instruction): next_left_states = {'N':'W', 'W': 'S', 'S': 'E', 'E': 'N'} next_right_states = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'} if instruction == 'R': return next_right_states[direction] elif instruction == 'L': return next_left_states[direction] else: raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_one_step(old_state, direction):\n assert direction in ['R', 'L', 'U', 'D']\n\n x, y = old_state\n if direction == 'R':\n return (x+1, y)\n if direction == 'L':\n return (x-1, y)\n if direction == 'U':\n return (x, y+1)\n if direction == 'D':\n return (x, y-1)", "def getDirection(a):\n try:\n if (int(a) == 0): return \"N\"\n elif (int(a) == 1): return \"S\"\n elif (int(a) == 2): return \"E\"\n elif (int(a) == 3): return \"W\"\n else: raise Exception(\"Invalid Action\")\n except Exception as err:\n print(err)\n exit()", "def _get_direction(self, action, direction):\n left = [2,3,1,0]\n right = [3,2,0,1]\n if direction == 0:\n new_direction = action\n elif direction == -1:\n new_direction = left[action]\n elif direction == 1:\n new_direction = right[action]\n else:\n raise Exception(\"getDir received an unspecified case\")\n return new_direction", "def _get_state(self, state, direction):\n row_change = [-1,1,0,0]\n col_change = [0,0,-1,1]\n row_col = seq_to_col_row(state, self.num_cols)\n row_col[0,0] += row_change[direction]\n row_col[0,1] += col_change[direction]\n\n # check for invalid states\n if self.obs_states is not None:\n if (np.any(row_col < 0) or\n np.any(row_col[:,0] > self.num_rows-1) or\n np.any(row_col[:,1] > self.num_cols-1) or\n np.any(np.sum(abs(self.obs_states - row_col), 1)==0)):\n next_state = state\n else:\n next_state = row_col_to_seq(row_col, self.num_cols)[0]\n else:\n if (np.any(row_col < 0) or\n np.any(row_col[:,0] > self.num_rows-1) or\n np.any(row_col[:,1] > self.num_cols-1)):\n next_state = state\n else:\n next_state = row_col_to_seq(row_col, self.num_cols)[0]\n\n return next_state", "def get_direction(position, next_position):\n x, y = position\n next_x, next_y = next_position\n if x == next_x:\n if y < next_y:\n return constants.Action.Right\n else:\n return constants.Action.Left\n elif y == next_y:\n if x < next_x:\n return constants.Action.Down\n else:\n return constants.Action.Up\n raise constants.InvalidAction(\"We did not receive a valid position transition.\")", "def getDirectionChange(pre, now, next):\r\n return RIGHT", "def navigate_rover(self, name, instruction_str):\n\n rover = self.rovers.get(name)\n if not rover:\n raise RoverException(ExceptionMessages.BAD_NAME)\n\n coordinate = copy.deepcopy(rover.coordinate)\n direction = rover.direction\n\n for instruction in instruction_str:\n\n if instruction == 'L' or instruction == 'R':\n direction = self._direction_after_turning(direction, instruction)\n elif instruction == 'M':\n coordinate = self._coordinate_after_moving(direction, coordinate)\n else:\n raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)\n\n # This means we have processed all the instructions without exception\n # assign new direction and coordinates to rover\n rover.direction = direction\n rover.coordinate = coordinate", "def get_move(self, direction):\n pos = self._state.index(0)\n row = pos // self._size\n col = pos % self._size\n moves = get_moves(self._size, col, row)\n new_state = self._state\n if direction in moves:\n if moves[direction]['is_movable']:\n new_state = move(self._state, pos, moves[direction]['rel_pos'])\n return Node(new_state, heuristic=self._heuristic,\n g_score=self._g_score+self._cost(self._state, new_state))", "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new 
location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def get_action_for_move(\n agent_position: Tuple[int, int],\n agent_direction: Grid4TransitionsEnum,\n next_agent_position: Tuple[int, int],\n next_agent_direction: int,\n rail: GridTransitionMap) -> Optional[RailEnvActions]:\n possible_transitions = rail.get_transitions(*agent_position, agent_direction)\n num_transitions = np.count_nonzero(possible_transitions)\n # Start from the current orientation, and see which transitions are available;\n # organize them as [left, forward, right], relative to the current orientation\n # If only one transition is possible, the forward branch is aligned with it.\n if rail.is_dead_end(agent_position):\n valid_action = RailEnvActions.MOVE_FORWARD\n new_direction = (agent_direction + 2) % 4\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif num_transitions == 1:\n valid_action = RailEnvActions.MOVE_FORWARD\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n else:\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n if new_direction == agent_direction:\n valid_action = RailEnvActions.MOVE_FORWARD\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif new_direction == (agent_direction + 1) % 4:\n valid_action = RailEnvActions.MOVE_RIGHT\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif new_direction == (agent_direction - 1) % 4:\n valid_action = RailEnvActions.MOVE_LEFT\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action", "def move(self, direction):\n # Store the values in the connection dictionary in a list\n self.room_num = self.current_room.connection[direction]\n\n # Check if there is a conditional movement and change the current room\n if len(self.room_num) == 1:\n self.current_room = self.rooms[int(self.room_num[0]) - 1]\n else:\n adventure.check(len(self.room_num))", "def new_location (x, y):\n North, South, West, East = walls(x,y)\n while True:\n direction = input('Direction: ').upper()\n\n if direction == 'N' and 
North:\n y += 1\n break\n elif direction == 'S' and South:\n y -= 1\n break\n elif direction == 'E' and East:\n x += 1\n break\n elif direction == 'W' and West:\n x -=1\n break\n else:\n print('Not a valid direction!')\n return x, y", "def test_input_stream_state_statewp():\n state_t1 = StateTask1(Direction.EAST, 0, 0)\n state_t2 = StateTask2([1, 10], [0, 0])\n\n instructions = tuple(read_instructions(input_stream()))\n assert state_t1.manhatam_distance == 0\n\n assert instructions[0] == Instruction(Direction.FWD, 10)\n state_t1.apply(instructions[0])\n state_t2.apply(instructions[0])\n assert state_t1.north == 0 and state_t1.east == 10\n assert state_t2.waypoint == [1, 10]\n assert state_t2.position == [10, 100]\n\n assert instructions[1] == Instruction(Direction.NORTH, 3)\n state_t1.apply(instructions[1])\n state_t2.apply(instructions[1])\n assert state_t1.north == 3 and state_t1.east == 10\n assert state_t2.waypoint == [4, 10]\n assert state_t2.position == [10, 100]\n\n assert instructions[2] == Instruction(Direction.FWD, 7)\n state_t1.apply(instructions[2])\n state_t2.apply(instructions[2])\n assert state_t1.north == 3 and state_t1.east == 17\n assert state_t2.waypoint == [4, 10]\n assert state_t2.position == [38, 170]\n\n assert instructions[3] == Instruction(Turn.RIGHT, 90)\n state_t1.apply(instructions[3])\n state_t2.apply(instructions[3])\n assert state_t1.north == 3 and state_t1.east == 17\n assert state_t2.waypoint == [-10, 4]\n assert state_t2.position == [38, 170]\n\n assert instructions[4] == Instruction(Direction.FWD, 11)\n state_t1.apply(instructions[4])\n state_t2.apply(instructions[4])\n assert state_t1.north == -8 and state_t1.east == 17\n assert state_t2.waypoint == [-10, 4]\n assert state_t2.position == [-72, 214]", "def applyAction(state, action):\r\n if action == 'N':\r\n return (state[0] - 1, state[1])\r\n\r\n if action == 'E':\r\n return (state[0], state[1] + 1)\r\n\r\n if action == 'W':\r\n return (state[0], state[1] - 1)\r\n\r\n if action == 'S':\r\n return (state[0] + 1, state[1])", "def travel(direction, x, y):\n x_new = x\n y_new = y\n for i in range(len(direction)):\n test = direction[i].lower()\n if test == 'n':\n y_new += 1\n elif test == 's':\n y_new -= 1\n elif test == 'e':\n x_new += 1\n elif test == 'w':\n x_new -= 1\n return (x_new, y_new)", "def move(self, direction):\n command = self.DIRECTIONS[direction][\"command\"]\n mem, out = self.cpu.run_program(inputs=[command])\n status = out.pop()\n if status in (1, 2):\n self.position = Point(\n self.position.x + self.DIRECTIONS[direction][\"mask\"][0],\n self.position.y + self.DIRECTIONS[direction][\"mask\"][1]\n )\n if self.display:\n self.draw_grid()\n sleep(self.delay)\n return status", "def move(self, action):\n \n currentState = self.state\n\n if action == \"up\":\n newState = (self.state[0] - 1, self.state[1])\n elif action == \"down\":\n newState = (self.state[0] + 1, self.state[1])\n elif action == \"right\":\n newState = (self.state[0], self.state[1] + 1)\n elif action == \"left\":\n newState = (self.state[0], self.state[1] - 1)\n else:\n raise NameError(action, 'This is not a valid action!')\n\n # Need to check if the new state is a legal move\n if (newState[0] >= 0) & (newState[0] <= 1) & (newState[1] >= 0) & (newState[1] <= 2):\n return newState\n else:\n print('This move takes you off the board, you have not moved!')\n return currentState", "def move(self, direction):\n try:\n\n if self.in_thing:\n print(\"You have to get out of the \" + str(*self.in_thing[-1]) +\n \" first\")\n return self\n 
if direction == 'north':\n if self.finished_places == 12:\n self.finished_places += 1\n return North(self.items, self.finished_places)\n if direction == 'up':\n if self.finished_places == 4:\n self.finished_places += 1\n return Up(self.items, self.finished_places)\n if direction == 'east':\n if self.finished_places == 2:\n self.finished_places += 1\n return East(self.items, self.finished_places)\n except AttributeError:\n self.items = []\n return self.move(direction)\n print(' you didn\\'t listen to my very subtle hints, i know it was hard'\n ' your lost now. if you remember the commands i told you you can'\n ' go back to where you left off and continue, just type \"QUIT\"')\n return Place(self.items, self.finished_places)\n\n # implement\n # return new instance on class", "def test_findDirection_8(self):\n startCoordinate = coordinate.Coordinate(5, 5)\n endCoordinate = coordinate.Coordinate(3, 7)\n expected_result = 8\n actual_result = rules.findDirection(startCoordinate, endCoordinate)\n self.assertEqual(actual_result, expected_result)", "def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")", "def get_valid_move_actions_(agent_direction: Grid4TransitionsEnum,\n agent_position: Tuple[int, int],\n rail: GridTransitionMap) -> Set[RailEnvNextAction]:\n valid_actions: Set[RailEnvNextAction] = OrderedSet()\n possible_transitions = rail.get_transitions(*agent_position, agent_direction)\n num_transitions = np.count_nonzero(possible_transitions)\n # Start from the current orientation, and see which transitions are available;\n # organize them as [left, forward, right], relative to the current orientation\n # If only one transition is possible, the forward branch is aligned with it.\n if rail.is_dead_end(agent_position):\n action = RailEnvActions.MOVE_FORWARD\n exit_direction = (agent_direction + 2) % 4\n if possible_transitions[exit_direction]:\n new_position = get_new_position(agent_position, exit_direction)\n valid_actions.add(RailEnvNextAction(action, new_position, exit_direction))\n elif num_transitions == 1:\n action = RailEnvActions.MOVE_FORWARD\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n valid_actions.add(RailEnvNextAction(action, new_position, new_direction))\n else:\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n if new_direction == agent_direction:\n action = RailEnvActions.MOVE_FORWARD\n elif new_direction == (agent_direction + 1) % 4:\n action = RailEnvActions.MOVE_RIGHT\n elif new_direction == (agent_direction - 1) % 4:\n action = RailEnvActions.MOVE_LEFT\n else:\n raise Exception(\"Illegal state\")\n\n new_position = get_new_position(agent_position, new_direction)\n valid_actions.add(RailEnvNextAction(action, new_position, new_direction))\n return valid_actions", "def loss_direction(direction, eps):\n return to_python_scalar(eps * (direction @ g.t()) - 0.5 * eps ** 2 * direction @ H @ direction.t())", "def loss_direction(direction, eps):\n return to_python_scalar(eps * (direction @ g.t()) - 0.5 * eps ** 2 * direction @ H @ direction.t())", "def direction(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"direction\")", "def test_findDirection_bad(self):\n startCoordinate = coordinate.Coordinate(4, 4)\n self.assertRaises(ValueError,\n rules.findDirection,\n startCoordinate,\n startCoordinate)", "def shift(self, direction):\n try:\n if 
direction == Direction.UP:\n return self.shift_up()\n elif direction == Direction.DOWN:\n return self.shift_down()\n elif direction == Direction.RIGHT:\n return self.shift_right()\n elif direction == Direction.LEFT:\n return self.shift_left()\n else:\n raise IndexError(\"Invalid direction {}\".format(direction))\n except IndexError as e:\n raise IndexError(e)", "def steer(self, command):\n if isinstance(command, int):\n assert command < len(self.commands)\n command = self.commands[command] # change command id into real command\n if command == 'move forward':\n self.control_conn.sendall('upO'.encode())\n elif command == 'turn left':\n self.control_conn.sendall('leftO'.encode())\n elif command == 'turn right':\n self.control_conn.sendall('rightO'.encode())\n elif command == 'stop':\n self.control_conn.send('stopO'.encode())\n else:\n print('pre', self.pre_cmd)\n self.steer(self.pre_cmd)\n command = self.pre_cmd\n self.pre_cmd = command\n return self.commands.index(command)", "def apply_one_command(old_state, command):\n direction = command[0]\n times = int(command[1:])\n for i in range(times):\n old_state = go_one_step(old_state, direction)\n yield old_state", "def man_dir():\n\n print \"\\n\" + \"-\" * 8 + \"Select Direction\" + \"-\" * 8\n print \"1. Up\"\n print \"2. Down\"\n print \"3. Left\"\n print \"4. Right\"\n choice = valid(\"\\nSelect direction: \", 1, 4)\n\n if choice == 1:\n direct = \"U\"\n elif choice == 2:\n direct = \"D\"\n elif choice == 3:\n direct = \"L\"\n elif choice == 4:\n direct = \"R\"\n return direct" ]
[ "0.6593464", "0.62368506", "0.61913526", "0.5976613", "0.57964754", "0.5683937", "0.5672923", "0.5668834", "0.5567708", "0.5457504", "0.5439745", "0.5427397", "0.54177237", "0.53752434", "0.5332933", "0.53058296", "0.53014976", "0.52971196", "0.5290954", "0.5269749", "0.52675956", "0.5265559", "0.5265479", "0.5265479", "0.5238769", "0.5232936", "0.5219115", "0.5213562", "0.5206062", "0.51773226" ]
0.77884203
0
Returns a new coordinate after moving the rover. Based on the direction, it applies a movement of one grid and calculates the new coordinate. It throws an exception if the new coordinate is off the grid or if the new coordinate results in a collision with another rover
def _coordinate_after_moving(self, direction, coordinate): if direction == 'N': new_coordinate = Coordinate(coordinate.x, coordinate.y + 1) elif direction == 'S': new_coordinate = Coordinate(coordinate.x, coordinate.y - 1) elif direction == 'W': new_coordinate = Coordinate(coordinate.x - 1, coordinate.y) else: new_coordinate = Coordinate(coordinate.x + 1, coordinate.y) if not self._is_coordinate_in_the_grid(new_coordinate): raise RoverException(ExceptionMessages.OFF_GRID) if self._is_coordinate_occupied(new_coordinate): raise RoverException(ExceptionMessages.ROVER_COLLISION) return new_coordinate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def move(self, direction):\n\n # Check if there are empty tiles available\n for row in self._grid:\n if row.count(0) != 0:\n self._game_over = False\n break\n else:\n self._game_over = True\n\n # If empty tiles are not available, game over\n if self._game_over == True:\n print \"Sorry Game Over, Board Full\"\n print self.__str__()\n return None\n\n # New tiles won't be needed for illegal moves\n new_tiles_needed = False\n\n for tile in self._initial_tiles[direction]:\n old_tiles = self.traverse_grid(tile, OFFSETS[direction], self._steps[direction])\n tiles = merge(old_tiles)\n if old_tiles != tiles:\n # The old row and the new row are different after the merge\n # New tile will be needed\n new_tiles_needed = True\n self.set_grid(tile, OFFSETS[direction], tiles)\n\n if new_tiles_needed == True:\n self.new_tile()", "def _do_move(game, main_grid, wallh_grid, wallv_grid,\n wallfills_grid, move, player, requested_players):\n # Extract destination coordinations.\n x = move[\"x\"]\n y = move[\"y\"]\n\n if move[\"type\"] == \"move\":\n for i in range(len(main_grid)):\n if player in main_grid[i]:\n py = i\n px = main_grid[i].index(player)\n main_grid[y][x] = player\n main_grid[py][px] = 0\n\n if move[\"type\"] == \"wall\" and move[\"direction\"] == \"h\":\n wallh_grid[y][x] = 1\n wallh_grid[y][x+1] = 1\n wallfills_grid[y][x] = 1\n if player == 1:\n game.player1_walls -= 1\n elif player == 2:\n game.player2_walls -= 1\n elif player == 3:\n game.player3_walls -= 1\n elif player == 4:\n game.player4_walls -= 1\n\n if move[\"type\"] == \"wall\" and move[\"direction\"] == \"v\":\n wallv_grid[y][x] = 1\n wallv_grid[y+1][x] = 1\n wallfills_grid[y][x] = 1\n if player == 1:\n game.player1_walls -= 1\n elif player == 2:\n game.player2_walls -= 1\n elif player == 3:\n game.player3_walls -= 1\n elif player == 4:\n game.player4_walls -= 1\n\n if requested_players == \"two\":\n if player == 1:\n game.turn = game.player2\n if player == 2:\n game.turn = game.player1\n if requested_players == \"four\":\n if player == 1:\n game.turn = game.player2\n if player == 2:\n game.turn = game.player3\n if player == 3:\n game.turn = game.player4\n if player == 4:\n game.turn = game.player1\n\n last_status = json.dumps({\"status\": \"playing\", \"turn\": game.turn.username})\n\n # Check to see if the game has been won. 
Apply scores if so.\n if check_win(main_grid, player):\n if requested_players == \"two\":\n players = {1: game.player1, 2: game.player2}\n winner = players[player]\n winner.two_player_wins += 1\n winner.total_score += 1\n winner.save()\n for p in players.values():\n if p == winner:\n continue\n p.two_player_losses += 1\n p.total_score -= 1\n p.save()\n if requested_players == \"four\":\n players = {1: game.player1, 2: game.player2,\n 3: game.player3, 4: game.player4}\n winner = players[player]\n winner.four_player_wins += 1\n winner.total_score += 3\n winner.save()\n for p in players.values():\n if p == winner:\n continue\n p.four_player_losses += 1\n p.total_score -= 1\n p.save()\n last_status = json.dumps({\"winner\": winner.username,\n \"status\": \"%s has won the game!\"\n % winner.username.capitalize()})\n \n # Update database.\n game.last_status = last_status\n game.main_grid = json.dumps(main_grid)\n game.wallh_grid = json.dumps(wallh_grid)\n game.wallv_grid = json.dumps(wallv_grid)\n game.wallfills_grid = json.dumps(wallfills_grid)\n game.save()\n\n return (last_status + \"\\n\" + game.main_grid + \"\\n\" + game.wallh_grid\n + \"\\n\" + game.wallv_grid + \"\\n\" + game.wallfills_grid\n + \"\\n\" + get_walls(game, requested_players))", "def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1", "def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()", "def move(self, direction):\n original_grid = []\n for row in self._grid:\n original_row = list(row)\n original_grid.append(original_row)\n steps = 0\n if direction == UP or direction == DOWN:\n steps = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n steps = self._grid_width\n to_move = []\n for initial_cell in self._initial_cells[direction]:\n for step in range(steps):\n new_row = initial_cell[0] + step * OFFSETS[direction][0]\n new_column = initial_cell[1] + step * OFFSETS[direction][1]\n to_move.append(self._grid[new_row][new_column])\n to_move = merge(to_move)\n row = initial_cell[0]\n column = initial_cell[1]\n for step in range(steps):\n self._grid[row + OFFSETS[direction][0] * step][column + OFFSETS[direction][1] * step] = to_move[step]\n to_move = []\n if original_grid != self._grid:\n self.new_tile()", "def move(self, direction):\n if direction in (UP, DOWN):\n num_steps = self._grid_height\n elif direction in (LEFT, RIGHT):\n num_steps = self._grid_width\n moved = False\n temp_list = []\n for start_cell in self._move_dir[direction]:\n # step 1: iterate through each line, write results to temp 
list\n for step in range(num_steps):\n row = start_cell[0] + step * OFFSETS[direction][0]\n col = start_cell[1] + step * OFFSETS[direction][1]\n temp_list.append(self._cells[row][col])\n # step 2: merge temp list\n temp_list_snap = temp_list[:]\n temp_list = merge(temp_list)\n print(temp_list_snap, temp_list)\n if temp_list_snap != temp_list:\n moved = True\n # step 3: store merged temp list back on grid\n idx = 0\n for step in range(num_steps):\n row = start_cell[0] + step * OFFSETS[direction][0]\n col = start_cell[1] + step * OFFSETS[direction][1]\n if direction in (UP, DOWN):\n self._cells[row][col] = temp_list[idx]\n idx += 1\n elif direction in (LEFT, RIGHT):\n self._cells[row][col] = temp_list[idx]\n idx += 1\n temp_list = []\n if moved:\n self.new_tile()\n moved = False\n score = sum(map(sum, self._cells))\n print(\"Your score: %s\" % score)\n #return self._cells", "def move(self, direction):\n change_check = False\n for tile in self.dir_dict[direction]:\n if direction == UP or direction == DOWN:\n temp_list = []\n for step in range(self.grid_height):\n temp_list.append(self.grid[tile[0] + step * OFFSETS[direction][0]]\n [tile[1] + step * OFFSETS[direction][1]])\n if not temp_list == merge(temp_list):\n change_check = True\n temp_list = merge(temp_list)\n for step in range(self.grid_height):\n self.grid[tile[0] + step * OFFSETS[direction][0]] \\\n [tile[1] + step * OFFSETS[direction][1]] \\\n = temp_list[step]\n if direction == LEFT or direction == RIGHT:\n temp_list = []\n for step in range(self.grid_width):\n temp_list.append(self.grid[tile[0] + step * OFFSETS[direction][0]]\n [tile[1] + step * OFFSETS[direction][1]])\n if not temp_list == merge(temp_list):\n change_check = True\n temp_list = merge(temp_list)\n for step in range(self.grid_width):\n self.grid[tile[0] + step * OFFSETS[direction][0]] \\\n [tile[1] + step * OFFSETS[direction][1]] \\\n = temp_list[step]\n if change_check == True:\n self.new_tile()", "def getMovement(self):\n # store the robot's current location and set the directional movement to 0,0 so that the robot won't move by default\n currentLocation = (self.me['x'], self.me['y'])\n directionalMovement = (0,0)\n\n # ensure that target location is not none and not equal to the current location\n if self.targetLocation and not currentLocation == self.targetLocation:\n\n # store the direction, directional movement, and the new map location we will trying to move the robot to this round\n direction = self.getDirection(currentLocation, self.targetLocation)\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # store the current direction for use later\n initialDirection = direction\n\n # by default, the robot is ready to move in the event that the new map location is already passable\n readyToMove = True\n\n # while the new map location is not passable\n while not self.isPassable(newLocation):\n # if unit is a crusader moving diagonally at their fastest pace, set their directional movement to (1,1)\n if self.isCrusader and directionalMovement[0] == 2 and directionalMovement[1] == 2:\n directionalMovement[0] = 1\n directionalMovement[1] = 1\n # or if the unit is traveling faster than 1 block East\n elif directionalMovement[0] > 1:\n # lower the unit's movement East by 1 block\n directionalMovement[0] -= 1\n # or if the unit is traveling faster than 1 block West\n elif directionalMovement[0] < -1:\n # lower the unit's movement West by 1 block\n 
directionalMovement[0] += 1\n # or if the unit is traveling faster than 1 block South\n elif directionalMovement[1] > 1:\n # lower the unit's movement South by 1 block\n directionalMovement[1] -= 1\n # or if the unit is traveling faster than 1 block North\n elif directionalMovement[1] < -1:\n # lower the unit's movement North by 1 block\n directionalMovement[1] += 1\n # else the unit is already moving the shortest distance they can in the current direction\n else:\n # rotate the robots direction clockwise and proceed\n direction = self.getRotatedDirection(direction, 1)\n\n # if we ened up facing the same direction we started in\n if direction == initialDirection:\n # let the code know we're not ready to move\n readyToMove = False\n # break out of the while loop\n break\n\n # overwrite the directional movement with a new one based on the direction we just got\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n\n # overwrite the new location with the location we get from the directional movement we just got\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # if the robot ended up not being ready to move\n if not readyToMove:\n # change the directional movement back to (0,0) so that it doesn't move\n directionalMovement = (0,0)\n else :\n self.targetLocation = self.getRandomPassableLocation()\n # return the directional movement\n return directionalMovement", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except 
IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def getMove(self, grid):\n\t\tmove = self.performIterativeDepthSearch(grid)\n\t\tendtime = time.clock()\n\t\t#print (endtime - starttime)\n\t\treturn move", "def move(self, direction):\n\n # Move to the right\n if direction == 'right':\n if self.square_x < (NUMBER_OF_SPRITES -1):\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y][self.square_x + 1] != 'w':\n # Move by one square right on X axis\n self.square_x += 1\n # Calculation of the \"Real\" positioning in pixels\n self.x_pos = self.square_x * SPRITES_SIZE\n\n # Move to the left\n if direction == 'left':\n if self.square_x > 0:\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y][self.square_x - 1] != 'w':\n # Move by one square left on X axis\n self.square_x -= 1\n # Calculation of the \"Real\" positioning in pixels\n self.x_pos = self.square_x * SPRITES_SIZE\n\n # Move up\n if direction == 'up':\n if self.square_y > 0:\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y-1][self.square_x] != 'w':\n # Move by one square up on Y axis\n self.square_y -= 1\n # Calculation of the \"Real\" positioning in pixels\n self.y_pos = self.square_y * SPRITES_SIZE\n\n # Move down\n if direction == 'down':\n if self.square_y < (NUMBER_OF_SPRITES -1):\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y + 1][self.square_x] != 'w':\n # Move by one square down on Y axis\n self.square_y += 1\n # Calculation of the \"Real\" positioning in pixels\n self.y_pos = self.square_y * SPRITES_SIZE", "def apply_move(cell, x, y):\r\n x2 = (co_ords[cell])[0] + x\r\n y2 = (co_ords[cell])[1] + y\r\n return (x2, y2)", "def advance(self,distance = 1):\n colOffset = 0\n rowOffset = 0\n if self.currentOrientation == GridOrientation.left:\n colOffset = -1 * distance\n if self.currentOrientation == GridOrientation.right:\n colOffset = distance\n if self.currentOrientation == GridOrientation.down:\n rowOffset = -1 * distance\n if self.currentOrientation == GridOrientation.up:\n rowOffset = distance\n self.currentCol += colOffset\n self.currentRow += rowOffset\n \n #See if we've expanded the grid\n if self.currentCol > self.maxCol:\n self.maxCol = self.currentCol\n if self.currentCol < self.minCol:\n self.minCol = self.currentCol\n if self.currentRow > self.maxRow:\n self.maxRow = self.currentRow\n if self.currentRow < self.minRow:\n self.minRow = self.currentRow\n \n return self.getCoordinate()", "def new_move(self, grid_x, grid_y, player):\n #duplication /!\\\n if player == self.X:\n self.draw_X(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.X\n\n elif player == self.O:\n self.draw_O(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.O", "def move(self, direction):\n no_change = True\n if direction == UP or direction == DOWN:\n other_direction = self.get_grid_height()\n elif direction == LEFT or direction == RIGHT:\n other_direction = self.get_grid_width()\n for first_index in self._indices[direction]:\n row = first_index[0]\n col = first_index[1]\n line = []\n for _ in range(other_direction):\n 
line.append(self.get_tile(row, col))\n row += OFFSETS[direction][0]\n col += OFFSETS[direction][1]\n merged_line = merge(line)\n \n if merged_line != line:\n no_change = False\n \n row = first_index[0]\n col = first_index[1]\n for idx in range(other_direction):\n self.set_tile(row, col, merged_line[idx])\n row += OFFSETS[direction][0]\n col += OFFSETS[direction][1]\n if no_change == False: \n self.new_tile()", "def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def move(self, direction):\r\n\r\n # ternary operator setting dimension variable\r\n dim = self.get_grid_width() if (direction == \"LEFT\" or direction ==\r\n \"RIGHT\") else self.get_grid_height()\r\n self.get_row(direction, dim)\r\n self.new_tile()", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + 
bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def move(self, direction):\n command = self.DIRECTIONS[direction][\"command\"]\n mem, out = self.cpu.run_program(inputs=[command])\n status = out.pop()\n if status in (1, 2):\n self.position = Point(\n self.position.x + self.DIRECTIONS[direction][\"mask\"][0],\n self.position.y + self.DIRECTIONS[direction][\"mask\"][1]\n )\n if self.display:\n self.draw_grid()\n sleep(self.delay)\n return status", "def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n self.game.mark_complete(winner=self.owner)\n\n # Switch player turn\n self.game.switch_turn()\n\n # Let the game know about the move and result\n self.send_game_update()", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def motion(self):\n priority = {\"north\": [-1, 0], \"south\": [1, 0],\n \"east\": [0, 1], \"west\": [0, -1]}\n\n priority_list = [\"north\", \"south\", \"east\", \"west\"]\n\n critical_point = False\n while critical_point is False:\n 
row = self.curr_cell.row\n column = self.curr_cell.col\n\n if self.allow_to_move(priority_list[0],\n row + priority[priority_list[0]][0],\n column + priority[priority_list[0]][1]):\n\n self.move(priority_list[0])\n\n elif self.allow_to_move(priority_list[1],\n row + priority[priority_list[1]][0],\n column + priority[priority_list[1]][1]):\n\n self.move(priority_list[1])\n\n elif self.allow_to_move(priority_list[2],\n row + priority[priority_list[2]][0],\n column + priority[priority_list[2]][1]):\n\n self.move(priority_list[2])\n\n elif self.allow_to_move(priority_list[3],\n row + priority[priority_list[3]][0],\n column + priority[priority_list[3]][1]):\n\n self.move(priority_list[3])\n\n else:\n # Robot isolated\n critical_point = True\n\n return self.curr_cell, self.path", "def move(self, direction):\r\n # we are initializing the required variables\r\n num_steps=0\r\n if direction== UP or direction==DOWN:\r\n num_steps=self._height\r\n if direction==LEFT or direction==RIGHT:\r\n num_steps=self._width\r\n move_in=OFFSETS[direction]\r\n temp_list=[]\r\n moved=False \r\n # merging the list in the particular direction\r\n for start_cell in self._initial_cells[direction]:\r\n for step in range(num_steps):\r\n row = start_cell[0] + step * move_in[0]\r\n col = start_cell[1] + step * move_in[1]\r\n # creating a list of all the columns and rows in that direction \r\n temp_list.append(self._grid[row][col])\r\n # caling the merge function to calculate the resultant list\r\n merged_list=merge(temp_list)\r\n # putting back the resultant list\r\n for step in range(num_steps):\r\n row = start_cell[0] + step * move_in[0]\r\n col = start_cell[1] + step * move_in[1]\r\n self._grid[row][col]=merged_list[step]\r\n # cheking for any changes in the board\r\n if temp_list!=merged_list:\r\n moved=True\r\n temp_list=[]\r\n #adding anew tile\r\n if moved:\r\n self.new_tile()", "def move(self, direction):\n # replace with your code\n\n indices = self.direction_indices[direction]\n for coordinate in indices:\n merged_coordinate_list = self.get_list(direction, coordinate)\n self.change_board(merged_coordinate_list, coordinate, direction)\n print(self.__str__())\n if self.board_is_not_full():\n self.new_tile()", "def move(self, direction):\n # We will add a new tile only if something has moved\n moved = False\n \n # We may extract a row or a column.\n loop_length = self._height + self._width \\\n - len(self._initial_tiles[direction])\n \n # Offsets for grid traversal\n row_off, col_off = OFFSETS[direction]\n \n for row, col in self._initial_tiles[direction]:\n # Computing positions of tiles to extract\n pos_list = [(row + index * row_off, \n col + index * col_off) \n for index in xrange(loop_length)]\n \n # Getting values from the grid and merging\n extracted_list = [self.get_tile(*pos) for pos in pos_list]\n merge_list = merge(extracted_list)\n \n # We modify the grid only if it has changed\n for pos, val_1, val_2 in zip(pos_list, extracted_list, merge_list):\n if val_1 - val_2:\n self.set_tile(*pos, value = val_2)\n moved = True\n \n # Any changes?\n if moved:\n self.new_tile()", "def make_move(self, tower):\r\n height, index = self.__find_random_moves(tower)\r\n \r\n if self.stat_brain.all_valid(tower) == 0 or self.stat_brain.is_valid(height, index, tower):\r\n return height, index\r\n else:\r\n while not self.stat_brain.is_valid(height, index, tower):\r\n height, index = self.__find_random_moves(tower)\r\n \r\n return height, index", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed 
= False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res" ]
[ "0.6662709", "0.6605978", "0.6600228", "0.6573474", "0.6549251", "0.65067387", "0.6477407", "0.6455114", "0.64177567", "0.6393052", "0.6361281", "0.6359388", "0.6349698", "0.63481045", "0.63376284", "0.63300747", "0.63202614", "0.6316886", "0.630494", "0.62923414", "0.62752783", "0.6271348", "0.6218553", "0.6210817", "0.62067926", "0.6187448", "0.61868685", "0.61863774", "0.61756545", "0.6156275" ]
0.7444775
0
Return any binary tree that matches the given preorder and postorder traversals. Values in the traversals pre and post are distinct positive integers.
def constructFromPrePost(self, pre, post): if not pre and not post: return None root = TreeNode(pre[0]) if len(pre) == 1 and len(post) == 1: return root if pre[1] == post[-2]: lpre, lpost = pre[1:], post[:len(post)-1] ltree = self.constructFromPrePost(lpre, lpost) root.left = ltree else: lpre = pre[1:pre.index(post[-2])] lpost = post[:post.index(pre[1]) + 1] rpre = pre[pre.index(post[-2]):] rpost = post[post.index(pre[1])+1:-1] ltree = self.constructFromPrePost(lpre, lpost) rtree = self.constructFromPrePost(rpre, rpost) root.left, root.right = ltree, rtree return root
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildTree(self, inorder: 'List[int]', postorder: 'List[int]') -> 'TreeNode':\n self.post_index = len(postorder) - 1\n dict = {}\n for i, num in enumerate(inorder):\n dict[num] = i\n \n def helper(in_left, in_right):\n if in_left > in_right:\n return None\n \n root_val = postorder[self.post_index]\n root = TreeNode(root_val)\n self.post_index -= 1\n if in_left < in_right:\n root.right = helper(dict[root_val] + 1, in_right)\n root.left = helper(in_left, dict[root_val] - 1)\n return root\n \n return helper(0, len(inorder) - 1)", "def build_tree(preorder, inorder):\n\n # Base case\n if preorder == [] or inorder == []:\n return\n\n root = preorder[0]\n\n # Breaks the lists by root, left side, and right side\n in_index = inorder.index(root)\n in_left = inorder[:in_index]\n in_right = inorder[in_index + 1:]\n pre_left = preorder[1 : len(in_left) + 1]\n pre_right = preorder[len(in_left) + 1 :]\n\n # Recursively creates smaller binary trees to make a big binary tree\n tree = BinaryTree(root)\n tree.set_left(build_tree(pre_left, in_left))\n tree.set_right(build_tree(pre_right, in_right))\n\n return tree", "def construct_binary_tree(preorder, inorder):\n if len(preorder) == 0 or preorder == None or inorder == None:\n return None\n\n val = preorder[0]\n root = BinaryTreeNode(val)\n\n if len(preorder) > 1:\n inorder_root_index = inorder.index(val)\n inorder_left_sub_tree = inorder[:inorder_root_index]\n inorder_right_sub_tree = inorder[inorder_root_index+1:]\n preorder_left_sub_tree = preorder[1:len(inorder_left_sub_tree)+1]\n preorder_right_sub_tree = preorder[len(inorder_left_sub_tree) + 1:]\n root.left = construct_binary_tree(preorder_left_sub_tree, inorder_left_sub_tree)\n root.right = construct_binary_tree(preorder_right_sub_tree, inorder_right_sub_tree)\n return root", "def test_bin_tree():\n n1 = BinTreeNode(1)\n n2 = BinTreeNode(2)\n n3 = BinTreeNode(3)\n n4 = BinTreeNode(4)\n n5 = BinTreeNode(5)\n n1.left = n2\n n1.right = n3\n n2.left = n4\n n3.right = n5\n t = BinTree(n1)\n print('pre order')\n preorder_trav(t.root)\n print('in order')\n inorder_trav(t.root)\n print('post order')\n postorder_trav(t.root)", "def Trees_preOrder_traversal():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:https://www.hackerrank.com/challenges/tree-preorder-traversal/problem\n def preOrder(root):\n # inorder: left root right\n # preorder: root, left, right 5,3,1,4,7,6,8\n # postorder: left,right, root\n # 5\n # 3 7\n # 1 4 6 8\n out = []\n to_proccess = [root]\n while to_proccess:\n node = to_proccess.pop()\n out.append(node.info)\n for child in [node.right, node.left]:\n if child:\n to_proccess.append(child)\n print(\" \".join(map(str, out)))\n\n def preOrder_recursive(root):\n def _preOrder(node):\n out = []\n out.append(node.info)\n for child in [node.left, node.right]:\n if child:\n out.extend(_preOrder(child))\n return out\n print(\" \".join(map(str, _preOrder(root))))", "def postorderTraversal(self, root: TreeNode) -> List[int]:\n stack = []\n postorder = []\n stack.append(root)\n\n while stack:\n node = stack.pop()\n if not node: continue\n right = node.right\n left = node.left\n if not right and not left:\n postorder.append(node.val)\n else:\n node.left = None\n node.right = None\n stack.append(node)\n stack.append(right)\n stack.append(left)\n return postorder", "def postorder_traversal(tree):\n post = '' # Handles the spaces between the postorder traversal\n # in the string\n\n # To make sure the function doesn't move on if it doesn't have\n # a left child, so it doesn't add to string if it 
is None\n if tree.get_left() != None:\n post += postorder_traversal(tree.get_left()) + ' '\n\n # To make sure the function doesn't move on if it doesn't have\n # a right child, so it doesn't add to string if it is None\n if tree.get_right() != None:\n post += postorder_traversal(tree.get_right()) + ' '\n\n # Prints the current value (this is all recursed in postorder)\n post += str(tree.get_val())\n\n return post", "def build_tree_from_preorder(values): \r\n \r\n if len(values) == 0 or values[0] == None:\r\n return None\r\n root = TreeNode(values[0])\r\n if len(values) == 1:\r\n return root\r\n root.left = build_tree_from_preorder(values[1:((len(values)-1) // 2 + 1)])\r\n root.right = build_tree_from_preorder(values[((len(values)-1) // 2 + 1):]) \r\n if root.left != None:\r\n root.left.parent = root\r\n if root.right != None:\r\n root.right.parent = root\r\n \r\n return root", "def compute_pre_post_order_values(\n tree,\n parent_id=None,\n parent_preorder=1,\n):\n # if we don't get a parent_id, we infer it to be the top level node\n parent_id = parent_id or next(iter(tree))\n\n pre_post_parent = {\n parent_id: {\n \"preorder\": parent_preorder,\n }\n }\n\n child_postorder = None\n # sorted to make result deterministic\n children_ids = sorted(tree[parent_id].keys())\n for child_id in children_ids:\n # if child_postorder is set we know this is not the first child and can set preorder relative to previous child\n child_preorder = child_postorder + 1 if child_postorder else parent_preorder + 1\n\n pre_post_child = compute_pre_post_order_values(\n tree[parent_id],\n child_id, \n child_preorder)\n pre_post_parent.update(pre_post_child)\n child_postorder = pre_post_child[child_id][\"postorder\"]\n # if children, parent post order is one more than last child post order; if leafnode, then postorder is one more than preorder\n pre_post_parent[parent_id][\"postorder\"] = pre_post_child[child_id][\"postorder\"] + 1 if children_ids else parent_preorder + 1\n\n return pre_post_parent", "def postorder_iteratively(tree):\n visited = []\n path = [tree]\n result = []\n while len(path) > 0:\n t = path[-1]\n if t not in visited:\n if t.right:\n path = path + [t.right]\n if t.left:\n path = path + [t.left]\n visited.append(t)\n else:\n path.pop()\n result.append(t.data)\n\n return result", "def postorder(root):\n if not root:\n print(\"Tree is Empty.\")\n return\n stack = []\n node = root\n visited = set()\n while stack or node:\n if node:\n stack.append(node)\n node = node.left\n\n else:\n node = stack.pop()\n if node.right and not (node.right in visited):\n stack.append(node)\n node = node.right\n else:\n visited.add(node)\n print(node.data, end=\" \")\n node = None\n print()", "def postorder(root):\n if not root:\n return\n inorder(root.left)\n inorder(root.right)\n print(root.data, end=' ')", "def post_order_nodes(root):\n if root.get_left():\n for node in post_order_nodes(root.get_left()):\n yield node\n\n if root.get_right():\n for node in post_order_nodes(root.get_right()):\n yield node\n\n yield root", "def reconstruct_preorder(preorder):\n\tdef reconstruct_preorder_helper(preorder_iter):\n\t\tsubtree_key = next(preorder_iter, None)\n\t\tif subtree_key is None:\n\t\t\treturn None\n\t\treturn BinaryTreeNode(\n\t\t\tsubtree_key,\n\t\t\treconstruct_preorder_helper(preorder_iter),\n\t\t\treconstruct_preorder_helper(preorder_iter))\n\treturn reconstruct_preorder_helper(iter(preorder))", "def postorder_iterative(root):\n if root is None:\n return\n \n stack1 = []\n stack2 = []\n stack1.append(root)\n \n 
while len(stack1):\n node = stack1.pop()\n stack2.append(node)\n \n if node.left:\n stack1.append(node.left)\n if node.right:\n stack1.append(node.right)\n \n while len(stack2):\n node = stack2.pop()\n print(node.data, end=\" \")", "def post_order(self):\n def walk(root):\n\n if root.left:\n walk(root.left)\n\n if root.right:\n walk(root.right)\n\n self.post_values.append(root.value)\n \n walk(self.root)\n return self.post_values", "def postorder(root: Node):\n return postorder(root.left) + postorder(root.right) + [root.data] if root else []", "def test_post_order_0_2(bst_all_to_left):\n assert tuple(bst_all_to_left.post_order()) == (1, 3, 2, 5, 4)", "def pre_order(self):\n def walk(root):\n self.pre_values.append(root.value)\n\n if root.left:\n walk(root.left)\n \n if root.right:\n walk(root.right)\n \n walk(self.root)\n return self.pre_values", "def postorder(self):\n\n traversal = []\n self.postorder_helper(self.root, traversal)\n return traversal", "def test_post_order_traversal(our_bsts):\n bpost = []\n for i in our_bsts[0].post_order():\n bpost.append(i)\n assert bpost == our_bsts[6]", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def create_BinaryTree(inor, preor, inStart, inEnd):\n if inStart > inEnd:\n return\n temp = BinaryTreeNode(preor[create_BinaryTree.index])\n create_BinaryTree.index += 1\n\n if inStart == inEnd:\n return temp\n\n for i in range(inStart, inEnd + 1):\n if inor[i] == temp.data:\n index = i\n\n temp.left = create_BinaryTree(inor, preor, inStart, index - 1)\n temp.right = create_BinaryTree(inor, preor, index + 1, inEnd)\n return temp", "def preorder(root: Node):\n return [root.data] + preorder(root.left) + preorder(root.right) if root else []", "def branches(tree):\n return tree[1:]", "def generate_tree_postorder(node_lst, root_index):", "def post_order(self):\n try:\n if not self.root:\n return \"the tree is empty!\"\n else:\n output = []\n\n def order_tree(node):\n if node.left:\n order_tree(node.left)\n if node.right:\n order_tree(node.right)\n nonlocal output\n output += [node.value]\n return output\n final_out = order_tree(self.root)\n return final_out\n except:\n print(\"something went wrong please try again\")", "def branches(tree):\n\n return tree[1:]", "def preorder(node, pred, succ, res):\n if node is None:\n return res\n res = min(res, node.val - pred)\n res = min(res, succ - node.val)\n res = preorder(node.left, pred, node.val, res)\n res = preorder(node.right, node.val, succ, res)\n return res", "def preorder(root):\n if not root:\n return\n print(root.data, end=' ')\n inorder(root.left)\n inorder(root.right)" ]
[ "0.6668872", "0.6449099", "0.6396742", "0.6088152", "0.60584265", "0.5841729", "0.57979167", "0.5787835", "0.5776373", "0.5711486", "0.55507797", "0.5525655", "0.5513274", "0.5498472", "0.54892987", "0.5478768", "0.54738367", "0.5418973", "0.53901404", "0.53882676", "0.53662634", "0.5342413", "0.53129363", "0.5307645", "0.53002566", "0.52778006", "0.5276526", "0.52742505", "0.5267223", "0.52619344" ]
0.66620654
1
Create and return an instance of the Isort plugin.
def setup_isort_tool_plugin(custom_rsc_path=None): arg_parser = argparse.ArgumentParser() if custom_rsc_path is not None: resources = Resources([custom_rsc_path]) else: resources = Resources( [os.path.join(os.path.dirname(statick_tool.__file__), "plugins")] ) config = Config(resources.get_file("config.yaml")) plugin_context = PluginContext(arg_parser.parse_args([]), resources, config) plugin_context.args.output_directory = os.path.dirname(__file__) itp = IsortToolPlugin() itp.set_plugin_context(plugin_context) return itp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sorter(Plugin):\n return Plugin.order", "def create_plugin(self, **kwargs):\n return self.plugin_class(**kwargs)", "def new(self, sort, properties=None):\n if sort is None:\n sort = UNKNOWNSORT\n # find next available vid\n vid, index = self.vid, self.index\n while vid in index:\n vid += 1\n varstring = '{}{}'.format(sort, vid)\n index[vid] = varstring\n if properties is None:\n properties = []\n self.store[varstring] = properties\n self.vid = vid + 1\n return (varstring, properties)", "def create_r2plugin(self, **kwargs):\n return self.create_tool(cls=R2Plugin, **kwargs)", "def classFactory(iface):\n from .plugin_builder import PluginBuilder\n return PluginBuilder(iface)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .iso4app import MainPlugin\n return MainPlugin(iface)", "def getInstance(config):\n return Plugin(config)", "def getInstance(config):\n return Plugin(config)", "def __init__(\n self,\n plugin_id,\n plugin_name,\n plugin_description,\n plugin_instance,\n plugin_enabled_by_default,\n plugin_version,\n plugin_interface_version,\n instance_file_name,\n plugin_url,\n plugin_configuration,\n ):\n (\n self.__plugin_id,\n self.__plugin_names,\n self.__plugin_description,\n self.__plugin_instance,\n self.__plugin_enabled_by_default,\n self.__plugin_version,\n self.__plugin_interface_version,\n self.__plugin_file_name,\n self.__plugin_url,\n self.__plugin_configuration,\n ) = (\n plugin_id.strip().lower(),\n [],\n plugin_description,\n plugin_instance,\n plugin_enabled_by_default,\n plugin_version,\n plugin_interface_version,\n instance_file_name,\n plugin_url,\n plugin_configuration,\n )\n for next_name in plugin_name.lower().split(\",\"):\n next_name = next_name.strip()\n if next_name:\n self.__plugin_names.append(next_name)", "def __new__(mcls, name, bases, namespace): # @NoSelf - 'mcls' is SortinfoMeta, 'cls' is the new class\n # Check that the namespace is compliant\n if '__slots__' not in namespace:\n raise PydmrsError('Subclasses of Sortinfo must define __slots__')\n if 'features' in namespace:\n raise PydmrsError(\"Subclasses of Sortinfo must not define a 'features' attribute\")\n \n # Force all feature names to be lowercase\n namespace['__slots__'] = tuple(feat.lower() for feat in namespace['__slots__'])\n \n # Create the class, and add the 'features' attribute\n cls = super().__new__(mcls, name, bases, namespace)\n cls.features = tuple(chain.from_iterable(getattr(parent, '__slots__', ())\n for parent in reversed(cls.__mro__)))\n \n # Sortinfo defines a from_normalised_dict method which calls either EventSortinfo or InstanceSortinfo\n # Subclasses need to override this method\n if 'from_normalised_dict' not in namespace:\n cls.from_normalised_dict = cls._from_normalised_dict\n \n return cls", "def test_isort_tool_plugin_found():\n if sys.version_info.major == 3 and sys.version_info.minor < 6:\n pytest.skip(\"isort is only available for Python 3.6+, unable to test\")\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"isort\"\n assert any(\n plugin_info.plugin_object.get_name() == \"isort\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a 
plugin is named Isort Tool Plugin\n assert any(\n plugin_info.name == \"Isort Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def new(self, plugin, *args, **kwargs):\n if plugin in self.modules.keys():\n return self.modules[plugin](*args, **kwargs)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .CCD_Plugin import CCD_Plugin\n return CCD_Plugin(iface)", "def sortby(self):\n ...", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .eco_valuator import EcoValuatorPlugin\n return EcoValuatorPlugin()", "def _make_sorter(self, ax):\n np_array = ax.get_values()\n # return np_array.argsort()\n # ax = ax.take(indexer)\n sorter = RocRadixSortDriver()\n sorted_array, indices = sorter.sort_with_indices(np_array)\n return sorted_array, indices", "def uctt_plugin_factory_cli_info(\n environment: Environment, instance_id: str = ''):\n return InfoCliPlugin(environment, instance_id)", "def __init__(self, new_sorts, supersorts):\n global crt_sorts\n crt_sorts = new_sorts\n \n super(SortDecl, self).__init__()\n self.new_sorts = new_sorts\n self.supersorts = supersorts", "def __init__(self, ItemComparer):\n self.item_comparer = ItemComparer", "def __init__(self,\r\n n,\r\n sort,\r\n algo,\r\n comps,\r\n exs,\r\n predata,\r\n postdata,\r\n comp_eq,\r\n ex_eq,\r\n time):\r\n self.n = n\r\n self.sort = sort\r\n self.algo = algo\r\n self.comps = comps\r\n self.exs = exs\r\n self.predata = predata\r\n self.postdata = postdata\r\n self.comp_eq = comp_eq\r\n self.ex_eq = ex_eq\r\n self.time = time", "def get_plugin_interface(self):", "def __init__(self, data, draw, speed):\n self.heap_sort(data, draw, speed)", "def create_cutter_plugin():\n return CutterCovPlugin()", "def plugin_instance(self):\n return self.__plugin_instance", "def sort(self, *args, **kwargs) -> \"Actions\":\n self.actions.sort(*args, **kwargs)\n return self", "def uctt_plugin_factory_cli_environment(\n environment: Environment, instance_id: str = ''):\n return EnvironmentCliPlugin(environment, instance_id)", "def _init_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass", "def uctt_plugin_factory_cli_config(\n environment: Environment, instance_id: str = ''):\n return ConfigCliPlugin(environment, instance_id)", "def __init__(self, new_sorts, supersorts):\r\n global crt_sorts\r\n crt_sorts = new_sorts\r\n \r\n super(SortDecl, self).__init__()\r\n self.new_sorts = new_sorts\r\n self.supersorts = supersorts", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHDIF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj" ]
[ "0.5880008", "0.572811", "0.5263633", "0.5135936", "0.5086754", "0.5085847", "0.5052518", "0.5052518", "0.5028125", "0.5018764", "0.5011754", "0.49714696", "0.49462602", "0.48368236", "0.48352364", "0.4823109", "0.48152107", "0.48114508", "0.48113042", "0.479689", "0.47876537", "0.47851378", "0.47843146", "0.47635952", "0.4755802", "0.47509703", "0.4749833", "0.47206005", "0.4680351", "0.46746126" ]
0.59617186
0
Test that the plugin manager can find the Isort plugin.
def test_isort_tool_plugin_found(): if sys.version_info.major == 3 and sys.version_info.minor < 6: pytest.skip("isort is only available for Python 3.6+, unable to test") manager = PluginManager() # Get the path to statick_tool/__init__.py, get the directory part, and # add 'plugins' to that to get the standard plugins dir manager.setPluginPlaces( [os.path.join(os.path.dirname(statick_tool.__file__), "plugins")] ) manager.setCategoriesFilter( { "Tool": ToolPlugin, } ) manager.collectPlugins() # Verify that a plugin's get_name() function returns "isort" assert any( plugin_info.plugin_object.get_name() == "isort" for plugin_info in manager.getPluginsOfCategory("Tool") ) # While we're at it, verify that a plugin is named Isort Tool Plugin assert any( plugin_info.name == "Isort Tool Plugin" for plugin_info in manager.getPluginsOfCategory("Tool") )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_plugins(self):\n from omtk import plugin_manager\n pm = plugin_manager.plugin_manager\n\n loaded_plugin_names = [plugin.cls.__name__ for plugin in pm.get_loaded_plugins_by_type('modules')]\n\n builtin_plugin_names = (\n 'Arm',\n 'FK',\n 'AdditiveFK',\n 'AvarGrpOnSurface',\n 'FaceBrow',\n 'FaceEyeLids',\n 'FaceEyes',\n 'FaceJaw',\n 'FaceLips',\n 'FaceNose',\n 'FaceSquint',\n 'Hand',\n 'Head',\n 'IK',\n 'InteractiveFK',\n 'Leg',\n 'LegQuad',\n 'Limb',\n 'Neck',\n 'Ribbon',\n 'SplineIK',\n 'Twistbone',\n )\n\n for plugin_name in builtin_plugin_names:\n self.assertIn(plugin_name, loaded_plugin_names)", "def test_plugins():\n assert plugins.template.plugin_test() == True\n assert plugin_test() == True", "def test(self, plugin):\n plug = plugin_source.load_plugin(plugin)\n plug.test()", "def test_plugin_initialize(self):\n p = PluginCustom()\n self.assertEqual('youpie', p.toto)", "def test_discoverable(self):\r\n plugins = getPlugins(IProcessor)\r\n lmath = [p for p in plugins if p.name == \"mlore\"]\r\n self.assertEqual(len(lmath), 1, \"Did not find math lore plugin: %r\" % (lmath,))", "def test_make_tool_plugin_found():\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces([os.path.join(os.path.dirname(statick_tool.__file__),\n 'plugins')])\n manager.setCategoriesFilter({\n \"Tool\": ToolPlugin,\n })\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"make\"\n assert any(plugin_info.plugin_object.get_name() == 'make' for\n plugin_info in manager.getPluginsOfCategory(\"Tool\"))\n # While we're at it, verify that a plugin is named Yamllint Tool Plugin\n assert any(plugin_info.name == 'Make Tool Plugin' for\n plugin_info in manager.getPluginsOfCategory(\"Tool\"))", "def test_register_dynamic_plugin(self):\n pass", "def test_rstlint_tool_plugin_found():\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"rstlint\"\n assert any(\n plugin_info.plugin_object.get_name() == \"rstlint\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a plugin is named rstlint Tool Plugin\n assert any(\n plugin_info.name == \"rstlint Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def test_register_dynamic_plugin_manager(self):\n pass", "def test_exposeInterfaces(self):\n if self.plugin is None:\n return\n\n cs = settings.Settings()\n results = self.plugin.exposeInterfaces(cs)\n if results is None or not results:\n return\n\n # each plugin should return a list\n self.assertIsInstance(results, list)\n for result in results:\n # Make sure that all elements in the list satisfy the constraints of the\n # hookspec\n self.assertIsInstance(result, tuple)\n self.assertEqual(len(result), 3)\n\n order, interface, kwargs = result\n\n self.assertIsInstance(order, (int, float))\n self.assertTrue(issubclass(interface, interfaces.Interface))\n self.assertIsInstance(kwargs, dict)", "def test_isort(self):\n chdir(REPO_ROOT)\n cmd = [\"isort\", \"-df\", \"-rc\", \"-c\", *SRC_DIRS]\n print(\"running:\", \" \".join(str(part) for 
part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, f\"isort issues:\\n{proc.stdout.decode('utf-8')}\"", "def load_plugin():\n return HostTestPluginCopyMethod_Shell()", "def test_isort_tool_plugin_parse_valid():\n itp = setup_isort_tool_plugin()\n total_output = []\n output = \"/tmp/x.py\"\n total_output.append(output)\n output = \"/tmp/y.py\"\n total_output.append(output)\n issues = itp.parse_output(total_output)\n assert len(issues) == 2\n assert issues[0].filename == \"/tmp/x.py\"\n assert issues[0].line_number == \"0\"\n assert issues[0].tool == \"isort\"\n assert issues[0].issue_type == \"formatting\"\n assert issues[0].severity == \"3\"\n assert issues[0].message == \"Imports are incorrectly sorted and/or formatted.\"\n assert issues[1].filename == \"/tmp/y.py\"", "def test_plugin_retrieval(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n self.assertIsNotNone(plugin)\n self.assertEqual(plugin.get_model().name, PLUGIN_NAME)\n self.assertEqual(plugin.name, PLUGIN_NAME)\n self.assertEqual(plugin.get_model().title, PLUGIN_TITLE)\n self.assertEqual(plugin.title, PLUGIN_TITLE)\n self.assertEqual(plugin.entry_point_url_id, PLUGIN_URL_ID)", "def test_register_dynamic_plugin_manager1(self):\n pass", "def test_register_dynamic_plugin1(self):\n pass", "def test_remote_plugin(self):\n plugin_name = 'Slack'\n Plugin.download_plugin(plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')", "def test_get_plugin_by_id(self):\n response = self.client.get_plugin_by_id(1)\n self.assertEqual(response['id'], 1)", "def testSorting(self):\n if self.sorting in tools.SORTINGS:\n self.assertEqual(\n self.sorting,\n self.config.sorting\n )\n else:\n self.assertNotEqual(\n self.sorting,\n self.config.sorting\n )\n self.assertEqual(\n tools.SORTING_DEFAULT,\n self.config.sorting\n )", "def test_parrot_imported():\n assert \"parrot\" in sys.modules", "def test_custom_plugin(self):\n plugin_name = 'Druptest'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['name'] == plugin_name", "def test_get_plugins_with_search_args(self):\n response = self.client.get_plugins({'name_exact': \"pl-dircopy\"})\n self.assertEqual(response['data'][0]['name'], \"pl-dircopy\")", "def sorter(Plugin):\n return Plugin.order", "def get_plugin_interface(self):", "def test_plugin_initialize_from_args(self):\n sys.argv.append('-t')\n p = PluginCustom()\n self.assertEqual('yourah', p.toto)", "def test_plugin_slowinit(node_factory):\n n = node_factory.get_node()\n\n n.rpc.plugin_start(os.path.join(os.getcwd(), \"tests/plugins/slow_init.py\"))\n\n # It's not actually configured yet, see what happens;\n # make sure 'rescan' and 'list' controls dont crash\n n.rpc.plugin_rescan()\n n.rpc.plugin_list()", "def test_install_plugin_again_is_ok(self):\n raise NotImplementedError()", "def setup_isort_tool_plugin(custom_rsc_path=None):\n arg_parser = argparse.ArgumentParser()\n\n if custom_rsc_path is not None:\n resources = Resources([custom_rsc_path])\n else:\n resources = Resources(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n config = Config(resources.get_file(\"config.yaml\"))\n plugin_context = PluginContext(arg_parser.parse_args([]), 
resources, config)\n plugin_context.args.output_directory = os.path.dirname(__file__)\n itp = IsortToolPlugin()\n itp.set_plugin_context(plugin_context)\n return itp", "def test_plugin_urls(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n self.assertEqual(plugin.urls, urlpatterns)", "def isort(context):\n exec_cmd = \"isort . --check --diff\"\n run_cmd(context, exec_cmd)" ]
[ "0.66552377", "0.6554768", "0.6273206", "0.62442213", "0.6233943", "0.62220126", "0.60991585", "0.60933506", "0.6089562", "0.5941484", "0.59386533", "0.59242934", "0.5922501", "0.58955973", "0.58816534", "0.58793914", "0.58021533", "0.57796365", "0.57335883", "0.57034636", "0.56641763", "0.56267726", "0.5626531", "0.5626476", "0.562094", "0.55849403", "0.55421877", "0.55373126", "0.5537275", "0.55300945" ]
0.8355158
0
Verify that we can parse the normal output of isort.
def test_isort_tool_plugin_parse_valid():
    itp = setup_isort_tool_plugin()
    total_output = []
    output = "/tmp/x.py"
    total_output.append(output)
    output = "/tmp/y.py"
    total_output.append(output)
    issues = itp.parse_output(total_output)
    assert len(issues) == 2
    assert issues[0].filename == "/tmp/x.py"
    assert issues[0].line_number == "0"
    assert issues[0].tool == "isort"
    assert issues[0].issue_type == "formatting"
    assert issues[0].severity == "3"
    assert issues[0].message == "Imports are incorrectly sorted and/or formatted."
    assert issues[1].filename == "/tmp/y.py"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_isort(self):\n chdir(REPO_ROOT)\n cmd = [\"isort\", \"-df\", \"-rc\", \"-c\", *SRC_DIRS]\n print(\"running:\", \" \".join(str(part) for part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, f\"isort issues:\\n{proc.stdout.decode('utf-8')}\"", "def isort(context):\n exec_cmd = \"isort . --check --diff\"\n run_cmd(context, exec_cmd)", "def isort_check(ctx):\n ctx.run(f\"{VENV_PREFIX} isort --atomic --check-only .\")", "def test_shell(self):\n integers = shell_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def isort(command, checkonly=False):\n print(\n \"\"\"\nRunning isort the Python code import sorter\n===========================================\n\"\"\"\n )\n cmd = \"isort --check-only --diff .\" if checkonly else \"isort .\"\n command.run(cmd, echo=True, pty=POSIX)", "def test_optimize_parse():\n assert True", "def test_python_3_compatibility(self):\n assert natsort(['1', 'a']) == ['1', 'a']", "def test_parse(self): \n\n results = self.parser.parse()\n self.assertEqual(results, test_case_data['parse_output'])", "def test_version_sorting(self):\n assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']", "def test(self):\n test_dir = join_path(self.test_suite.current_test_cache_dir, self.test_src_dir)\n self.run_test(\n \"sh\",\n [\"testshortsort.sh\"],\n expected=\"Alignments sorted by coordinate.\",\n purpose=\"test: checking alignments\",\n work_dir=test_dir,\n )", "def test_sort_strings(self):\n chdir(REPO_ROOT)\n cmd = [\"python\", \"scripts/sort_strings.py\", \"--check\"]\n print(\"running:\", \" \".join(str(part) for part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, (\n f\"sort strings issues:\\n{proc.stdout.decode('utf-8')}\\n\\n\"\n \"Please run ./scripts/sort_string.py to resolve this issue.\"\n )", "def test_rust_code_analysis_tokei_Rust() -> None:\n\n ret_value = compare(\n \"rust-code-analysis\",\n \"tokei\",\n [\"-g\", \"-f\"],\n [\"SLOC\", \"PLOC\", \"CLOC\", \"BLANK\"],\n \"Rust\",\n \"bubble_sort.rs\",\n )\n\n assert ret_value == 0", "def test_isort_tool_plugin_found():\n if sys.version_info.major == 3 and sys.version_info.minor < 6:\n pytest.skip(\"isort is only available for Python 3.6+, unable to test\")\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"isort\"\n assert any(\n plugin_info.plugin_object.get_name() == \"isort\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a plugin is named Isort Tool Plugin\n assert any(\n plugin_info.name == \"Isort Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def test_compare(self):\n parser = parse_args(['-g', '10', '-s', 'bubble', '-c'])\n self.assertTrue(parser.compare)\n self.assertEqual(True, parser.compare)\n\n parser = parse_args(['-g', '10', '-s', 'bubble'])\n self.assertEqual(False, parser.compare)", "def test_natsort(self):\r\n # string with alpha and numerics sort correctly\r\n s = 'sample1 sample2 sample11 sample12'.split()\r\n self.assertEqual(natsort(s),\r\n 'sample1 sample2 sample11 sample12'.split())\r\n s.reverse()\r\n self.assertEqual(natsort(s),\r\n 'sample1 sample2 sample11 
sample12'.split())\r\n self.assertEqual(natsort(list('cba321')), list('123abc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort(list('cdba')), list('abcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort(['11', '2', '1', '0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort(['1.11', '1.12', '1.00', '0.009']),\r\n ['0.009', '1.00', '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(\r\n natsort([('11', 'A'), ('2', 'B'), ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'), ('2', 'B'), ('11', 'A')])", "def test_isort_tool_plugin_scan_oserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = OSError(\"mocked error\")\n itp = setup_isort_tool_plugin()\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n package[\"python_src\"] = [\n os.path.join(os.path.dirname(__file__), \"valid_package\", \"sample.py\")\n ]\n issues = itp.scan(package, \"level\")\n assert not issues", "def test_parse_results_valid():\n valid_result = [{\n \"url\": \"https://docs.ansible.com/ansible/.../test.html\",\n \"sections\": [\"test\"],\n \"title\": \"title – subtitle — Ansible Documentation\",\n \"body\": \"Long body containing flavor text\",\n \"_index\": \"5693d1e68db231f24d000003\",\n \"_type\": \"5693d1e68db231f24d000004\",\n \"_score\": 1,\n \"_version\": \"\",\n \"_explanation\": \"\",\n \"sort\": \"\",\n \"id\": \"test\",\n \"highlight\": {}\n }]\n assert [{\"title\": \"title\",\n \"subtitle\": \"subtitle\",\n \"arg\": \"https://docs.ansible.com/ansible/.../test.html\",\n \"valid\": True}] == parse_results(valid_result)", "def _verifyParsing(self):\n for attrname, attr in self.__dict__.items():\n if attrname.endswith('records') and iterable(attr):\n ts = get_record_timestamps(attr)\n if not issorted(ts):\n print('Sorting %s' % attrname)\n if type(attr) == list:\n attr = list(np.asarray(attr)[ts.argsort()])\n else:\n attr = attr[ts.argsort()]\n ts = get_record_timestamps(attr)\n assert issorted(ts)\n self.__dict__[attrname] = attr # update", "def test_invalid_sort():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot X date SORT OOPS Y classification COUNT\n \"\"\"\n\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def _is_lexsorted(self) -> bool:\n return self._lexsort_depth == self.nlevels", "def test_sort_otu_table(self):\r\n\r\n actual = sort_otu_table(parse_biom_table_str(self.otu_table1),\r\n ['NA', 'Key', 'Fing'])\r\n expected = parse_biom_table_str(self.age_sorted_otu_table1)\r\n self.assertEqual(actual, expected)", "def try_run_isort_formatting(path_to_protocol_package: str) -> None:\n subprocess.run( # nosec\n [sys.executable, \"-m\", \"isort\", *ISORT_CLI_ARGS, path_to_protocol_package],\n check=True,\n )", "def test_bogo(self):\n integers = bogo_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def step020():\n logger.logMessage('Begin: Sorting records')\n sortCommand = 'sort {0} -t \\';\\' --key 2 -o {1}'.format(candidatesFile,sortedCandidatesFile) \n rc = os.system(sortCommand)\n if rc != 0:\n raise Exception('Error returned by sort program: {0:d}'.format(rc))\n logger.logMessage('End : Sorting records')", "def test_parse_devide(self):\n self.assertEqual(parse_input.parse([\"8\", \"/\", \"4\"]), 2)", "def test_good_input():\n\n out_file = 'unclustered.fa'\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n try:\n rv, out = 
getstatusoutput(f'{prg} -c {cdhit} -p {proteins}')\n assert rv == 0\n assert out == ('Wrote 309 of 220,520 unclustered '\n 'proteins to \"unclustered.fa\"')\n\n assert os.path.isfile(out_file)\n\n seqs = list(SeqIO.parse(out_file, 'fasta'))\n\n assert len(seqs) == 309\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def test_output(self):\n good_value_pairs = STR_VALUE_PAIRS\n for pair in good_value_pairs:\n output = to_cardinal_trio(pair[0])\n self.assertEqual(output, pair[1],\n f\"{pair[0]} should be {pair[1]}, not {output}\")", "def test_advance_ast_avaliable():\n assert _test_advanced_ast_presence()", "def check_output(output, expected_output):\n o = copy.deepcopy(output) # so that we don't mutate input\n e = copy.deepcopy(expected_output) # so that we don't mutate input\n \n o.sort()\n e.sort()\n return o == e", "def test_12(self):\n num_elements = np.random.randint(1, 11)\n\n input_array = np.random.normal(size=num_elements)\n\n # We first check the sorting implementation.\n py = sorted(input_array)\n f90 = fort_debug.wrapper_sorted(input_array, num_elements)\n assert_equal(py, f90)\n\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n edu_spec, optim_paras, num_types = dist_class_attributes(\n respy_obj, \"edu_spec\", \"optim_paras\", \"num_types\"\n )\n\n args = (edu_spec[\"start\"], edu_spec[\"share\"], edu_spec[\"max\"])\n f90 = fort_debug.wrapper_sort_edu_spec(*args)\n py = sort_edu_spec(edu_spec)\n for i, label in enumerate([\"start\", \"share\", \"max\"]):\n assert_equal(py[label], f90[i])\n\n py = sort_type_info(optim_paras, num_types)\n f90 = fort_debug.wrapper_sort_type_info(optim_paras[\"type_shares\"], num_types)\n for i, label in enumerate([\"order\", \"shares\"]):\n assert_equal(py[label], f90[i])" ]
[ "0.6781174", "0.6635181", "0.6309134", "0.62981886", "0.61402726", "0.6060493", "0.60508925", "0.59607303", "0.5936084", "0.59126383", "0.587762", "0.58522105", "0.5729688", "0.56102586", "0.5577316", "0.55231833", "0.5521064", "0.55136585", "0.55019987", "0.54842657", "0.5482111", "0.5479343", "0.54657125", "0.54582775", "0.54581916", "0.54488796", "0.5425367", "0.54231393", "0.5421183", "0.5408248" ]
0.77473277
0
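An illustrative sketch of the kind of parse_output the test in the row above exercises: each path reported by isort becomes one formatting issue. The Issue container below is a stand-in assumed for this example only; the real statick_tool implementation may define its issue type differently.

from collections import namedtuple

# Stand-in issue record; field names mirror the attributes asserted in the test above.
Issue = namedtuple(
    "Issue", ["filename", "line_number", "tool", "issue_type", "severity", "message"]
)

def parse_output(total_output):
    # One formatting issue per file path that isort flagged.
    issues = []
    for line in total_output:
        issues.append(
            Issue(
                filename=line,
                line_number="0",
                tool="isort",
                issue_type="formatting",
                severity="3",
                message="Imports are incorrectly sorted and/or formatted.",
            )
        )
    return issues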
Test what happens when a CalledProcessError is raised (usually means isort hit an error).
def test_isort_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output):
    mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(
        0, "", output="mocked error"
    )
    itp = setup_isort_tool_plugin()
    package = Package(
        "valid_package", os.path.join(os.path.dirname(__file__), "valid_package")
    )
    package["python_src"] = [
        os.path.join(os.path.dirname(__file__), "valid_package", "sample.py")
    ]
    issues = itp.scan(package, "level")
    assert len(issues) == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_make_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(1, '', output=\"mocked error\")\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n package['make_targets'] = 'make_targets'\n issues = mtp.scan(package, 'level')\n assert not issues", "def check_call(*args, **kwargs):\n rc = call(*args, **kwargs)\n if rc != 0:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = args[0]\n raise CalledProcessError(rc, cmd)\n return 0", "def test_dump_handles_oserror(mocker):\n mocker.patch('subprocess.Popen' , side_effect=OSError(\"no such file\"))\n with pytest.raises(SystemExit):\n pgdump.dump(url)", "def called_process_error2exit_decorator(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except subprocess.CalledProcessError as e:\n print(\"{err}:\\n{msg}\".format(err=str(e), msg=e.output))\n sys.exit(1)\n return func_wrapper", "def test_dump_handles_os_error(mocker):\n\tmocker.patch('subprocess.Popen', side_effect=OSError('no such file'))\n\twith pytest.raises(SystemExit):\n\t\tpgdump.dump(url)", "def test_start_args(self, mocked_check, mocked_proc):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n info_source = self.supervisor.supvisors.info_source\n info_source.update_extra_args.side_effect = KeyError\n info_source.supervisor_rpc_interface.startProcess.side_effect = [\n RPCError(Faults.NO_FILE, 'no file'),\n RPCError(Faults.NOT_EXECUTABLE),\n RPCError(Faults.ABNORMAL_TERMINATION),\n 'done']\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with extra arguments and a process that is not compliant\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_EXTRA_ARGUMENTS, exc.exception.code)\n self.assertEqual(\"BAD_EXTRA_ARGUMENTS: rules for namespec appli:proc\"\n \" are not compatible with extra arguments in command line\",\n exc.exception.text)\n self.assertEqual(0, mocked_check.call_count)\n self.assertEqual(0, info_source.update_extra_args.call_count)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n # test RPC call with extra arguments and a process that is compliant\n # but unknown in Supervisor\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual(\"BAD_NAME: namespec appli:proc unknown in this Supervisor instance\",\n exc.exception.text)\n self.assertEqual([call('appli:proc', 'dummy arguments')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n info_source.update_extra_args.reset_mock()\n info_source.update_extra_args.side_effect = None\n # test RPC call with start exceptions\n mocked_proc.side_effect = None\n mocked_proc.return_value = None, None\n # NO_FILE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc')\n self.assertEqual(Faults.NO_FILE, exc.exception.code)\n self.assertEqual(\"NO_FILE: no file\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', True)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n 
self.assertEqual([call('appli:proc', 'NO_FILE: no file')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # NOT_EXECUTABLE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.NOT_EXECUTABLE, exc.exception.code)\n self.assertEqual(\"NOT_EXECUTABLE\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual([call('appli:proc', 'NOT_EXECUTABLE')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # other exception doesn't trigger an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual(\"ABNORMAL_TERMINATION\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual(0, info_source.force_process_fatal.call_count)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # finally, normal behaviour\n self.assertEqual('done', rpc.start_args('appli:proc'))", "def test_startProcessUnknownKeyError(self):\r\n self.assertRaises(KeyError, self.pm.startProcess, \"foo\")", "def test_not_running(): # pragma: windows\n comm_kwargs = dict(comm='IPCComm', direction='send', reverse_names=True)\n nt.assert_raises(RuntimeError, new_comm, 'test', **comm_kwargs)", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def print_unable_to_run(exc: \"CalledProcessError\"):\n _print(str(exc), level=MessageLevel.QUIET)", "def test_exceptions():\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.connect(path=r\"No process with this please\")\r\n assert False\r\n except application.ProcessNotFoundError:\r\n print('ProcessNotFoundError has been raised. OK.')\r\n\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.start(cmd_line = r\"No process with this please\")\r\n assert False\r\n except application.AppStartError:\r\n print('AppStartError has been raised. 
OK.')", "def test_runFailed(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c', 'print \"hi\"; raise SystemExit(1)'])\n self.assertEquals(exc.exitStatus, 1)\n self.assertEquals(exc.exitSignal, None)\n self.assertEquals(exc.output, \"hi\\n\")", "def test_invoke_pipe_not_found():\n\n testapp = holocron.Application()\n\n with pytest.raises(ValueError) as excinfo:\n next(testapp.invoke(\"test\"))\n\n assert str(excinfo.value) == \"no such pipe: 'test'\"", "def check_call_out(args, **kwargs):\n out, returncode = communicate(args, **kwargs)\n if returncode:\n raise CalledProcessError(\n returncode, args, kwargs.get('cwd'), out[0], out[1])\n return out", "def test_unexpected_error_in_processor(self):\n\n one_process_workflow = \"\"\"file://B <- file://A ! buggy_processor\n echo A does not produce B\n \"\"\"\n process = run_first_process(one_process_workflow, BuggyProcessor())\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle processor '\n 'buggy_processor :') >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in processor\")') >= 0, process.error_message\n assert process.error_message.find('will not complete.') >= 0, process.error_message", "def test_subprocess_fork_pid0_exception(self, mocker):\n mocker.stopall()\n\n test_command = [\"who\", \"-b\"]\n test_name = \"test_who\"\n test_fork = True\n pid = 0\n\n # mock\n mock_logging_debug = mocker.MagicMock(name=\"mock_logging_debug\")\n mock_logging_error = mocker.MagicMock(name=\"mock_logging_error\")\n mock_os_fork = mocker.MagicMock(name=\"mock_os_fork\", side_effect=[pid, OSError])\n mock_sys_exit = mocker.MagicMock(name=\"mock_sys_exit\")\n mock_os_chdir = mocker.MagicMock(name=\"mock_os_chdir\")\n mock_os_setsid = mocker.MagicMock(name=\"mock_os_setsid\")\n mock_os_umask = mocker.MagicMock(name=\"mock_os_umask\")\n\n # patch\n mocker.patch.object(\n scarlett_os.subprocess.logging.Logger, \"debug\", mock_logging_debug\n )\n mocker.patch.object(\n scarlett_os.subprocess.logging.Logger, \"error\", mock_logging_error\n )\n mocker.patch.object(scarlett_os.subprocess.os, \"fork\", mock_os_fork)\n mocker.patch.object(scarlett_os.subprocess.sys, \"exit\", mock_sys_exit)\n mocker.patch.object(scarlett_os.subprocess.os, \"chdir\", mock_os_chdir)\n mocker.patch.object(scarlett_os.subprocess.os, \"setsid\", mock_os_setsid)\n mocker.patch.object(scarlett_os.subprocess.os, \"umask\", mock_os_umask)\n\n scarlett_os.subprocess.Subprocess(test_command, name=test_name, fork=test_fork)\n\n mock_logging_error.assert_any_call(\"Error forking process second time\")\n\n mocker.stopall()", "def unavailable_process(**kwargs):\n return LazySubprocessTester([sys.executable, \"-c\", \"import sys; sys.exit(1)\"], **kwargs)", "def test_execute_or_bail_internal_error(self):\n with self.assertLogs(level=\"INFO\") as cm:\n # This patches the \"croak\" function that is IMPORTED inside \"etl.commands\".\n with unittest.mock.patch(\"etl.commands.croak\") as mocked_croak:\n with etl.commands.execute_or_bail(\"unittest\"):\n # Simulate an internal error where we don't catch an exception.\n raise ValueError(\"oops\")\n mocked_croak.assert_called()\n # The exit code (2nd arg) is expected to be 3 for uncaught exceptions.\n self.assertEqual(mocked_croak.call_args[0][1], 3)\n\n 
self.assertEqual(len(cm.output), 2)\n self.assertIn(\"terrible happened\", cm.output[0])", "def test_get_case_command_fail(loqusdbapi, mocker):\n # GIVEN a loqusdb api and a case id\n case_id = 'a_case'\n # WHEN an error occurs during fetching a case with the adapter\n mocker.patch.object(subprocess, 'check_output')\n subprocess.check_output.side_effect = subprocess.CalledProcessError(1, 'error')\n\n # THEN assert that the error is raised\n with pytest.raises(subprocess.CalledProcessError):\n loqusdbapi.get_case(case_id)", "def test_subprocess_fails_with_no_command(self):\n with self.assertRaises(ValueError):\n LazySubprocessTester([])", "def test_missing_file(self):\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_output(\n [sys.executable, idf_py_path, '--version', '@args_non_existent'],\n env=os.environ,\n stderr=subprocess.STDOUT).decode('utf-8', 'ignore')\n self.assertIn('(expansion of @args_non_existent) could not be opened', cm.exception.output.decode('utf-8', 'ignore'))", "def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)", "def test_runSignaled(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c',\n 'import sys; print \"hi\"; sys.stdout.flush(); '\n 'import os; os.kill(os.getpid(), 9)'])\n self.assertEquals(exc.exitSignal, 9)\n self.assertEquals(exc.exitStatus, None)\n self.assertEquals(exc.output, \"hi\\n\")", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n ExecutionExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()", "def _handleExceptionAndCheckCall(array_call, **kwargs):\n stdout = kwargs.get('stdout', subprocess.PIPE)\n stderr = kwargs.get('stderr', subprocess.PIPE)\n shell = kwargs.get('shell', False)\n\n cmd = array_call[0]\n\n output = None\n error = None\n\n # TODO: Check the value of array_call and <=[0]\n logging.debug(\"Calling {0}:\".format(cmd))\n logging.debug(\"%s\", array_call)\n logging.debug(\"---------\")\n\n # TODO: Use universal_newlines option from Popen?\n try:\n p = subprocess.Popen(array_call, stdout=stdout,\n stderr=stderr, shell=shell)\n\n # TODO: Change this because of possible memory issues => https://docs.python.org/2/library/subprocess.html#subprocess.Popen.communicate\n\n output, error = p.communicate()\n\n if stdout == subprocess.PIPE:\n logging.debug(\"\\t{0}\".format(output))\n else:\n logging.debug(\"\\tOutput in file {0}\".format(stdout.name))\n # If we detect an error from the subprocess, then we raise an exception\n # TODO: Manage if we raise an exception for everything, or use CRITICAL etc... but not stop process\n # TODO: The responsability of returning a sys.exit() should not be there, but up in the app.\n if p.returncode:\n if stderr == subprocess.PIPE:\n raise PopenError(cmd, error, p.returncode)\n else:\n # TODO: To Handle properly with a design behind, if we received a option as a file for the error\n raise Exception(\"Error when calling {0}. Error as been logged in your file {1}. 
Error code: {2}\"\n .format(cmd, stderr.name, p.returncode))\n\n except OSError as e:\n message = \"The subprocess {0} has encountered an OSError: {1}\".format(\n cmd, e.strerror)\n if e.filename:\n message = '\\n'.join(\n (message, \", against this file: {0}\".format(e.filename)))\n logging.error(message)\n sys.exit(-1)\n except PopenError as p:\n message = \"The subprocess {0} has returned the error: {1}.\".format(\n p.cmd, p.return_code)\n message = '\\n'.join(\n (message, \"Its error message is: {0}\".format(p.error)))\n\n logging.exception(message)\n\n sys.exit(p.return_code)\n except Exception as e:\n message = \"The subprocess {0} has encountered an unknown error: {1}\".format(\n cmd, e)\n logging.exception(message)\n\n sys.exit(-1)\n return p", "def test_ns_fail():\n env = NsSimPyEnvironment()\n env.process(some_process(env, 10.1))\n env.run()", "def test_unexpected_error_in_exists(self):\n # TODO\n one_process_workflow = \"\"\"buggy://B <- file://A\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow, extra_resource=BuggyExistsResource)\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle while checking existence of '\n 'output resources' ) >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in exists()\")') >= 0, process.error_message\n assert process.error_message.find('Process cannot be considered complete.') >= 0, process.error_message", "def _handle_failure(self, proc, test_case):\n if proc.returncode != 0:\n print('ERROR: Test execution failed: {}'.format(test_case.get_name()))\n stdout, stderr = proc.communicate()\n raise TestCaseFailure('Test case {} failed. stdout: {}, stderr: {}, '\n 'return code: {}.'.format(test_case.get_name(),\n stdout, stderr,\n proc.returncode))", "def test_error_message_from_background_process(self):\n one_process_workflow = \"\"\"file://B <- file://A\n error\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow)\n assert process.error_message.find(\"Process ended with error code\") >= 0, process.error_message", "def run(cmd):\n print ' '.join(cmd)\n try:\n check_call(cmd)\n except CalledProcessError as cpe:\n print \"Error: return code: \" + str(cpe.returncode)\n sys.exit(cpe.returncode)" ]
[ "0.66911113", "0.64319515", "0.63943857", "0.63668126", "0.6338667", "0.63044673", "0.63034093", "0.6283694", "0.62171775", "0.6183645", "0.61580575", "0.6093192", "0.6079444", "0.603348", "0.6027173", "0.5997922", "0.59800065", "0.59751534", "0.59546334", "0.5954266", "0.5952915", "0.5950407", "0.5940823", "0.5938315", "0.5936006", "0.5910566", "0.58934283", "0.58907115", "0.5888934", "0.5878262" ]
0.6745141
0
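The contrast between this row (CalledProcessError, one issue expected) and the OSError scan test that appears among the negatives above (no issues expected) comes down to how the scan step handles subprocess failures: a CalledProcessError still carries isort's output, so it can be parsed into issues, while an OSError means isort could not run at all. A minimal sketch of that pattern, using a plain subprocess call rather than the actual statick_tool internals:

import subprocess

def run_isort_sketch(command):
    # Illustrative error handling only; not the real plugin code.
    try:
        return subprocess.check_output(
            command, stderr=subprocess.STDOUT, universal_newlines=True
        )
    except subprocess.CalledProcessError as ex:
        return ex.output  # isort exited non-zero but still reported files to fix
    except OSError:
        return None  # isort is not available, so nothing can be reported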
Get the release history from pypi. Use the json API to get the release history from pypi. The returned json structure includes a 'releases' dictionary whose keys are release numbers and whose values are arrays of uploaded files. While we don't have a 'release time' per se (only the upload time on each of the files), we'll consider the timestamp on the first source file found (typically a .zip or tar.gz) to be the 'release time'. This is inexact, but should be close enough for our purposes.
def get_releases_for_package(name, since):
    f = urlreq.urlopen("http://pypi.org/project/%s/json" % name)
    jsondata = f.read()
    data = json.loads(jsondata)

    releases = []
    for relname, rellist in data['releases'].iteritems():
        for rel in rellist:
            if rel['python_version'] == 'source':
                when = _parse_pypi_released(rel['upload_time'])
                # for speed, only care about when > since
                if when < since:
                    continue

                releases.append(
                    Release(name, relname, rel['filename'], when))
                break

    return releases
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_releases():\n response = requests.get(PYPI_URL.format(package=PYPI_PACKAGE_NAME))\n if response:\n data = response.json()\n\n releases_dict = data.get('releases', {})\n\n if releases_dict:\n for version, release in releases_dict.items():\n release_formats = []\n published_on_date = None\n for fmt in release:\n release_formats.append(fmt.get('packagetype'))\n published_on_date = fmt.get('upload_time')\n\n release_formats = ' | '.join(release_formats)\n print('{:<10}{:>15}{:>25}'.format(version, published_on_date, release_formats))\n else:\n print('No releases found for {}'.format(PYPI_PACKAGE_NAME))\n else:\n print('Package \"{}\" not found on Pypi.org'.format(PYPI_PACKAGE_NAME))", "def versionHistory(self):\n url = self.metaData().getLink(\"version-history\")\n assert url is not None\n\n header = self._baseHeader.copy()\n response = self._adapter.getRequest(url, header)\n\n return json.loads(response['Body'])", "def get_current_release_downloads():\n downloads = (\n get_downloads_metadata()\n ['releases']\n [get_current_release()]\n ['downloads'])\n\n def up_to_date(dir, urls):\n try:\n df = pandas.read_csv(join(dir, \"DOWNLOAD_INFO.csv\"))\n return list(df.url) == list(urls)\n except IOError:\n return None\n\n return OrderedDict(\n (download[\"name\"], {\n 'downloaded': exists(join(get_downloads_dir(), download[\"name\"])),\n 'up_to_date': up_to_date(\n join(get_downloads_dir(), download[\"name\"]),\n [download['url']] if 'url' in download else download['part_urls']),\n 'metadata': download,\n }) for download in downloads\n )", "def Releases():\n return releases", "def releases(releaser, count):\n releases = sorted(\n releaser.get_releases().values(),\n key=lambda rel: rel[\"end_timestamp\"],\n reverse=True,\n )\n click.echo(f\"Latest {count} releases:\")\n for release in releases[:count]:\n click.echo(f'{release[\"end_timestamp\"]} {release[\"commit\"]}')", "def latest(data):\n result = dict()\n version = parse(\"0\")\n for release, info in data.items():\n python_version = Pypi._get_python_version(info)\n ver = parse(release)\n if not ver.is_prerelease:\n version = max(version, ver)\n python_version = python_version\n\n result = dict(version=str(version), python_version=python_version)\n\n return [result]", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def get_release_info():\n major_info = fetch_json(\n \"https://product-details.mozilla.org/1.0/\" \"firefox_history_major_releases.json\"\n )\n if major_info is None:\n raise Exception(\"Failed to fetch major version info\")\n minor_info = fetch_json(\n \"https://product-details.mozilla.org/1.0/\"\n \"firefox_history_stability_releases.json\"\n )\n if minor_info is None:\n raise Exception(\"Failed to fetch minor version info\")\n\n return {\"major\": major_info, \"minor\": minor_info}", "def get_release_info(self, version):\r\n try:\r\n return self._detail[\"releases\"][version]\r\n except KeyError as key_error:\r\n log.warning(key_error)\r\n return []", "def latest_release_get():\n try:\n return json_response.success({'version': 
version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get_previous_release_info(\n previous_release_version: str | None, past_releases: list[ReleaseInfo], current_release_version: str\n) -> str | None:\n previous_release = None\n if previous_release_version == current_release_version:\n # Re-running for current release - use previous release as base for git log\n if len(past_releases) > 1:\n previous_release = past_releases[1].last_commit_hash\n else:\n previous_release = past_releases[0].last_commit_hash if past_releases else None\n return previous_release", "def get_history(cls, api, history):\n api_base = api.split('/')[-1]\n cursor = cls.history_index.cursor()\n cursor.execute(\n \"select filename from history where api=? and ymdh=?;\",\n (api_base, history))\n files = [r[0] for r in cursor]\n cls.history_index.commit()\n if not files:\n return {}\n results = {}\n for fn in files:\n ts = re.split('[?@]', fn)[-1].replace('.gz', '')\n fn_full = os.path.join(config.base_store_dir, fn)\n fd = (gzip.open if fn.endswith('.gz') else open)(fn_full)\n results[ts] = json.load(fd, encoding='utf8')\n fd.close()\n return results", "def GetVersions(url, requestedProduct, requestedVersion):\n dictValidReleasesSorted = {}\n response = requests.get(url)\n if response.status_code == 200:\n jsonResult = response.json()\n jVersions = jsonResult[requestedProduct][\"versions\"]\n dictValidReleases = {}\n # do not want pre-releases; filter them out\n for item in jVersions.items(): \n for build in item[1][\"builds\"]:\n if (build[\"os\"] == SUPPORTED_OS):\n if (build[\"arch\"] == SUPPORTED_ARCH):\n if not (re.search('[a-zA-Z]', item[1][\"version\"])): \n dictValidReleases[item[1][\"version\"]] = build[\"url\"]\n\n for key in sorted(dictValidReleases,key=LooseVersion):\n dictValidReleasesSorted[key] = dictValidReleases[key]\n else:\n raise requests.ConnectionError(\"Server did not return status 200 - returned {0}\".format(response.status_code))\n\n return dictValidReleasesSorted", "def get_latest_tags(self):\n\n start = len(self.tags) - self.num_comparisons\n tags = self.tags\n latest = []\n for i in xrange(len(tags)):\n if i >= start:\n parts = tags[i]['ref'].split('/')\n release_num = parts[2]\n sha = tags[i]['object']['sha']\n tag = [release_num, sha]\n latest.append(tag)\n return latest", "def get_release(request):\r\n\r\n release = raven.fetch_git_sha(os.path.dirname(os.path.dirname(__file__)))\r\n return HttpResponse(json.dumps({\"release\": release[:7]}))", "def releases():\n r = run('ls -x %(releases_path)s' % env)\n env.releases = sorted(r.split(\"\\t\"))\n if len(env.releases) >= 1:\n env.current_revision = env.releases[-1]\n env.current_release = '%(releases_path)s/%(current_revision)s' % env\n if len(env.releases) > 1:\n env.previous_revision = env.releases[-2]\n env.previous_release = '%(releases_path)s/%(previous_revision)s' % env\n\n #cleanup old releases. 
max 3 allowed.\n cleanup()", "def get_recent_release_from_product_details() -> int:\n rls_prod_details_json = get(\n \"https://product-details.mozilla.org/1.0/firefox_history_major_releases.json\"\n ).json()\n rls_prod_details = Series(rls_prod_details_json).sort_values(ascending=True)\n [(cur_rls_vers, _date)] = rls_prod_details[-1:].iteritems()\n cur_rls_maj, *_v = cur_rls_vers.split(\".\")\n return int(cur_rls_maj)", "def _parse_latest_update(self, resp: Dict[str, Any], latest_version: str) -> str:\n latest_release = resp.get(\"releases\", {}).get(latest_version)\n if latest_release is not None and isinstance(latest_release, list):\n release_artifact_dates = []\n for artifact in latest_release:\n try:\n upload_time = artifact.get(\"upload_time_iso_8601\")\n parsed_upload_time = dateutil.parser.isoparse(upload_time)\n release_artifact_dates.append(parsed_upload_time)\n except Exception:\n pass\n latest_artifact_timestamp = max(release_artifact_dates)\n return latest_artifact_timestamp.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n return \"\"", "def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')", "def get_released_versions(package_name):\n url = \"https://pypi.python.org/pypi/{}/json\".format(package_name)\n data = json.load(urllib.request.urlopen(url))\n\n versions = {\n # We can actually select any element in `dist_files` because all the distribution files\n # should have almost the same upload time.\n version: dist_files[0][\"upload_time\"]\n for version, dist_files in data[\"releases\"].items()\n # If len(dist_files) = 0, this release is unavailable.\n # Example: https://pypi.org/project/xgboost/0.7\n #\n # > pip install 'xgboost==0.7'\n # ERROR: Could not find a version that satisfies the requirement xgboost==0.7\n if len(dist_files) > 0 and (not dist_files[0].get(\"yanked\", False))\n }\n return versions", "def releases():\n result = run('ls %(releases_dir)s' % env)\n releases_list = re.split('\\s+', result)\n releases_list.sort(reverse=True)\n return releases_list", "def get_release_notes(self):\n\n notes = self.output.get_header('RELEASE NOTES')\n notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \\\n self.repo, self.product) + '\\n'\n\n notes += self.output.get_sub_header('COMPARISONS')\n notes += self.get_comparison(self.latest_tags[0][VERS],\n self.latest_tags[1][VERS])\n\n if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1):\n notes += self.get_comparison(self.latest_tags[1][VERS],\n self.latest_tags[2][VERS])\n\n if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW:\n notes += self.get_comparison(self.latest_tags[2][VERS],\n self.latest_tags[3][VERS])\n\n tag_data = self.get_tag(self.latest_tags[3][SHA])\n\n notes += self.output.get_sub_header('TAGS')\n notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\\n'\n notes += self.get_url_tag_commit(tag_data[\"object\"][\"sha\"]) + '\\n'\n\n changelog = self.get_changelog(tag_data[\"object\"][\"sha\"])\n if changelog:\n notes += self.output.get_sub_header('CHANGELOG')\n notes += changelog\n return notes", "def 
get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date", "def get_version_and_release_date(\n requirement, version=None, verbose=False, response=None\n):\n if not response:\n url = get_pypi_url(requirement, version)\n response = request(url)\n\n # see if the url is 404'ing because it has been redirected\n if response.status == 404:\n root_url = url.rpartition(\"/\")[0]\n res = request(root_url, method=\"HEAD\")\n if res.status == 301:\n new_location = res.headers[\"location\"] + \"/json\"\n response = request(new_location)\n\n if response.status != 200:\n if version:\n if verbose:\n print(\n \"{} ({}) isn't available on PyPI \"\n \"anymore!\".format(requirement, version)\n )\n else:\n if verbose:\n print(\n \"{} isn't on PyPI. Check that the project \"\n \"still exists!\".format(requirement)\n )\n return None, None\n\n if not response.json:\n if verbose:\n print(\n \"Decoding the JSON response for {} ({}) \"\n \"failed\".format(requirement, version)\n )\n return None, None\n\n response = response.json\n\n try:\n if version:\n if version in response[\"releases\"]:\n release_date = response[\"releases\"][version][0][\"upload_time\"]\n else:\n return None, None\n else:\n version = response[\"info\"].get(\"stable_version\")\n\n if not version:\n versions = {\n v: parse_version(v)\n for v in response[\"releases\"].keys()\n if not parse_version(v).is_prerelease()\n }\n\n # if we still don't have a version, let's pick up a prerelease one\n if not versions:\n versions = {\n v: parse_version(v) for v in response[\"releases\"].keys()\n }\n\n if versions:\n version = max(versions.items(), key=operator.itemgetter(1))[0]\n release_date = response[\"releases\"][str(version)][0][\"upload_time\"]\n else:\n return None, None\n\n return version, datetime.fromtimestamp(\n time.mktime(time.strptime(release_date, \"%Y-%m-%dT%H:%M:%S\"))\n )\n except IndexError:\n if verbose:\n print(\"{} ({}) didn't return a date property\".format(requirement, version))\n return None, None", "def get_releases(is_vertebrate: bool):\n url = \"http://ftp.ensemblgenomes.org/pub?\"\n if is_vertebrate:\n url = \"http://ftp.ensembl.org/pub?\"\n ret = retry(requests.get, 3, url)\n # sort releases new to old\n releases = sorted(\n [int(i) for i in re.findall(r'\"release-(\\d+)/\"', ret.text)],\n reverse=True,\n )\n if is_vertebrate:\n # ignore immature releases\n releases = [r for r in releases if r > 46]\n return releases", "def api_json(self):\n if not self._api_json:\n resp = requests.get(\n GitHubManager.RELEASE_API.format(repo=self.repo)\n )\n if not resp.ok:\n resp.raise_for_status()\n\n self._api_json = resp.json()\n\n return self._api_json", "def certifiVersions():\n log = logger.new(function='certifiVersions')\n r = yield treq.get('https://pypi.python.org/pypi/certifi/json', timeout=5)\n log.msg(\"got 
certifi versions!\")\n data = yield r.json()\n\n # Note: this takes advantage of the fact that certifi's releases have the\n # same version number sort order as lexicographical. If that changes,\n # this will break.\n releases = sorted(data[u'releases'].keys())\n\n first_release = releases.index('14.05.14')\n target_versions = releases[first_release:]\n\n result = []\n for version in target_versions:\n files = data[u'releases'][version]\n\n # Find the .tar.gz release.\n for file in files:\n if file[u'filename'].endswith(u'.tar.gz'):\n break\n else:\n continue\n\n log.msg(\"new release located\", version=version, tarball=file[u'url'])\n result.append((version, file[u'url']))\n\n returnValue(result)", "def get_releases(repo, quiet=False, per_page=None) -> List[str]:\n req_url = f\"https://api.github.com/repos/{owner}/{repo}/releases\"\n\n params = {}\n if per_page is not None:\n if per_page < 1 or per_page > 100:\n raise ValueError(\"per_page must be between 1 and 100\")\n params[\"per_page\"] = per_page\n\n request = get_request(req_url, params=params)\n num_tries = 0\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code in (404, 503) and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n print(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n releases = json.loads(result.decode())\n if not quiet:\n print(f\"found {len(releases)} releases for {owner}/{repo}\")\n\n avail_releases = [\"latest\"]\n avail_releases.extend(release[\"tag_name\"] for release in releases)\n return avail_releases", "def get_releases(platform, year, month):\n platform_num = PLATFORMS.get(platform.lower(), 0)\n\n if platform_num == 0:\n url = _3DJUEGOS_RELEASES_URL + \"todos/por-mes/0/{}/{}/\".format(year, month)\n else:\n url = _3DJUEGOS_RELEASES_URL + platform + \\\n \"/por-mes/{}/{}/{}/\".format(platform_num, year, month)\n\n soup = get_soup_obj(url)\n if not soup:\n return None\n results = {}\n results[\"games\"] = []\n root = soup.find(\"div\", {\"class\": \"pad_rl10\"})\n for div in root.findAll(\"div\"):\n if div.attrs[\"class\"] == [\"s20\", \"ffnav\", \"b\", \"mar_t50\"]:\n release_date = \"{}-{}-{}\".format(year, month,\n re.search(r'\\d+', div.span.text).group())\n elif div.attrs[\"class\"] == [\"dtc\", \"vam\"]:\n name = div.a.span.text\n platform = div.div.span.text\n results[\"games\"].append(\n {\"name\": name, \"platform\": platform, \"releaseDate\": release_date})\n\n return results", "def list_releases(self, name):\n endpoint = '/v1/charm/{}/releases'.format(name)\n response = self._client.get(endpoint)\n\n channel_map = []\n for item in response['channel-map']:\n expires_at = item['expiration-date']\n if expires_at is not None:\n # `datetime.datetime.fromisoformat` is available only since Py3.7\n expires_at = parser.parse(expires_at)\n channel_map.append(\n Release(revision=item['revision'], channel=item['channel'], expires_at=expires_at))\n\n channels = [\n Channel(\n name=item['name'],\n fallback=item['fallback'],\n track=item['track'],\n risk=item['risk'],\n 
branch=item['branch'],\n ) for item in response['package']['channels']]\n\n revisions = [_build_revision(item) for item in response['revisions']]\n\n return channel_map, channels, revisions" ]
[ "0.6942631", "0.66807014", "0.6496823", "0.6251065", "0.6223727", "0.61681485", "0.607848", "0.605974", "0.6022348", "0.5989354", "0.59570056", "0.59270537", "0.5916981", "0.58646035", "0.5862971", "0.5855767", "0.5854523", "0.5853826", "0.58354694", "0.5833912", "0.58016866", "0.57994175", "0.5793847", "0.5746684", "0.5738796", "0.572935", "0.57211924", "0.5691585", "0.5677908", "0.5586303" ]
0.6868364
1
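The function in the row above relies on a _parse_pypi_released helper that is not shown. PyPI's JSON API reports upload_time as an ISO-like string, and one of the related snippets above parses it with the format "%Y-%m-%dT%H:%M:%S", so a plausible helper is assumed to look like this:

from datetime import datetime

# Hypothetical helper: not shown in the row above; the format is assumed from
# PyPI's upload_time field as used elsewhere in this dataset.
def _parse_pypi_released(upload_time):
    return datetime.strptime(upload_time, "%Y-%m-%dT%H:%M:%S")

# Example: _parse_pypi_released("2017-04-03T16:20:00") -> datetime(2017, 4, 3, 16, 20)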
Calculates X values for a given list of Y values in the range defined by the a and b parameters. X values are obtained by dividing the given X range by the number of nodes, so they are evenly distributed across the range.
def prepare_initial_nodes(x_start, x_end, nodes_y):
    nodes_x = [float(x_start + ((x_end - x_start) / (len(nodes_y) - 1)) * i)
               for i in range(0, len(nodes_y))]
    nodes_y = [float(y) for y in nodes_y]
    print(nodes_x)
    print(nodes_y)
    nodes = list(zip(nodes_x, nodes_y))
    return nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projectionX(xdata, ydata, nbins, xrange=None, yrange=None):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(xmin, xmax, nbins+1)\n y_out = np.empty(nbins)\n dx = np.diff(x_out)[0]\n\n selection = in_range(xdata, xmin, xmax) & in_range(ydata, ymin, ymax)\n xdata, ydata = xdata[selection], ydata[selection]\n for i in range(nbins):\n bin_data = np.extract(in_range(xdata, x_out[i], x_out[i+1]), ydata)\n y_out[i] = bin_data.size\n x_out += dx / 2.\n x_out = x_out[:-1]\n return x_out, y_out", "def neural_dist(func_a, func_b, x_range):\n func_a = func_a.numpy()\n func_b = func_b.numpy()\n return func_a.fit(func_b, x_range=x_range)[1]", "def calculate_ranges(a, b):\n try:\n ranges = list(range(0, a, a//b))\n if ranges[-1] != a:\n ranges.append(a)\n return ranges\n except ValueError:\n return [0, a]", "def profileX(xdata, ydata, nbins, xrange=None, yrange=None, drop_nan=True):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(xmin, xmax, nbins+1)\n y_out = np.empty(nbins)\n y_err = np.empty(nbins)\n dx = np.diff(x_out)[0]\n\n selection = in_range(xdata, xmin, xmax) & in_range(ydata, ymin, ymax)\n xdata, ydata = xdata[selection], ydata[selection]\n for i in range(nbins):\n bin_data = np.extract(in_range(xdata, x_out[i], x_out[i+1]), ydata)\n y_out[i] = np.mean(bin_data)\n y_err[i] = np.std(bin_data) / bin_data.size**0.5\n x_out += dx / 2.\n x_out = x_out[:-1]\n if drop_nan:\n selection = ~(np.isnan(y_out) | np.isnan(y_err))\n x_out = x_out[selection]\n y_out = y_out[selection]\n y_err = y_err[selection]\n return x_out, y_out, y_err", "def plot_linear(x_range, w, b):\n\tplt.plot(x_range, x_range * w + b)", "def fit_to_range(val: float, a: float, b: float, a1: float, b1: float) -> float:\n new_value = ((val - a) / (b - a)) * (b1 - a1) + a1\n return new_value", "def rangeX(iterations):\n if not isinstance(iterations, (tuple)):\n raise AttributeError\n return itertools.product(*map(range, iterations))", "def to_arrays(self, xmin=None, xmax=None):\n sidx = 0 if xmin is None else np.searchsorted(self.xvec, [xmin])[0]\n eidx = len(self.xvec) if xmax is None else np.searchsorted(self.xvec, [xmax])[0]\n\n if eidx < len(self.xvec) and self.xvec[eidx] == xmax:\n eidx += 1\n\n xtemp = self.xvec[sidx:eidx]\n if xmin is not None and (len(xtemp) == 0 or xtemp[0] != xmin):\n np.insert(xtemp, 0, [xmin])\n if xmax is not None and (len(xtemp) == 0 or xtemp[-1] != xmax):\n np.append(xtemp, [xmax])\n return xtemp, self(xtemp)", "def grid(x, y):\n return product(xrange(1, x+1), xrange(1, y+1))", "def slice_data(xdata, ydata, x_range):\n\tdata = zip(xdata, ydata)\n\tsliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]\n\treturn array(zip(*sliced_data))", "def _lagrange2(x, y):\n\n def P(x_ip):\n total = 0\n n = len(x)\n for i in range(0, n):\n\n def g(i, n):\n tot_mul = 1\n for j in range(0, n):\n if i == j:\n continue\n if x[i] == x[j]:\n log.fatal(\n f\"Leads to division by zero (x = {x[i]}). Identical values given in x array. 
\"\n \"For example by using Lagrange interpolation for precise orbit, \"\n \"check if identical observation epochs are given in SP3 file\"\n )\n tot_mul *= (x_ip - x[j]) / float(x[i] - x[j])\n return tot_mul\n\n total += y[i] * g(i, n)\n return total\n\n return P", "def make_uniform_x(self, x_resolution, min_x = None, max_x = None, bin_above = 2.0, **kwargs):\n \n if min_x is None or max_x is None:\n a, b = self.get_min_max_x(**kwargs)\n if min_x is None:\n min_x = a\n if max_x is None:\n max_x = b\n \n new_x = numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)\n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n resolution = (numpy.amax(m.x) - numpy.amin(m.x)) / len(m.x)", "def calculate(\n expression: str, symmetrical_bounds: Union[int, float] = 10\n ) -> Tuple[np.ndarray, np.ndarray]:\n symmetrical_bounds = abs(symmetrical_bounds)\n x = np.arange(-symmetrical_bounds, symmetrical_bounds, symmetrical_bounds / 50)\n expr = parse_expr(expression)\n x_symbol = Symbol(\"x\")\n\n y = np.array([expr.subs({x_symbol: x_point}).evalf() for x_point in x])\n\n return x, y", "def scale(x, a=5, b=10, xmin=-1, xmax=1):\n return (b - a)*(x - xmin)/(xmax - xmin) + a", "def projectionY(xdata, ydata, nbins, yrange=None, xrange=None):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(ymin, ymax, nbins+1)\n y_out = np.empty(nbins)\n dx = np.diff(x_out)[0]\n\n selection = in_range(xdata, xmin, xmax) & in_range(ydata, ymin, ymax)\n xdata, ydata = xdata[selection], ydata[selection]\n for i in range(nbins):\n bin_data = np.extract(in_range(ydata, x_out[i], x_out[i+1]), xdata)\n y_out[i] = bin_data.size\n x_out += dx / 2.\n x_out = x_out[:-1]\n return x_out, y_out", "def create_grid(xlim, ylim, step):\n x_range = np.arange(xlim[0], xlim[1], step)\n y_range = np.arange(ylim[0], ylim[1], step)\n return x_range, y_range", "def calc(x_list):\n\n y_list = [x**2 + 2*x + 1 for x in x_list]\n\n return y_list", "def plot(self, a=None, b=None):\n\n # === choose reasonable interval if [a, b] not specified === #\n if a is None:\n a = self.observations.min() - self.observations.std()\n if b is None:\n b = self.observations.max() + self.observations.std()\n\n # === generate plot === #\n x_vals = np.linspace(a, b, num=100)\n f = np.vectorize(self.__call__)\n plt.plot(x_vals, f(x_vals))\n plt.show()", "def scale(x_range=1, y_range=1):\r\n x = rand_val(x_range)\r\n y = rand_val(y_range)\r\n return np.array(((x, 0, 0),\r\n (0, y, 0),\r\n (0, 0, 1)), dtype=np.float)", "def plot(self, center=0, xmin=-1, xmax=1):\n if self.eps == 0:\n return [xmin, center, center, xmax], [0, 0, 1, 1]\n else:\n n = 200./self.eps\n x = concatenate(\n linspace(xmin, center-self.eps, 21),\n linspace(center-self.eps, center+self.eps, n+1),\n linspace(center+self.eps, xmax, 21))\n y = self(x)\n return x, y", "def project_weights_and_nodes(a, b, unit_weights, unit_nodes):\n\n\t# project onto interval [a,b]\n\tnodes = 0.5*(b-a)*unit_nodes + 0.5*(a+b)\n\tweights = 0.5*(b-a)*unit_weights\n\n\treturn weights, nodes", "def toPointwise_withLinearXYs( self, accuracy, biSectionMax = 16, **kwargs ) :\n\n if( accuracy < 1e-6 ) : accuracy = 1e-6\n if( accuracy > 0.1 ) : accuracy = 0.1\n\n P, n = [], 1000\n for i in xrange( n + 1 ) :\n x = ( ( n - i ) * self.domainMin + self.domainMax * i ) / n\n P.append( [ x, self.evaluate( x ) ] )\n axes = axesModule.axes( )\n yUnit = 
self.getAxisUnitSafely( 0 )\n xUnit = self.getAxisUnitSafely( 1 )\n axes[0] = axesModule.axis( 'y(x)', 0, yUnit )\n axes[1] = axesModule.axis( 'x', 1, xUnit )\n Pclass = self.toLinearXYsClass()\n P = Pclass( P, accuracy = accuracy, axes = axes )\n return( P.thin( accuracy = accuracy ) )", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def getCellData(X, y, min0, max0, min1, max1):\n Xcell = []\n ycell = []\n\n for x,label in zip(X, y):\n if (x[0] >= min0) and (x[0] < max0) and (x[1] >= min1) and (x[1] < max1):\n Xcell.append(x)\n ycell.append(label)\n\n return np.array(Xcell), np.array(ycell)", "def makeCrossPlotX(f,g):\n x = zerofloat(n1,n2)\n y = zerofloat(n1,n2)\n class Loop(Parallel.LoopInt):\n def compute(self,i2):\n for i1 in range(1,n1-1):\n x[i2][i1] = 0.5*(f[i2][i1+1]-f[i2][i1-1])\n y[i2][i1] = g[i2][i1]-f[i2][i1]\n Parallel.loop(n2,Loop())\n return x,y", "def __call__(self, x, y):\n xa = np.asarray(x)\n ya = np.asarray(y)\n return (self._evaluate(xa.flatten(), ya.flatten())).reshape(xa.shape)", "def get_densities(\n x: np.ndarray,\n y: np.ndarray,\n nx: int,\n ny: int,\n x_range: Tuple = (0, 100),\n y_range: Tuple = (0, 100),\n n: int = 30,\n) -> np.ndarray:\n\n x_values = np.linspace(x_range[0], x_range[1], nx)\n y_values = np.linspace(y_range[0], y_range[1], ny)\n\n density = np.empty((nx, ny))\n tree = get_kdtree(x, y)\n\n for x in tqdm(range(nx)):\n for y in range(ny):\n density[x, y] = get_density_from_neighbours(\n x_values[x], y_values[y], tree, n\n )\n\n return density, tree", "def coords_in_range(self, anchor, steps):\n coords = list()\n x_low = -steps\n x_high = steps\n\n #Generate using an axial formula to make it easier\n #calculate z via the other two and throw away ones that aren't in bounds\n for x in range(x_low, x_high+1):\n for y in range(max(-steps, -x-steps), min(steps, -x+steps)+1):\n z = -x - y\n coords.append(anchor+self.coord(x, y, z))\n return coords", "def alpha_range(x0, x1, x_min, x_max):\n if x0 == x1:\n raise ValueError('x1 and x2 should be different, get {} and {}'.format(x0, x1))\n alpha_x1 = (x_min - x0) / (x1 - x0)\n alpha_x2 = (x_max - x0) / (x1 - x0)\n alpha_min = max(0, min(alpha_x1, alpha_x2))\n alpha_max = min(1, max(alpha_x1, alpha_x2))\n return alpha_min, alpha_max", "def nll(x, a, b):\n x = _validate_x_bounds(x, low=0, high=1, strict_low=True, strict_high=True)\n return -mp.fsum([logpdf(t, a, b) for t in x])" ]
[ "0.58776325", "0.5662537", "0.5642036", "0.5495832", "0.5469611", "0.5420076", "0.5375376", "0.53669316", "0.53453207", "0.53439814", "0.53289247", "0.5299425", "0.5295588", "0.52585924", "0.52468276", "0.5246785", "0.5245637", "0.5245567", "0.52296543", "0.5221779", "0.5213703", "0.5212454", "0.5208512", "0.52083296", "0.5201708", "0.5194486", "0.51711047", "0.5168863", "0.5165443", "0.5120755" ]
0.58207065
1
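A quick usage sketch for the row above (assuming prepare_initial_nodes is importable as written): five Y values over the range [0, 4] give a step of (4 - 0) / (5 - 1) = 1.0, so the X values come out evenly spaced.

nodes = prepare_initial_nodes(0, 4, [1, 2, 3, 4, 5])
# nodes == [(0.0, 1.0), (1.0, 2.0), (2.0, 3.0), (3.0, 4.0), (4.0, 5.0)]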
Takes a list of divided-difference nodes and calculates a new divided-difference node from each pair in nodes_to_compute. In other words, it computes the next level of the so-called Newton's second interpolation form (divided differences) tree.
def calculate_divided_differences_row(nodes_to_compute):
    divided_differences = []
    if len(nodes_to_compute) == 1:
        return None

    for i in range(0, len(nodes_to_compute) - 1):
        child = DividedDifferenceNode.create_child_node(nodes_to_compute[i],
                                                        nodes_to_compute[i + 1])
        child.calculate_value()
        divided_differences.append(child)

    for node in divided_differences:
        print(node, end='')
    print('\n')

    return divided_differences
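A worked example of the single level this function computes; the arithmetic below is what DividedDifferenceNode.calculate_value() is assumed to perform for first-order differences (that class is not shown here). For the nodes (1, 1), (2, 4), (3, 9), i.e. y = x**2:

xs = [1.0, 2.0, 3.0]
ys = [1.0, 4.0, 9.0]
# f[x_i, x_i+1] = (y_i+1 - y_i) / (x_i+1 - x_i)
next_level = [(ys[i + 1] - ys[i]) / (xs[i + 1] - xs[i]) for i in range(len(xs) - 1)]
# next_level == [3.0, 5.0]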
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_divided_differences(nodes):\n nodes_to_compute = []\n divided_differences = []\n for node in nodes:\n nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1]))\n\n divided_differences.append(tuple(nodes_to_compute))\n\n while len(nodes_to_compute) > 1:\n next_node_row = calculate_divided_differences_row(nodes_to_compute)\n divided_differences.append(tuple(next_node_row))\n nodes_to_compute = next_node_row\n\n return divided_differences", "def calculate_newton_interpolation(divided_differences):\n polynomial = []\n for i, divided_differences_row in enumerate(divided_differences):\n polynomial_part = '({0})'.format(divided_differences_row[0].divided_difference)\n\n for j in range(0, i):\n polynomial_part += '*(x-{0})'.format(divided_differences[0][j].x)\n\n polynomial_part += '+'\n polynomial.append(polynomial_part)\n polynomial_str = ''.join(polynomial)[:-1]\n\n print('Calculated polynomial: {0}'.format(polynomial_str))\n # Heuristic simplification of calculated polynomial\n simplified_polynomial = sy.simplify(polynomial_str)\n print(\"Simplified polynomial: {0}\".format(simplified_polynomial))\n return simplified_polynomial", "def compute_relations(nodes: List[Node]) -> None:\n # Calculate parents\n for node in nodes:\n node.parents = []\n for node in nodes:\n for child in node.children():\n child.parents.append(node)\n\n def compute_dominators(\n entry: Node,\n parents: Callable[[Node], List[Node]],\n dominators: Callable[[Node], Set[Node]],\n immediately_dominates: Callable[[Node], List[Node]],\n set_immediate_dominator: Callable[[Node, Optional[Node]], None],\n ) -> None:\n # See https://en.wikipedia.org/wiki/Dominator_(graph_theory)#Algorithms\n # Note: if `n` is unreachable from `entry`, then *every* node will\n # vacuously belong to `n`'s dominator set.\n for n in nodes:\n dominators(n).clear()\n if n == entry:\n dominators(n).add(n)\n else:\n dominators(n).update(nodes)\n\n changes = True\n while changes:\n changes = False\n for node in nodes:\n if node == entry:\n continue\n nset = dominators(node)\n for parent in parents(node):\n nset = nset.intersection(dominators(parent))\n nset.add(node)\n if len(nset) < len(dominators(node)):\n assert nset.issubset(dominators(node))\n dominators(node).intersection_update(nset)\n changes = True\n\n # Compute immediate dominator, and the inverse relation\n for node in nodes:\n immediately_dominates(node).clear()\n for node in nodes:\n doms = dominators(node).difference({node})\n # If `node == entry` or the flow graph is not reducible, `doms` may be empty.\n # TODO: Infinite loops could be made reducible by introducing\n # branches like `if (false) { return; }` without breaking semantics\n if doms:\n # There should be a unique max `len(dominators(d))` if the flowgraph\n # is reducible. 
Fall back to largest index for irreducible graphs.\n imdom = max(doms, key=lambda d: (len(dominators(d)), d.block.index))\n immediately_dominates(imdom).append(node)\n set_immediate_dominator(node, imdom)\n else:\n set_immediate_dominator(node, None)\n for node in nodes:\n immediately_dominates(node).sort(key=lambda x: x.block.index)\n\n def _set_immediate_dominator(node: Node, imdom: Optional[Node]) -> None:\n node.immediate_dominator = imdom\n\n def _set_immediate_postdominator(node: Node, impdom: Optional[Node]) -> None:\n node.immediate_postdominator = impdom\n\n entry = nodes[0]\n terminal = nodes[-1]\n assert isinstance(terminal, TerminalNode)\n\n # Compute dominators & immediate dominators\n compute_dominators(\n entry=entry,\n parents=lambda n: n.parents,\n dominators=lambda n: n.dominators,\n immediately_dominates=lambda n: n.immediately_dominates,\n set_immediate_dominator=_set_immediate_dominator,\n )\n\n # Compute postdominators & immediate postdominators\n # This uses the same algorithm as above, but with edges reversed\n compute_dominators(\n entry=terminal,\n parents=lambda n: n.children(),\n dominators=lambda n: n.postdominators,\n immediately_dominates=lambda n: n.immediately_postdominates,\n set_immediate_dominator=_set_immediate_postdominator,\n )\n\n # Iterate over all edges n -> c and check for backedges, which define natural loops\n for node in nodes:\n for child in node.children():\n if child not in node.dominators:\n continue\n # Found a backedge node -> child where child dominates node; child is the \"head\" of the loop\n if child.loop is None:\n child.loop = NaturalLoop(child)\n child.loop.nodes |= {child, node}\n child.loop.backedges.add(node)\n for parent in nodes:\n if reachable_without(parent, node, child):\n child.loop.nodes.add(parent)", "def compute_tree(self, tree):\n g_list_val, g_list_h = self._build_graph(tree) # return theano variable of each node\n list_val = self._traversal_tree(tree) #\n f = theano.function(g_list_val, g_list_h, allow_input_downcast=True)\n result = f(*list_val)\n return result", "def gradients(output_node, node_list):\r\n\r\n # a map from node to a list of gradient contributions from each output node\r\n node_to_output_grads_list = {}\r\n # Special note on initializing gradient of output_node as oneslike_op(output_node):\r\n # We are really taking a derivative of the scalar reduce_sum(output_node)\r\n # instead of the vector output_node. 
But this is the common case for loss function.\r\n node_to_output_grads_list[output_node] = [oneslike_op(output_node)]\r\n # a map from node to the gradient of that node\r\n node_to_output_grad = {}\r\n # Traverse graph in reverse topological order given the output_node that we are taking gradient wrt.\r\n reverse_topo_order = list(reversed(find_topo_sort([output_node])))\r\n #node_to_output_grad[output_node] = oneslike_op(output_node)\r\n \r\n \r\n \"\"\"TODO: Your code here\"\"\"\r\n \r\n for node in reverse_topo_order:\r\n #print(node)\r\n #print(node_to_output_grad)\r\n if not(node in node_to_output_grad):\r\n #node_to_output_grad[node] = node.op.gradient(node, sum_node_list ([node_to_output_grad[node1] for node1 in node_to_output_grads_list[node] ]))\r\n sum_node = sum_node_list (node_to_output_grads_list[node]) \r\n grad = node.op.gradient(node, sum_node)\r\n node_to_output_grad[node] = sum_node\r\n #print(grad)\r\n #print(len(node.inputs))\r\n for i in range(len(node.inputs)):\r\n #print(i)\r\n if (not(node.inputs[i] in node_to_output_grads_list)):\r\n node_to_output_grads_list[node.inputs[i]]=[]\r\n node_to_output_grads_list[node.inputs[i]].append(grad[i])\r\n \r\n #input_grad = \r\n \r\n \r\n '''for node1 in node_to_output_grads_list[node]:\r\n print(node1)\r\n if (node in node_to_output_grad):\r\n node_to_output_grad[node] = node_to_output_grad[node] + node_to_output_grad[node1]\r\n else:\r\n node_to_output_grad[node] = node_to_output_grad[node1]\r\n '''\r\n #print(\"node to output \")\r\n #print(node_to_output_grad)\r\n\r\n del reverse_topo_order\r\n # Collect results for gradients requested.\r\n grad_node_list = [node_to_output_grad[node] for node in node_list]\r\n return grad_node_list", "def insert_nodes(self):\n neighbour_max_distance = 5\n new_nodes = []\n for node in self.nodes:\n left_distance = node.get_distance(node.neighbour1)\n right_distance = node.get_distance(node.neighbour2)\n if left_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour1.x - node.x) / 2,\n node.y + (node.neighbour1.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour1.connect(node.neighbour1.neighbour1, new_node)\n new_node.connect(node.neighbour1, node)\n node.connect(new_node, node.neighbour2)\n new_nodes.append(new_node)\n new_nodes.append(node)\n\n if right_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour2.x - node.x) / 2,\n node.y + (node.neighbour2.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour2.connect(new_node, node.neighbour2.neighbour2)\n new_node.connect(node, node.neighbour2)\n node.connect(node.neighbour1, new_node)\n new_nodes.append(new_node)\n\n return new_nodes", "def job_tree(self):\n\n # 1. Enforce depth of 1 for steps\n def depth_one(steps):\n depth_one = []\n for step in steps:\n if type(step) is list:\n if type(step[0]) is list:\n depth_one.append(step[0])\n else:\n depth_one.append(step)\n else:\n depth_one.append([step])\n return depth_one\n\n # 2. 
Convert steps to list of node objects (0,1,2,3...)\n def assign_nodes(steps):\n nodes = [i for i in range(len(steps))]\n objects = list(\n set([elem for sublist in steps for elem in sublist]))\n\n # checks for multiple src and dst objects -- added when looking for\n # mutiples\n split_objects = []\n for obj in objects:\n if len(obj) > 1:\n new_objs = obj.split(\", \")\n split_objects.extend(new_objs)\n else:\n split_objects.append(obj)\n objects = split_objects\n del(split_objects)\n\n # populate with leafless trees (Node objects, no edges)\n for node in nodes:\n nodes[node] = Node(str(node))\n\n # search for leafy trees\n for obj in objects:\n\n # accounts for multiple drc/dst objects\n leaves = []\n for i, sublist in enumerate(steps):\n for string in sublist:\n if string.count(',') > 0:\n if obj in string:\n leaves.append(i)\n else:\n if obj in sublist:\n leaves.append(i)\n leaves = sorted(list(set(leaves)))\n\n if len(leaves) > 1:\n viable_edges = []\n\n # compute cross-product\n for leaf1 in leaves:\n for leaf2 in leaves:\n if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n viable_edges.append(sorted((leaf1, leaf2)))\n\n # form edge networks\n for edge in viable_edges:\n n1, n2 = nodes[edge[0]], nodes[edge[1]]\n n1.add_edge(n2)\n n2.add_edge(n1)\n nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n return nodes\n\n # 3. Determine number of trees and regroup by connected nodes\n def connected_nodes(nodes):\n proto_trees = []\n nodes = set(nodes)\n\n while nodes:\n n = nodes.pop()\n group = {n}\n queue = [n]\n while queue:\n n = queue.pop(0)\n neighbors = n.edges\n neighbors.difference_update(group)\n nodes.difference_update(neighbors)\n group.update(neighbors)\n queue.extend(neighbors)\n proto_trees.append(group)\n return proto_trees\n\n # 4. Convert nodes to nested dictionary of parent-children relations\n # i.e. adding depth -- also deals with tree-node sorting and path\n # optimization\n def build_tree_dict(trees, steps):\n # node sorting in trees\n sorted_trees = []\n for tree in trees:\n sorted_trees.append(\n sorted(tree, key=lambda x: int(x.name)))\n\n # retrieve values of the nodes (the protocol's containers)\n # for each tree ... may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. 
TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. 
Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)", "def expand_tree(self, N=1):\n # type: (int) -> None\n assert self._initialized, 'Search not initialized.'\n for _ in range(N): \n x_rand = self.sample_free()\n x_nearest = self.nearest(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n if self.coll_free(x_nearest, x_new):\n self.index+=1\n X_near = [x for x in self.near(x_new) if self.coll_free(x, x_new)]\n cost_min = self.costs[self.research_index(self.nodes,x_nearest)][1] + self.dist(x_nearest, x_new)\n x_min = x_nearest\n for x in X_near:\n cost = self.costs[self.research_index(self.nodes,x)][1] + self.dist(x, x_new)\n if cost < cost_min:\n cost_min = cost\n x_min = x\n \n self.nodes.append(x_new)\n j=self.research_index(self.nodes,x_min)\n self.parents[self.index,j]=1\n self.costs[self.index] = (x_new,self.costs[j][1] + self.dist(x_min, x_new))\n for x in X_near:\n k=self.research_index(self.nodes,x)\n if self.costs[self.index][1] + self.dist(x_new, x) < self.costs[k][1]:\n self.parents[self.index]=np.zeros(self.N)\n self.parents[self.index,k] = 1\n self.costs[k] = (self.costs[k][0],self.costs[self.index][1] + self.dist(x_new, x))", "def all_nodes_dfs(log_T, initial_state, min_score, sub_info, max_depth=1000000000000000000, maxtraversals=1000000000000000000):\n # default argument for 
sub_info: empty_sub_info = (np.array([], dtype=int), np.array([], dtype=int), 1000000000000000000)\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n order = np.zeros(log_T.shape, np.int64)\n for i in xrange(order.shape[1]):\n order[i] = (-log_T[i]).argsort()\n n_states = log_T.shape[0]\n node = [order[initial_state, 0]] # most likely first node\n node_idx = [0]\n lengths_dfs = [-1.0]\n nodes_dfs = [[-1, ]]\n for it in xrange(maxtraversals):\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n lengths_dfs.append(-score)\n nodes_dfs.append(list(node))\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n break # end of the generator, can't increase even the root\n else:\n assert False, \"Number of traversals exceeded\"\n\n return lengths_dfs[1:], nodes_dfs[1:]", "def find_sharpest_fork_general(Nodes):\n pair_list = []\n Dis = np.array([])\n for n in Nodes:\n if n.parent is not None:\n if n.parent.parent is not None:\n a = n.parent.children\n if(isinstance(a, list)):\n if(len(a)==2):\n n1 = a[0]\n n2 = a[1]\n pair_list.append([n1 , n2])\n dis = LA.norm(a[0].xyz - a[1].xyz,2)\n Dis = np.append(Dis,dis)\n if(len(Dis)!= 0):\n (b,) = np.where(Dis == Dis.min())\n sharpest_pair = pair_list[b[0]]\n distance = Dis.min()\n else:\n sharpest_pair = [0,0]\n distance = 0.\n return sharpest_pair, distance", "def get_locations(nodes, tl, br):\n \n # Base cases:\n if len(nodes) == 1: # for singleton, only choice is to place in the single spot in 1x1 square\n return {nodes[0]: tl}\n if len(nodes) == 2: # for two nodes, arbitrarily chose to place the first node in top left\n return {nodes[0]: tl, nodes[1]: br}\n\n # Recursive case, need to create and solve subproblems:\n ret = {}\n\n num_edges = count_num_edges(nodes)\n if num_edges == 0: # for empty graphs, no need to run METIS, just assign arbitrarily\n i = 0\n for x in range(tl.x, br.x+1): \n for y in range(tl.y, br.y+1):\n if i < len(nodes):\n ret.update({nodes[i]: Point(x,y)})\n i += 1\n return ret\n\n filename = splitext(basename(sys.argv[1]))[0] + '.p.' + sys.argv[2] + '.yx.' + sys.argv[3] + '.drop.' + sys.argv[4] + '.' +\\\n '_'.join(['delete', str(tl.x), str(tl.y), str(br.x), str(br.y)]) \n\n # special case for the very first call of get_locations. For example, suppose that there are\n # 97 nodes on a 10x10 grid. Instead of dividing the 97 nodes into 2 equal partitions, we should\n # divide them into a partition of 90 nodes and a partition of 7 nodes. 
The former should be\n # placed on a 10x9 grid and te latter should be placed on a 1x7 grid.\n if len(nodes) < (br.x - tl.x + 1) * (br.y - tl.y + 1):\n assert tl == Point(0, 0)\n size_tl_nodes = (br.x + 1) * int(len(nodes) / (br.x + 1))\n if size_tl_nodes == len(nodes):\n ret.update(get_locations(nodes, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n return ret\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n # complicated indexing here. As an example, for the 97 into 10x10 case, we want to send 90 nodes\n # to a rectangle spanned by tl=Point(0, 0) and br=Point(9, 8) and we want to send 7 nodes to a \n # rectangle spanned by tl=Point(0, 9) and br=Point(6, 9)\n ret.update(get_locations(nodes_tl, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n ret.update(get_locations(nodes_br, tl=Point(0, len(nodes) / (br.x + 1)), br=Point(len(nodes) % (br.x + 1) - 1, len(nodes) / (br.x + 1))))\n return ret\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n half = tl.x + (br.x - tl.x - 1) / 2\n size_tl_nodes = (half - tl.x + 1) * (br.y - tl.y + 1)\n else: # split on x axis\n half = tl.y + (br.y - tl.y - 1) / 2\n size_tl_nodes = (br.x - tl.x + 1) * (half - tl.y + 1)\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(half, br.y)))\n ret.update(get_locations(nodes_br, tl=Point(half + 1,tl.y), br=br))\n else: # split on x axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(br.x, half)))\n ret.update(get_locations(nodes_br, tl=Point(tl.x, half + 1), br=br))\n\n return ret", "def find_sharpest_fork(nodes):\n pair_list = []\n Dis = np.array([])\n for n in nodes:\n if n.parent is not None:\n if n.parent.parent is not None:\n a = n.parent.children\n if(isinstance(a, list)):\n if(len(a)==2):\n n1 = a[0]\n n2 = a[1]\n if(len(n1.children) == 0 and len(n2.children) == 0):\n pair_list.append([n1 , n2])\n dis = LA.norm(a[0].xyz - a[1].xyz,2)\n Dis = np.append(Dis,dis)\n if(len(Dis)!= 0):\n (b,) = np.where(Dis == Dis.min())\n sharpest_pair = pair_list[b[0]]\n distance = Dis.min()\n else:\n sharpest_pair = [0,0]\n distance = 0.\n return sharpest_pair, distance", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n 
return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def RegenerateWith2Nodes (failed_node, list_of_nodes, name):\n \n list_of_vects = []\n\n for node_index in list_of_nodes:\n list_of_vects = list_of_vects + (map(lambda x: list(x), list(conf.BASIS_VECTORS [node_index])))\n\n array_A = numpy.array(list_of_vects).transpose()\n arrays_B = []\n\n arrays_B = map(lambda x: list(x), list(conf.BASIS_VECTORS [failed_node]))\n\n obj_list = []\n\n for node_index in list_of_nodes:\n for object_index in range(0,conf.PART_SIZE):\n obj_list.append (dist.pull_object_from_stores (name, node_index, object_index))\n\n for each in arrays_B:\n parts_to_pull = numpy.linalg.solve (array_A, numpy.array(each).transpose())\n pack = []\n \n for i in range(0,len(parts_to_pull)):\n if (parts_to_pull[i] != 0):\n pack.append (copy.deepcopy (obj_list[i]))\n\n dist.push_object_to_store (name, failed_node, reduce (numpy.bitwise_xor, pack), arrays_B.index (each))", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def update_tree(root, executed_acts, total_rew):\n root.value = max(total_rew, root.value)\n root.visits += 1\n new_nodes = 0\n\n node = root\n for step, act in enumerate(executed_acts):\n if act not in node.children:\n node.children[act] = Node()\n new_nodes += 1\n node = node.children[act]\n node.value = max(total_rew, node.value)\n node.visits += 1\n\n return new_nodes", "def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec,\n order, step_size, time, tol, unitary, upper):\n initial_guess = tf.reduce_sum(\n tf1.where(\n tf.range(MAX_ORDER + 1) <= order,\n backward_differences[:MAX_ORDER + 1],\n tf.zeros_like(backward_differences)[:MAX_ORDER + 1]),\n axis=0)\n\n np_dtype = np_dtype = dtype_util.as_numpy_dtype(backward_differences.dtype)\n\n rhs_constant_term = newton_coefficient * tf.reduce_sum(\n tf1.where(\n tf.range(1, MAX_ORDER + 1) <= order,\n RECIPROCAL_SUMS[1:, 
np.newaxis].astype(np_dtype) *\n backward_differences[1:MAX_ORDER + 1],\n tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]),\n axis=0)\n\n next_time = time + step_size\n step_size_cast = tf.cast(step_size, backward_differences.dtype)\n real_dtype = tf.abs(backward_differences).dtype\n\n def newton_body(iterand):\n \"\"\"Performs one iteration of Newton's method.\"\"\"\n next_backward_difference = iterand.next_backward_difference\n next_state_vec = iterand.next_state_vec\n\n rhs = newton_coefficient * step_size_cast * ode_fn_vec(\n next_time,\n next_state_vec) - rhs_constant_term - next_backward_difference\n delta = tf.squeeze(\n tf.linalg.triangular_solve(\n upper,\n tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),\n lower=False))\n num_iters = iterand.num_iters + 1\n\n next_backward_difference += delta\n next_state_vec += delta\n\n delta_norm = tf.cast(tf.norm(delta), real_dtype)\n lipschitz_const = delta_norm / iterand.prev_delta_norm\n\n # Stop if method has converged.\n approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm\n close_to_sol = approx_dist_to_sol < tol\n delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))\n converged = close_to_sol | delta_norm_is_zero\n finished = converged\n\n # Stop if any of the following conditions are met:\n # (A) We have hit the maximum number of iterations.\n # (B) The method is converging too slowly.\n # (C) The method is not expected to converge.\n too_slow = lipschitz_const > 1.\n finished = finished | too_slow\n if max_num_iters is not None:\n too_many_iters = tf.equal(num_iters, max_num_iters)\n num_iters_left = max_num_iters - num_iters\n num_iters_left_cast = tf.cast(num_iters_left, real_dtype)\n wont_converge = (\n approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)\n finished = finished | too_many_iters | wont_converge\n\n return [\n _NewtonIterand(\n converged=converged,\n finished=finished,\n next_backward_difference=next_backward_difference,\n next_state_vec=next_state_vec,\n num_iters=num_iters,\n prev_delta_norm=delta_norm)\n ]\n\n iterand = _NewtonIterand(\n converged=False,\n finished=False,\n next_backward_difference=tf.zeros_like(initial_guess),\n next_state_vec=tf.identity(initial_guess),\n num_iters=0,\n prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype))\n [iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished),\n newton_body, [iterand])\n return (iterand.converged, iterand.next_backward_difference,\n iterand.next_state_vec, iterand.num_iters)", "def recursion_loop(pulls, discount, grid_n):\n\n r_grid = np.linspace(0, 1, grid_n)\n gittins, values = initial_approximation(pulls, discount, grid_n)\n n = pulls - 2 # Note that the 2 comes from (1) the initial approximation and (2) python indexing\n while n >= 1:\n g, v = recursion_step(values[:n + 1, n, :], r_grid, discount)\n values[:n, n - 1] = v\n gittins[:n, n - 1] = g\n n -= 1\n return gittins, values", "def update_nodes(self, weights=None, hive_instance=None):\n hive = hive_instance or shared_hive_instance()\n metadata = None\n account = None\n cnt = 0\n while metadata is None and cnt < 5:\n cnt += 1\n try:\n account = Account(\"fullnodeupdate\", hive_instance=hive)\n metadata = json.loads(account[\"json_metadata\"])\n except:\n hive.rpc.next()\n account = None\n metadata = None\n if metadata is None:\n return\n report = metadata[\"report\"]\n failing_nodes = metadata[\"failing_nodes\"]\n parameter = metadata[\"parameter\"]\n benchmarks = parameter[\"benchmarks\"]\n if weights is 
None:\n weights_dict = {}\n for benchmark in benchmarks:\n weights_dict[benchmark] = (1. / len(benchmarks))\n elif isinstance(weights, list):\n weights_dict = {}\n i = 0\n weight_sum = 0\n for w in weights:\n weight_sum += w\n for benchmark in benchmarks:\n if i < len(weights):\n weights_dict[benchmark] = weights[i] / weight_sum\n else:\n weights_dict[benchmark] = 0.\n i += 1\n elif isinstance(weights, dict):\n weights_dict = {}\n i = 0\n weight_sum = 0\n for w in weights:\n weight_sum += weights[w]\n for benchmark in benchmarks:\n if benchmark in weights:\n weights_dict[benchmark] = weights[benchmark] / weight_sum\n else:\n weights_dict[benchmark] = 0.\n\n max_score = len(report) + 1\n new_nodes = []\n for node in self:\n new_node = node.copy()\n for report_node in report:\n if node[\"url\"] == report_node[\"node\"]:\n new_node[\"version\"] = report_node[\"version\"]\n scores = []\n for benchmark in benchmarks:\n result = report_node[benchmark]\n rank = result[\"rank\"]\n if not result[\"ok\"]:\n rank = max_score + 1\n score = (max_score - rank) / (max_score - 1) * 100\n weighted_score = score * weights_dict[benchmark]\n scores.append(weighted_score)\n sum_score = 0\n for score in scores:\n sum_score += score\n new_node[\"score\"] = sum_score\n for node_failing in failing_nodes:\n if node[\"url\"] == node_failing:\n new_node[\"score\"] = -1\n new_nodes.append(new_node)\n super(NodeList, self).__init__(new_nodes)", "def solve(self):\n self.left -= len(self.nodes)\n \n def depths(x,depth = 0):\n depth+=1\n for y in self.graph[x]:\n if y in self.nodes:\n self.nodes.remove(y)\n depth = depths(y,depth)\n return depth\n \n while len(self.nodes):\n x = self.nodes.pop()\n self.firstGen.append(depths(x))\n #print self.graph\n #print self.nodes\n #print self.firstGen", "def generate_children_nodes(\n curr_node, list_of_processed_nodes,\n running_count_of_children_dups, a_star_search=False\n):\n children_nodes_to_return = []\n direction_coordinates_map = curr_node.state.get_legal_snake_movement_coords()\n \n for direction, legal_coords in direction_coordinates_map.iteritems():\n curr_state_copy = copy.deepcopy(curr_node.state)\n new_state = State(\n dim=curr_state_copy.dim, num_obstacles=curr_state_copy.num_obstacles,\n grid=curr_state_copy.grid, snake=curr_state_copy.snake\n )\n #pdb.set_trace()#\n new_state.update_state_after_movement(new_head_coords=legal_coords)\n\n new_node_state = new_state\n new_node_action = direction\n new_node_parent_index = curr_node.index\n new_node_depth = curr_node.depth + 1\n\n new_node = Node(\n state=new_node_state, action=new_node_action,\n parent_index=new_node_parent_index, depth=new_node_depth\n )\n\n if not a_star_search:\n if new_node in list_of_processed_nodes:\n running_count_of_children_dups += 1\n continue\n\n children_nodes_to_return.append(new_node)\n\n return children_nodes_to_return, running_count_of_children_dups", "def find_nodes(input_line: str) -> List[Node]:\n li = [int(elem) for elem in input_line.split(\" \")]\n assert len(li) >= 2\n \n # store Nodes in two sets, depending is their processing ready or not\n unfinished = set()\n finished = set()\n \n \n i = 0 # points to the index where to read the input list\n parent = None\n \n # add root node\n global root # global so we can directly grab its value outside this func\n root = Node(num_childs = li[i], num_metadata = li[i+1], children = None, parent = parent)\n print(\"Added root node:\", root)\n \n # Logic for handling the root node\n if root.num_childs > 0:\n unfinished.add(root) # assumes more 
to come...\n i += 2 # continue from child's first element\n else: # root node does not have children\n finished.add(root)\n i += 2 + num_metadata\n \n parent = root\n \n \n all_done = False # set to True when all nodes has been processed (to break out of the loop)\n \n # now we have a root ready\n while i < len(li):\n #print(i)\n \n while parent.num_child_processed >= parent.num_childs:\n # backtrack a step towards root node!\n # store metadata elements\n parent.metadata = li[i: i+parent.num_metadata]\n \n # calculate node value\n parent.value = sum(parent.children[idx - 1].value for idx in parent.metadata if idx > 0 and idx <= parent.num_childs)\n \n finished.add(parent)\n unfinished.remove(parent)\n i += parent.num_metadata\n \n if parent.parent:\n parent = parent.parent\n else: # was root\n print(\"Backtracking out from root, hence all done\")\n all_done = True\n break\n \n if all_done:\n break\n \n curr_num_childs, curr_num_metadata = li[i], li[i+1]\n \n # create a new node\n curr_node = Node(num_childs = curr_num_childs, num_metadata = curr_num_metadata, children = None, parent = parent)\n #print(\"Found new node:\", curr_num_childs, curr_num_metadata, \"\\t\\tparent:\", parent)\n parent.children.append(curr_node)\n parent.num_child_processed += 1\n \n if curr_num_childs > 0: # current node has children\n unfinished.add(curr_node)\n i = i + 2 # continue with the child\n parent = curr_node # which has current node as its parent\n else: # current node is a leaf node\n curr_node.metadata = li[i+2: i+2+curr_num_metadata]\n # calculate node value\n curr_node.value = sum(curr_node.metadata)\n \n finished.add(curr_node)\n i = i + 2 + curr_num_metadata\n \n return finished", "def getNextNodeUsingCellDiff(kGoalState):\n \n global fringe\n global solutions\n\n \n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getHValueForNode(pnode,kGoalState)\n #print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. function values\n g_value = getHValueForNode(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getHValueForNode(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def _compute_nodes_1d(npts, ilbds1d): # pylint: disable=line-too-long\n if npts % 2 == 0:\n raise ValueError(\"Please enter odd npts\")\n ind = np.arange(1, npts + 1)\n nodes = 0.5 * (1 - np.cos(np.pi * ind / (npts + 1)))\n return nodes * (ilbds1d[1] - ilbds1d[0]) + ilbds1d[0]", "def calculate(self, extraParams=None):\n\n if not self.isCalc or self.nodeClass == \"button\" or self.nodeClass == \"formnode\":\n nodeIsCircular = self.isCircular()\n if not self._bypassCircularEvaluator and nodeIsCircular:\n circularNodes = self.getSortedCyclicDependencies()\n\n if self.dynamicEvaluator is None:\n self.dynamicEvaluator = FactoryDynamic.createInstance(\n circularNodes, self)\n\n params = self.dynamicEvaluator.generateCircularParameters(\n self, circularNodes)\n\n if params['dynamicIndex'] is None:\n raise ValueError(\"Cyclic dependency detected between nodes: \" + \",\".join(\n circularNodes) + \". Please use the 'pp.dynamic' function\")\n elif 'indexDic' in params and len(params['indexDic']) > 1:\n raise ValueError(\n f'Multiple indices were found using dynamic. Indexes: {\",\".join(params[\"indexDic\"].keys())}. 
Nodes involved: {\",\".join(circularNodes)}')\n\n self.dynamicEvaluator.circularEval(self, params)\n else:\n from_circular_evaluator = self._bypassCircularEvaluator\n\n self.sendStartCalcNode(from_circular_evaluator)\n self.model.currentProcessingNode(self.identifier)\n self._bypassCircularEvaluator = False\n\n startTime = dt.datetime.now()\n finalDef = str(self._definition)\n self.lastLazyTime = 0\n\n # CLEAR circular dependency\n if nodeIsCircular:\n finalDef = BaseDynamic.clearAllCircularDependency(finalDef)\n tmpCode = self.compileDef(finalDef)\n\n # check for replace calls to varaibles with next rules:\n # node1 --> change to getNode('node1').result\n # node1.result --> change to getNode('node1').result\n # node1.title --> change to getNode('node1').title\n # \"node1\" --> no change\n # 'node1' --> no change\n if not tmpCode is None:\n names = self.parseNames(tmpCode)\n rx = r\"('[^'\\\\]*(?:\\\\.[^'\\\\]*)*'|\\\"[^\\\"\\\\]*(?:\\\\.[^\\\"\\\\]*)*\\\")|\\b{0}\\b\"\n for node in names:\n if self._model.existNode(self._model.clearId(node)):\n finalDef = re.sub(rx.format(node), lambda m:\n (\n m.group(1)\n if m.group(1)\n else\n (\n \"getNode('\"+node+\"')\"\n if (m.endpos > m.regs[0][1]+5) and ((m.string[m.regs[0][1]:m.regs[0][1]+5] == '.node') or (m.string[m.regs[0][1]:m.regs[0][1]+8] == '.timeit('))\n else\n (node\n if (m.string[m.regs[0][0]-1:m.regs[0][0]+len(node)] == ('.'+node)) or (m.string[m.regs[0][0]-7:m.regs[0][0]] == 'import ') or (m.string[m.regs[0][0]-5:m.regs[0][0]] == 'from ')\n else \"getCalcNode('\"+node+\"')\"\n )\n )\n ), finalDef, 0, re.IGNORECASE)\n elif node == \"self\":\n finalDef = re.sub(rx.format(node), lambda m:\n (\n m.group(1)\n if m.group(1)\n else\n \"getNode('\" + self.identifier + \"')\"\n if (m.endpos > m.regs[0][1]+11) and (m.string[m.regs[0][1]:m.regs[0][1]+11] != '._tryFilter')\n else \"self\"\n ), finalDef, 0, re.IGNORECASE)\n\n localRes = {\n \"getNode\": self._model.getNode,\n \"getCalcNode\": self._getCalcNode,\n \"cp\": Helpers(self)\n }\n if not extraParams is None:\n for keyParam in extraParams:\n localRes[keyParam] = extraParams[keyParam]\n\n customImports = self.model.getCustomImports()\n if customImports:\n for keyParam in customImports:\n localRes[keyParam] = customImports[keyParam]\n\n try:\n # execute node definition in supervised context\n memoryIO = io.StringIO()\n try:\n with redirect_stdout(memoryIO):\n exec(compile(finalDef, '<string>', 'exec'), localRes)\n except Exception as ex:\n if \"_io.StringIO\" in str(ex):\n exec(compile(finalDef, '<string>', 'exec'), localRes)\n else:\n raise ex\n\n self.lastEvaluationConsole = memoryIO.getvalue()\n memoryIO = None\n\n if self.nodeClass not in [\"button\", \"module\", \"text\"]:\n if 'this' in localRes:\n self._result = localRes['this']\n elif 'result' in localRes:\n self._result = localRes['result']\n else:\n self._result = None\n if self.lastEvaluationConsole != \"\":\n self._result = str(self.lastEvaluationConsole)\n else:\n raise ValueError(\n \"The result was not found. 
Did you forget to include the text 'result =' ?\")\n\n self._isCalc = self.nodeClass != \"button\"\n self.postCalculate()\n\n endTime = dt.datetime.now()\n self.lastEvaluationTime = (\n endTime - startTime).total_seconds() - self.lastLazyTime\n if self.lastEvaluationTime < 0:\n self.lastEvaluationTime = 0\n self.evaluationVersion = self.model.evaluationVersion\n finally:\n localRes[\"cp\"].release()\n localRes = None\n self.sendEndCalcNode(from_circular_evaluator)\n else:\n self._bypassCircularEvaluator = False", "def start_one_step(self):\r\n new_infected_list = []\r\n old_infected_list = copy.deepcopy(self.infected_list)\r\n new_recovered_list = []\r\n old_recovered_list = copy.deepcopy(self.recovered_list)\r\n # For each infected node\r\n for infected_nid in old_infected_list:\r\n infected_node = self.node_dict[infected_nid]\r\n # For each neighbor\r\n for dst_nid in infected_node.get_dst_nid_list(self.graph):\r\n dst_node = self.node_dict[dst_nid]\r\n # Infect susceptible nodes with probability [p]\r\n if dst_node.state is NodeState.SUSCEPTIBLE and random.random() < self.p:\r\n dst_node.infected(self.i)\r\n new_infected_list.append(dst_nid)\r\n\r\n # Minus 1 turn of (remaining) infected days for all infected nodes\r\n infected_node.minus_one_state_day()\r\n # If infected node is recovered\r\n if infected_node.check_finish_infection():\r\n # Infected node get recovered\r\n infected_node.recovered(self.r)\r\n # Remove from infected list\r\n self.infected_list.remove(infected_nid)\r\n # Append to recovered list\r\n new_recovered_list.append(infected_nid)\r\n\r\n # Add newly infected nodes into infected list\r\n self.infected_list += new_infected_list\r\n\r\n # For each recovered node\r\n for recovered_nid in old_recovered_list:\r\n recovered_node = self.node_dict[recovered_nid]\r\n # Minus 1 turn of (remaining) recovered days for all recovered nodes\r\n recovered_node.minus_one_state_day()\r\n # If infected node is recovered\r\n if recovered_node.check_finish_recovery():\r\n # Recovered node get recovered\r\n recovered_node.susceptible()\r\n # Remove from recovered list\r\n self.recovered_list.remove(recovered_nid)\r\n\r\n # Add newly recovered nodes into recovered list\r\n self.recovered_list += new_recovered_list", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. 
gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def update_nodes(nodes, svg_h):\n for i in range(0, len(nodes)):\n nodes[i,2] = svg_h-nodes[i,2]\n return nodes", "def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] 
= self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up", "def run_one_step(self, dt):\n if not self._erode_flooded_nodes:\n flood_status = self._grid.at_node[\"flood_status_code\"]\n flooded_nodes = np.nonzero(flood_status == _FLOODED)[0]\n else:\n flooded_nodes = []\n\n upstream_order_IDs = self._grid[\"node\"][\"flow__upstream_node_order\"]\n\n defined_flow_receivers = np.not_equal(\n self._grid[\"node\"][\"flow__link_to_receiver_node\"], self._grid.BAD_INDEX\n )\n\n try:\n length_of_link = self._grid.length_of_d8\n except AttributeError:\n length_of_link = self._grid.length_of_link\n\n flow_link_lengths = length_of_link[\n self._grid.at_node[\"flow__link_to_receiver_node\"][defined_flow_receivers]\n ]\n flow_receivers = self._grid[\"node\"][\"flow__receiver_node\"]\n\n # Operate the main function:\n if self._use_W:\n self._alpha[defined_flow_receivers] = (\n self._K[defined_flow_receivers]\n * dt\n * self._A[defined_flow_receivers] ** self._m\n / self._W[defined_flow_receivers]\n / (flow_link_lengths**self._n)\n )\n\n else:\n self._alpha[defined_flow_receivers] = (\n self._K[defined_flow_receivers]\n * dt\n * self._A[defined_flow_receivers] ** self._m\n / (flow_link_lengths**self._n)\n )\n\n # Handle flooded nodes, if any (no erosion there)\n if flooded_nodes is not None:\n self._alpha[flooded_nodes] = 0.0\n\n reversed_flow = self._elevs < self._elevs[flow_receivers]\n # this check necessary if flow has been routed across\n # depressions\n self._alpha[reversed_flow] = 0.0\n\n threshdt = self._sp_crit * dt\n\n # solve using Brent's Method in Cython for Speed\n if isinstance(threshdt, float):\n brent_method_erode_fixed_threshold(\n upstream_order_IDs,\n flow_receivers,\n threshdt,\n self._alpha,\n self._n,\n self._elevs,\n )\n else:\n brent_method_erode_variable_threshold(\n upstream_order_IDs,\n flow_receivers,\n threshdt,\n self._alpha,\n self._n,\n self._elevs,\n )" ]
[ "0.65839136", "0.57315016", "0.567347", "0.55890775", "0.55738044", "0.5463602", "0.54403263", "0.5439998", "0.5405277", "0.53822875", "0.5344752", "0.5334331", "0.5325484", "0.5318365", "0.53122985", "0.53017414", "0.5295547", "0.5275849", "0.52737594", "0.52574426", "0.5230346", "0.52266407", "0.5218263", "0.5185318", "0.51770127", "0.5173949", "0.5160851", "0.5157591", "0.51378775", "0.5133119" ]
0.6325345
1
Calculates divided differences for the given interpolation nodes. It is assumed that at least two interpolation nodes are provided. Each tuple of the returned list represents one level of the divided differences tree.
def calculate_divided_differences(nodes):
    nodes_to_compute = []
    divided_differences = []
    for node in nodes:
        nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1]))

    divided_differences.append(tuple(nodes_to_compute))

    while len(nodes_to_compute) > 1:
        next_node_row = calculate_divided_differences_row(nodes_to_compute)
        divided_differences.append(tuple(next_node_row))
        nodes_to_compute = next_node_row

    return divided_differences
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_divided_differences_row(nodes_to_compute):\n divided_differences = []\n\n if len(nodes_to_compute) == 1:\n return None\n\n for i in range(0, len(nodes_to_compute) - 1):\n child = DividedDifferenceNode.create_child_node(nodes_to_compute[i], nodes_to_compute[i + 1])\n child.calculate_value()\n divided_differences.append(child)\n\n for node in divided_differences:\n print(node, end='')\n\n print('\\n')\n return divided_differences", "def diff(self):\n return [node.diff for node in self]", "def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)", "def getSubdivisionNodes(self, signature):\n x, y, z = signature[0], signature[1], signature[2]\n return [(2*x+1, 2*y, 2*z), (2*x, 2*y, 2*z),\n (2*x+1, 2*y+1, 2*z), (2*x, 2*y, 2*z+1),\n (2*x+1, 2*y+1, 2*z+1), (2*x, 2*y+1, 2*z),\n (2*x+1, 2*y, 2*z+1), (2*x, 2*y+1, 2*z+1)]", "def fractionalPoints(totalNodeList, recNodeList, fracPoints):\n\n avPoint = averagePoints(recNodeList)\n\n for i in range(0, fracPoints):\n closestPoint = closest_node(avPoint, totalNodeList) #Finds closest point\n totalNodeList.remove(closestPoint)\n recNodeList.append(closestPoint)\n\n printProgressBar(i, fracPoints) \n\n return recNodeList", "def equiangular_dimension_unpack(nodes, ratio):\n dim1 = int((nodes / ratio) ** 0.5)\n dim2 = int((nodes * ratio) ** 0.5)\n if dim1 * dim2 != nodes: # Try to correct dim1 or dim2 if ratio is wrong\n if nodes % dim1 == 0:\n dim2 = nodes // dim1\n if nodes % dim2 == 0:\n dim1 = nodes // dim2\n assert dim1 * dim2 == nodes, f'Unable to unpack nodes: {nodes}, ratio: {ratio}'\n return dim1, dim2", "def getFractionalItems(self, startingPoint, returnFmt = 0, refinements = 1):\n\n def closest_node(node, nodes):\n\n \"\"\" returns closest node using dot vectorization, slightly faster see https://codereview.stackexchange.com/questions/28207/finding-the-closest-point-to-a-list-of-points \"\"\"\n\n if node in nodes:\n nodes.remove(node)\n\n nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum('ij,ij->i', deltas, deltas)\n temp = nodes[np.argmin(dist_2)]\n return (temp[0], temp[1])\n\n def averagePoints(nodeList):\n #Consider switching to numpy mean arrays if performance is an issue\n #inits\n tempX, tempY = 0, 0\n for node in nodeList:\n tempX += node[0]\n tempY += node[1]\n \n avX, avY = tempX/len(nodeList), tempY/len(nodeList)\n avPoint = [avX, avY]\n\n return avPoint\n\n def fractionalPoints(totalNodeList, recNodeList, fracPoints):\n\n \"\"\" Starts out with one point should be in a place of high density #NOTE this is not automated yet. Keep adding points (it will add the closest)\n point to the set over and over until 50% of the points are encircled. 
Then it will return a list of those points \"\"\"\n\n avPoint = averagePoints(recNodeList)\n\n for i in range(0, fracPoints):\n closestPoint = closest_node(avPoint, totalNodeList) #Finds closest point\n totalNodeList.remove(closestPoint)\n recNodeList.append(closestPoint)\n\n printProgressBar(i, fracPoints) \n\n return recNodeList \n\n #Gets fractional points \n numPointsFrac = math.floor(self.numPoints * self.detectionFraction)\n fracPoints = fractionalPoints(self.points, [startingPoint], numPointsFrac)\n \n #Hull creation and getting of verticies\n hull = ConvexHull(fracPoints)\n polyVertices = [fracPoints[vertex] for vertex in hull.vertices] \n cutVertices = chaikins_corner_cutting(polyVertices, refinements)\n\n #Path creation \n polyCodes = [mppath.Path.LINETO] * len(polyVertices)\n polyCodes[0] = mppath.Path.MOVETO\n polyCodes[-1] = mppath.Path.CLOSEPOLY\n\n cutCodes = [mppath.Path.LINETO] * len(cutVertices)\n cutCodes[0] = mppath.Path.MOVETO\n cutCodes[-1] = mppath.Path.CLOSEPOLY\n\n polyPath = mppath.Path(polyVertices, polyCodes)\n cutPath = mppath.Path(cutVertices, cutCodes)\n\n #How you want the information returned \n if returnFmt == -2:\n return [[cutVertices, cutPath], fracPoints]\n if returnFmt == -1:\n return fracPoints\n if returnFmt == 0:\n return [cutVertices, cutPath]\n if returnFmt == 1:\n return [polyVertices, polyPath]\n if returnFmt == 2:\n return [[cutVertices, cutPath], [polyVertices, polyPath]]", "def _computeDerivative(self,angles, distances):\n slope=[]\n slope.append(0)\n for i in xrange(1,len(angles)):\n der = (distances[i]-distances[i-1])/(angles[i]-angles[i-1])\n slope.append(der)\n #slope.append(0)\n return slope", "def diff_frac(data_1, data_2):\n\n frac_1 = np.sum(data_1) / len(data_1)\n frac_2 = np.sum(data_2) / len(data_2)\n\n return frac_1 - frac_2", "def _compute_diff(self, begin, end):\n d = self.diff\n x = self.x\n for i in range(begin, end):\n for j in range(i):\n d[i].append((d[i][j] - d[i-1][j]) / (x[i] - x[i-j-1]))", "def rat2frac_list(x, y):\n\tcont = rat2cont_quot(x, y)\n\tfrac = []\n\tfor i in range(len(cont)):\n\t\tfrac.append(cont2frac(cont[:(i+1)]))\n\treturn frac", "def differences(data: list) -> list:\n differences = []\n iterable, copy = tee(data)\n next(copy) # adjusts copy of my iterable up 1 element\n for x, y in zip(iterable, copy):\n differences.append(abs(x - y))\n\n return differences", "def test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff_gaps(s1), 0)\n self.assertEqual(s1.frac_diff_gaps(s2), 0)\n self.assertEqual(s1.frac_diff_gaps(s3), 1)\n self.assertEqual(s1.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s5), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s6), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s7), 1)\n self.assertEqual(s1.frac_diff_gaps(e), 0)\n self.assertEqual(s3.frac_diff_gaps(s3), 0)\n self.assertEqual(s3.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s3.frac_diff_gaps(s7), 0.0)\n self.assertEqual(e.frac_diff_gaps(e), 0.0)\n self.assertEqual(s4.frac_diff_gaps(s5), 1.0)\n self.assertEqual(s4.frac_diff_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0)", "def test_frac_diff(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff(e), 0)\n 
self.assertEqual(s1.frac_diff(s2), 0.75)\n self.assertEqual(s1.frac_diff(s3), 1)\n self.assertEqual(s1.frac_diff(s4), 0) # note truncation", "def diff(self, value):\n if hasattr(value, \"__len__\"):\n if len(value) == len(self):\n for node, val in zip(self, value):\n node.diff = val\n return\n else:\n raise RxDException(\n \"diff must either be a scalar or an iterable of the same length as the NodeList\"\n )\n for node in self:\n node.diff = value", "def deltas(L):\n return map(sub, tuple(L)[1:], L)", "def __diff_internal(self):\n assert self.p > 0, \"order of Bspline must be > 0\" # we already handle the other case in diff()\n\n # https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html\n #\n t = self.knot_vector\n p = self.p\n bi = BsplineBasis(t[:-1], p - 1)\n bip1 = BsplineBasis(t[1:], p - 1)\n\n numer1 = +p\n numer2 = -p\n denom1 = t[p:-1] - t[:-(p + 1)]\n denom2 = t[(p + 1):] - t[1:-p]\n\n with np.errstate(divide='ignore', invalid='ignore'):\n ci = np.where(denom1 != 0., (numer1 / denom1), 0.)\n cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.)\n\n return (ci, bi), (cip1, bip1)", "def find_fractions():\n num_list = []\n den_list = []\n for n in range(10, 100):\n for d in range(10, 100):\n if d > n:\n x = n / d\n ln = list(str(n))\n ld = list(str(d))\n if (ln[0] == ld[1]) and (ln[0] != '0'):\n if ld[0] != '0':\n if (int(ln[1]) / int(ld[0])) == x:\n print \"n/d =\", n, d\n num_list.append(n)\n den_list.append(d)\n else:\n continue\n elif (ln[1] == ld[0]) and (ln[1] != '0'):\n if ld[1] != '0':\n if (int(ln[0]) / int(ld[1])) == x:\n print \"n/d =\", n, d\n num_list.append(n)\n den_list.append(d)\n else:\n continue\n else:\n continue\n return num_list, den_list", "def diff(self):\n\t\tif len(self.v) < 4:\n\t\t\treturn None\n\t\tif self.poli == None:\n\t\t\tself.generar_polinomio()\n\t\tif x != None:\n\t\t\treturn diff(self.poli)(x)\n\t\treturn diff(self.poli)", "def _extract_fraction_with_text_nl(tokens, short_scale, ordinals):\n for c in _FRACTION_MARKER_NL:\n partitions = partition_list(tokens, lambda t: t.word == c)\n\n if len(partitions) == 3:\n numbers1 = \\\n _extract_numbers_with_text_nl(partitions[0], short_scale,\n ordinals, fractional_numbers=False)\n numbers2 = \\\n _extract_numbers_with_text_nl(partitions[2], short_scale,\n ordinals, fractional_numbers=True)\n\n if not numbers1 or not numbers2:\n return None, None\n\n # ensure first is not a fraction and second is a fraction\n num1 = numbers1[-1]\n num2 = numbers2[0]\n if num1.value >= 1 and 0 < num2.value < 1:\n return num1.value + num2.value, \\\n num1.tokens + partitions[1] + num2.tokens\n\n return None, None", "def calculate_deltas(tvals, tp_confidences, fp_confidences, num_samples):\n deltas = []\n tp_percentages = []\n fp_percentages = []\n for tval in tvals:\n dval, tp_pct, fp_pct = delta(\n tval,\n tp_confidences,\n fp_confidences,\n num_samples\n )\n deltas.append(dval)\n tp_percentages.append(tp_pct)\n fp_percentages.append(fp_pct)\n return deltas, tp_percentages, fp_percentages", "def divide_microstructure_unit(self,point1,point2,dimensions):\n\t\tnew_sections = []\n\t\tif len(point1) < 4:\n\t\t\tdone = 0.0\n\t\t\tdtwo = 0.0\n\t\telse:\n\t\t\tdone = point1[-1]\n\t\t\tdtwo = point2[-1]\n\t\t\n\t\tp1 = np.array(point1[:3])\n\t\tp2 = np.array(point2[:3])\n\t\tvec = p2-p1\n\t\tdimslength = float(np.sum(dimensions))\n\t\tfor d,dim in enumerate(dimensions[:-1]):\n\t\t\tnearsideproportion = np.sum(dimensions[:d])/dimslength\n\t\t\tfarsideproportion = 
np.sum(dimensions[:d+1])/dimslength\n\t\t\tnew_sections.append([\t\n\t\t\t\t\t\tlist(np.append(p1+vec*nearsideproportion,done)),\n\t\t\t\t\t\tlist(np.append(((p1+vec*nearsideproportion)+(p1+vec*farsideproportion))/2.0,(done+dtwo)/2.0)),\n\t\t\t\t\t\tlist(np.append(p1+vec*farsideproportion,dtwo))\n\t\t\t\t\t\t])\n\t\t\n\t\tnew_sections.append([\t\n\t\t\t\t\tlist(new_sections[-1][-1]),\n\t\t\t\t\tlist((np.array(new_sections[-1][-1])+np.array(list(point2[:3])+[dtwo]))/2.0),\n\t\t\t\t\tlist(point2[:3])+[dtwo]\n\t\t\t\t\t])\n\t\t\n\t\tif len(dimensions) > 2:\n\t\t\treturn(new_sections,['node','paranode1','paranode2','internode','paranode2','paranode1'][:len(new_sections)])\n\t\t\n\t\telse:\n\t\t\treturn(new_sections,['interbouton','bouton'][:len(new_sections)])", "def find_sharpest_fork_general(Nodes):\n pair_list = []\n Dis = np.array([])\n for n in Nodes:\n if n.parent is not None:\n if n.parent.parent is not None:\n a = n.parent.children\n if(isinstance(a, list)):\n if(len(a)==2):\n n1 = a[0]\n n2 = a[1]\n pair_list.append([n1 , n2])\n dis = LA.norm(a[0].xyz - a[1].xyz,2)\n Dis = np.append(Dis,dis)\n if(len(Dis)!= 0):\n (b,) = np.where(Dis == Dis.min())\n sharpest_pair = pair_list[b[0]]\n distance = Dis.min()\n else:\n sharpest_pair = [0,0]\n distance = 0.\n return sharpest_pair, distance", "def divideWork(self,ratios):\r\n \r\n taskCount = 0\r\n\r\n #Sums up the ratio amounts\r\n for nodeId,ratio in ratios.iteritems():\r\n taskCount += ratio\r\n\r\n #Divide the work into 'taskCount' chunks\r\n [primer,prefix] = WorkRange(self.charset).divideMore(self.primer,self.prefix,taskCount)\r\n \r\n prim = {}\r\n pref = {}\r\n \r\n for nodeId,ratio in ratios.iteritems():\r\n prim[nodeId] = []\r\n pref[nodeId] = []\r\n\r\n #Assigns each node 'ratio' amount chunks\r\n for i in range(ratio):\r\n prim[nodeId].append(primer.pop())\r\n pref[nodeId].append(prefix.pop())\r\n\r\n\r\n return [prim,pref]", "def derivative(requestContext, seriesList):\n results = []\n for series in seriesList:\n newValues = []\n prev = None\n for val in series:\n if None in (prev,val):\n newValues.append(None)\n prev = val\n continue\n newValues.append(val - prev)\n prev = val\n newName = \"derivative(%s)\" % series.name\n newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)\n newSeries.pathExpression = newName\n results.append(newSeries)\n return results", "def diff_op(self, args: List[float], time: float) -> List[float]:\n v, nK, ca = args\n ca_args: List[float] = [v, ca]\n dvdt: float = self.dvdt(args=args)\n dnKdt: float = self.kvhh.dndt(v=v, n=nK)\n dCadt: float = self.dCadt(args=ca_args)\n return [dvdt, dnKdt, dCadt]", "def split_distances(self, remaining: List[int], distances: List[float], mid: int) -> List[List[int]]:\n closer, farther = [], []\n for index in remaining:\n if distances[index] <= mid:\n closer.append(index)\n else:\n farther.append(index)\n return [closer, farther]", "def calc_diffs(self, y, x, locs):\n res = {}\n \n for item, value in locs.iteritems():\n res[item] = self.slab_ratio * (self.grid[y, x] - self.grid[value['y'], value['x']])\n \n return res", "def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n 
tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")", "def compareNodes(x, y):\n return x.pathValue - y.pathValue" ]
[ "0.7081299", "0.526944", "0.52446645", "0.5239564", "0.523342", "0.51974493", "0.51963425", "0.5140926", "0.50798607", "0.5076724", "0.50607145", "0.5054353", "0.5047319", "0.49978474", "0.49799216", "0.49528977", "0.49478018", "0.49372533", "0.49131808", "0.490018", "0.48992065", "0.4881139", "0.483447", "0.48213744", "0.48211652", "0.48179495", "0.4774421", "0.4773157", "0.47703457", "0.47701833" ]
0.77134573
0
Creates a polynomial from a given list of divided differences. The polynomial string is created according to the equation provided in the project docs.
def calculate_newton_interpolation(divided_differences): polynomial = [] for i, divided_differences_row in enumerate(divided_differences): polynomial_part = '({0})'.format(divided_differences_row[0].divided_difference) for j in range(0, i): polynomial_part += '*(x-{0})'.format(divided_differences[0][j].x) polynomial_part += '+' polynomial.append(polynomial_part) polynomial_str = ''.join(polynomial)[:-1] print('Calculated polynomial: {0}'.format(polynomial_str)) # Heuristic simplification of calculated polynomial simplified_polynomial = sy.simplify(polynomial_str) print("Simplified polynomial: {0}".format(simplified_polynomial)) return simplified_polynomial
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_to_poly(polynomial_list):\n max_degree = len(polynomial_list) - 1\n strings = []\n opts = ['x', '']\n for index, num in enumerate(polynomial_list):\n if num == 0:\n continue\n if index < max_degree - 1:\n string = '{}x^{}'.format(num, max_degree - index)\n strings.append(string)\n else:\n strings.append(str(num) + opts[index - (max_degree - 1)])\n polynomial = ' + '.join(strings).replace('+ -', '- ')\n return polynomial", "def polynomial_equation(funct):\n coeff = str(differentiation.parse_coefficient(funct))\n if \"^\" not in funct:\n divisor = \"1\"\n else:\n divisor_location = str(funct.index(\"^\") + 1)\n divisor = funct[divisor_location:]\n if divisor == \"-1\":\n pass\n else:\n divisor = str(int(divisor) + 1)\n coeff += \"/\" + divisor\n return coeff + \"x^\" + str(divisor)", "def compute_deriv(poly):\n derivative_of_poly = []\n for i in range(1, len(poly)):\n power = i\n coeff = poly[i]\n y = float(coeff * power)\n first = derivative_of_poly.append(y)\n return derivative_of_poly", "def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p", "def poly_derivative(poly):\n if not poly or type(poly) is not list:\n return None\n\n response = []\n\n for order in range(1, len(poly)):\n response.append(order * poly[order])\n\n if not response:\n response.append(0)\n\n return response", "def polynomial_creator(*coefficients):\n def polynomial(x):\n res = 0\n for index, coeff in enumerate(coefficients):\n res += coeff * x** index\n return res\n return polynomial", "def compute_deriv(poly):\n exp = 1\n new_poly = []\n for x in poly[1:]:\n new_poly.append(x * exp)\n exp += 1\n\n return tuple(new_poly)", "def build_poly_expr(query_tuple):\n print(\"query_tuple: \", query_tuple)\n expression = '0 + '\n factors = np.arange(7)\n\n for coeff, factor in zip(query_tuple, factors):\n if coeff != None:\n expression += '(' + str(np.float64(coeff)) + '*x^{}) + '.format(factor)\n\n # Remove trailing '+'\n expression = expression[:-3]\n \n # Return as a tuple.\n return (expression,)", "def get_equation(self):\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n string = \"\"\n\n for index, polynomial in self.polynomials.items():\n polynomial = int(polynomial)\n index = int(index)\n\n if polynomial != 0:\n if polynomial < 0:\n string_pre = \" - \"\n else:\n string_pre = \" + \"\n\n if index != 0:\n string_append = \"x\"\n elif polynomial == 1 or polynomial == -1:\n string_append = str(abs(polynomial))\n else:\n string_append = \"\"\n\n if polynomial < 0:\n polynomial = abs(polynomial)\n\n if polynomial != 1:\n string_append = str(polynomial) + string_append\n\n if index != 0 and index != 1:\n string_append += \"^\" + str(index)\n\n string += string_pre + 
string_append\n\n if len(string) > 0:\n string = string[3:]\n else:\n string = \"0\"\n\n return string", "def coefficients_from_Weierstrass_polynomial(f):\n R = f.parent()\n cubic_variables = [x for x in R.gens() if f.degree(x) == 3]\n quadratic_variables = [y for y in R.gens() if f.degree(y) == 2]\n try:\n x = cubic_variables[0]\n y = quadratic_variables[0]\n except IndexError:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n a1 = a2 = a3 = a4 = a6 = 0\n x3 = y2 = None\n for coeff, mon in f:\n if mon == x**3:\n x3 = coeff\n elif mon == x**2:\n a2 = coeff\n elif mon == x:\n a4 = coeff\n elif mon == 1:\n a6 = coeff\n elif mon == y**2:\n y2 = -coeff\n elif mon == x*y:\n a1 = -coeff\n elif mon == y:\n a3 = -coeff\n else:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n if x3 != y2:\n raise ValueError('the coefficient of x^3 and -y^2 must be the same')\n elif x3 != 1:\n a1, a2, a3, a4, a6 = a1/x3, a2/x3, a3/x3, a4/x3, a6/x3\n return [a1, a2, a3, a4, a6]", "def test():\n assert str(Polynomial(0, 1, 0, -1, 4, -2, 0, 1, 3, 0)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial([-5, 1, 0, -1, 4, -2, 0, 1, 3, 0])) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x - 5\"\n assert str(Polynomial(x7=1, x4=4, x8=3, x9=0, x0=0, x5=-2, x3=-1, x1=1)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial(x2=0)) == \"0\"\n assert str(Polynomial(x0=0)) == \"0\"\n assert Polynomial(x0=2, x1=0, x3=0, x2=3) == Polynomial(2, 0, 3)\n assert Polynomial(x2=0) == Polynomial(x0=0)\n assert str(Polynomial(x0=1) + Polynomial(x1=1)) == \"x + 1\"\n assert str(Polynomial([-1, 1, 1, 0]) + Polynomial(1, -1, 1)) == \"2x^2\"\n pol1 = Polynomial(x2=3, x0=1)\n pol2 = Polynomial(x1=1, x3=0)\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 1) == \"x - 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 2) == \"x^2 - 2x + 1\"\n pol3 = Polynomial(x0=-1, x1=1)\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(Polynomial(x0=2).derivative()) == \"0\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative()) == \"6x^2 + 3\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative().derivative()) == \"12x\"\n pol4 = Polynomial(x3=2, x1=3, x0=2)\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert Polynomial(-2, 3, 4, -5).at_value(0) == -2\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3) == 20\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3, 5) == 44\n pol5 = Polynomial([1, 0, -2])\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-1, 3.6) == -23.92\n assert pol5.at_value(-1, 3.6) == -23.92", "def poly_derivative(poly):\n if type(poly) is not list or len(poly) < 1:\n return None\n if len(poly) == 1:\n return [0]\n\n derivated_coefficients = []\n\n for power, coefficient in enumerate(poly):\n if power == 0:\n pass\n\n else:\n new_coefficient = coefficient * power\n derivated_coefficients.append(new_coefficient)\n\n return(derivated_coefficients)", "def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2", "def construct_poly(data, power):\n return np.power(data, power)", "def fourth_poly(a, b, c, d, e):\n return lambda z: a*z**4 + b*z**3 + c*z**2 + d*z + e", "def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return 
phi.T", "def linear_simplify_poly(poly):\n if len(poly) < 4:\n return poly\n\n q = Queue()\n for v in poly:\n q.put(v)\n\n new_poly = []\n a = q.get()\n b = q.get()\n while True:\n if q.empty():\n new_poly += [a,b]\n break\n c = q.get()\n e1 = (b-a).normalized()\n e2 = (c-b).normalized()\n if abs(1.0 - e1.dot(e2)) < 1e-2:\n # colinear. skip b.\n a = a\n b = c\n else:\n # a,b needed.\n new_poly += [a]\n a = b\n b = c\n return new_poly", "def polyLS(pd, x, y, f, X, Y \\\n, coeff = [], xmc = [], ymc = [], ell = [], w = [], ELL = [], W = []) :\n xmc, ymc, ell, w, ELL, W = assignDefaults(x, y, xmc, ymc, ell, w, ELL, W)\n \n numP = int((pd + 1) * (pd + 2) / 2)\n \n if (len(xmc) == 1) and (len(ymc) == 1) :\n \n\n if coeff == [] :\n p = poly(x, y, pd)\n coeff = np.linalg.lstsq(p, f, rcond=None)[0]\n\n B = poly(X, Y, pd)\n approx = B.dot(coeff).flatten()\n coeff_copy = coeff\n \n else :\n \n approx = np.zeros(len(X), float)\n \n if coeff == [] :\n for i in range(len(xmc)) :\n IND = inSquare(x, y, xmc[i], ymc[i], ELL, W)\n if len(IND) < int(1.5 * numP) :\n raise ValueError(\"Not enough data for this polynomial \" \\\n + \"degree.\\nEither lower the polynomial degree or \" \\\n + \"decrease the number of subdivisions.\")\n p = poly(x[IND], y[IND], pd)\n lam = np.linalg.lstsq(p, f[IND], rcond=None)[0]\n coeff.append(lam)\n\n coeff_copy = coeff.copy()\n\n for i in range(len(xmc) - 1, -1, -1) :\n IND = inSquare(X, Y, xmc[i], ymc[i], ell, w)\n B = poly(X[IND], Y[IND], pd)\n lam = coeff.pop()\n approx[IND] = B.dot(lam).flatten()\n \n return approx, coeff_copy", "def poly_derivative(poly):\n if not type(poly) is list or len(poly) == 0 or type(poly[0]) is not int:\n return None\n\n derivative = []\n for i in range(1, len(poly)):\n derivative.append(poly[i] * i)\n\n if derivative == []:\n derivative = [0]\n\n return derivative", "def poly_desc(W, b):\n result = 'y = '\n for i, w in enumerate(W):\n result += '{:+.2f} x^{} '.format(w, len(W) - i)\n result += '{:+.2f}'.format(b[0])\n return result", "def linear_polynomial(self, e: 'PFElement') -> Polynomial:\n poly = self.polynomial(-e)\n poly += poly.monic(1)\n return poly", "def parse_poly(self, expr: str) -> Polynomial:\n return symbolic_polynomial(expr, self)", "def poly_derivative(poly):\n res = []\n if type(poly) is not list or len(poly) == 0:\n return None\n if len(poly) == 1:\n return([0])\n for i in range(1, len(poly)):\n if type(poly[i]) is not int:\n return None\n res.append(poly[i] * i)\n return(res)", "def polynomial(self, *args, indeterminate: str = 'X') -> Polynomial:\n return Polynomial([self.element(c) for c in args], base_field=self, indeterminate=indeterminate)", "def newton_divided_difference(x, y):\n\n n = x.size\n q = np.zeros((n, n - 1))\n # Insert 'y' in the first column of the matrix 'q'\n q = np.concatenate((y[:, None], q), axis=1)\n\n for i in range(1, n):\n for j in range(1, i + 1):\n q[i, j] = (q[i, j - 1] - q[i - 1, j - 1]) / (x[i] - x[i - j])\n\n # Copy the diagonal values of the matrix q to the vector f\n f = np.zeros(n)\n for i in range(0, n):\n f[i] = q[i, i]\n\n # Prints the polynomial\n print(\"The polynomial is:\")\n print(\"p(x)={:+.4f}\".format(f[0]), end=\"\")\n for i in range(1, n):\n print(\"{:+.4f}\".format(f[i]), end=\"\")\n for j in range(1, i + 1):\n print(\"(x{:+.4f})\".format(x[j] * -1), end=\"\")\n print(\"\")\n\n return [f]", "def poly_derivative(poly):\n result = []\n\n if poly is None or type(poly) != list or poly == []:\n return None\n\n for i in range(len(poly)):\n if type(poly[i]) not in (int, float):\n 
return None\n elif len(poly) == 1:\n result.append(0)\n else:\n if i == 0:\n continue\n result.append(i * poly[i])\n\n return result", "def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y", "def zzX_to_poly(f, *symbols):\n from sympy.polys import Poly\n\n terms = {}\n\n for monom, coeff in zzX_to_dict(f).iteritems():\n terms[monom] = Integer(int(coeff))\n\n return Poly(terms, *symbols)", "def poly_derivative(poly):\n if type(poly) != list:\n return None\n size = len(poly)\n if size == 0:\n return None\n for e in poly:\n if not isinstance(e, (int, float)):\n return None\n if size == 1:\n return [0]\n deriv = []\n for i in range(1, size):\n temp = poly[i] * i\n deriv.append(temp)\n return deriv", "def base_polynome(numbers):\n\n monomes = [ x**n for n in numbers ]\n polynome = sum(monomes)\n\n return poly(polynome, x)" ]
[ "0.6846461", "0.65011793", "0.6292352", "0.6274255", "0.62225056", "0.61097145", "0.60969347", "0.6090148", "0.60779357", "0.60200155", "0.60196537", "0.59783614", "0.5969226", "0.5965326", "0.5856345", "0.5849811", "0.58080703", "0.5775723", "0.5735005", "0.5733751", "0.5722096", "0.5698544", "0.56953406", "0.56824267", "0.5653592", "0.5636684", "0.563195", "0.56265783", "0.56259936", "0.55919147" ]
0.70788777
0
Draws an interpolation plot for the given interpolation polynomial and nodes.
def draw_interpolation_plot(start_x, end_x, interpolation_polynomial, nodes, freq=200, additional_polynomial=None, additional_nodes=None): # TODO: calculate figure size dynamically plt.figure(figsize=(8, 6), dpi=80) x = numpy.linspace(start_x, end_x, freq) # TODO: eval should be changed to something more secure (like numexpr evaluate())... y = eval(str(interpolation_polynomial)) plt.subplot(211) plt.plot(x, y, [node[0] for node in nodes], [node[1] for node in nodes], 'ro') plt.grid(True) if additional_polynomial: poly_values = eval(str(additional_polynomial)) plt.subplot(212) plt.plot(x, poly_values, [node[0] for node in additional_nodes], [node[1] for node in additional_nodes], 'ro') plt.grid(True) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawPolynomial(self, index, color, precision=200):\n graph = self.graphs[index]\n if len(graph) > 1:\n p = PolynomialInterpolation(graph, color)\n p.show(self.context, precision)", "def plot_interpolation(self):\r\n self.plot_all_logcalls(True)\r\n print_log('info', 'Interpolation was finished.')", "def plot_interpolation(ddG_x,ddG_y,ddG_x2,ddG_y2,ddG_x_interp,ddG_y_interp,close=True,save=True):\n\tplt.rc('text', usetex=True)\n\tplt.plot(ddG_x,ddG_y,'*',markersize=5,fillstyle='none')\n\tplt.plot(ddG_x2,ddG_y2,'o',markersize=5,fillstyle='none')\n\tplt.plot(ddG_x_interp, ddG_y_interp, '-')\n\tplt.legend(['input lambdas', 'cub/lin interpolated lambdas','cubic interpolation func'], loc='best')\n\tplt.xlabel(r'$\\lambda$') \n\tplt.ylabel(r\"$\\Delta \\Delta G / \\mathrm{kJ mol^{-1}}$\")\n\tif save:\n\t\tplt.savefig('ddG_interpolation.pdf')\n\tif close:\n\t\tplt.close()", "def make_plot(x,y):", "def plot_polynomial(self):\n plt.scatter(self.x_values, self.y_values)\n plt.title(f\"Graph of polynomial between {np.floor(min(self.x_values))} and {np.ceil(max(self.x_values))}\")\n plt.xlabel('x-axis')\n plt.ylabel('y-axis')\n plt.show()", "def _plot_interpolation(x, y, x_new, y_new, title=\"\"):\n f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)\n axes = (ax1, ax2, ax3)\n coord = [\"X\", \"Y\", \"Z\"]\n\n for idx, ax in enumerate(axes):\n ax.set_title(title + \" (\" + coord[idx] + \" coordinate)\", fontsize=12)\n ax.set_ylabel(\"m\")\n ax.plot(x, y[:, idx], \"bo\", label=\"Original data\")\n ax.plot(x_new, y_new[:, idx], \"ro\", label=\"Interpolated data\")\n\n ax3.set_xlabel(\"Time\")\n ax1.legend(fontsize=8, loc=1)\n f.subplots_adjust(hspace=0.3)\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\n plt.show()", "def solve_i():\r\n x = np.array([ -2.1, -1.45, -1.3, -0.2, 0.1, 0.15, 0.8, 1.1, 1.5, 2.8, 3.8 ])\r\n y = np.array([0.012155, 0.122151, 0.184520, 0.960789, 0.990050, 0.977751,\r\n 0.527292, 0.298197, 0.105399, 3.936690E-4, 5.355348E-7])\r\n # find and plot both interpolations and the oiginal points\r\n plt.figure(1)\r\n cubic_interpol(x,y)\r\n lin_interpol(x,y)\r\n plt.plot(x, y, 'rx', ms = 10, label = 'Points')\r\n # plot settings\r\n plt.title('Cubic & Linear Interpolation Given Points')\r\n plt.xlabel('x',fontsize = 14)\r\n plt.ylabel('y',fontsize = 14)\r\n plt.legend()", "def graph_points():\n fig_name = 'lect2_num_solv'\n\n # given data\n x = np.array([0.0, 0.4, 0.6, 0.8])\n ra = np.array([0.01, 0.0080, 0.005, 0.002])\n design_eq = np.divide(2.0, ra)\n print(\"Generic example design equation points: {}\".format([\"{:0.1f}\".format(x) for x in design_eq]))\n\n # cubic spline\n x_new = np.linspace(0.0, 0.8, 101)\n # alternately, from interpolation\n y_interp = interpolate.interp1d(x, design_eq, kind='quadratic')\n make_fig(fig_name, x, design_eq, ls1='o', x2_array=x_new, y2_array=y_interp(x_new),\n x_label=r'conversion (X, unitless)', y_label=r'$\\displaystyle\\frac{F_{A0}}{-r_A} \\left(L\\right)$',\n x_lima=0.0, x_limb=0.8, y_lima=0.0, y_limb=1000,\n fig_width=4, color2='green',\n )", "def plot_all_logcalls(self, interpolation=False):\r\n\r\n # instead of ax.hold(False)\r\n self.figure.clear()\r\n\r\n # create an axis\r\n self.ax1 = self.figure.add_subplot(111)\r\n self.ax1.set_title('Logcalls pattern')\r\n\r\n any_node_logcalls = dict()\r\n sr_x = list()\r\n\r\n for e in self.called_func_route:\r\n for i in self.nodes:\r\n (i not in sr_x) and sr_x.append(i)\r\n if e not in any_node_logcalls:\r\n any_node_logcalls[e] = list()\r\n 
any_node_logcalls[e].append(self.nodes[i].get_func_count(e))\r\n\r\n progress_value = 0\r\n self.progressBar.setValue(0)\r\n for e in self.called_func_route:\r\n sr_fx = any_node_logcalls[e]\r\n self.ax1.plot(sr_x, sr_fx, linestyle='', marker='o', color='b')\r\n\r\n if interpolation:\r\n Lx = self.get_sub_two_interpolation_func(sr_x, sr_fx)\r\n # Enlargement the range to 10 folds for drawing the result of interpolation.\r\n self.tmp_x = [\r\n i / 10.0 for i in range(sr_x[0] * 10, sr_x[-1] * 10 + 1)\r\n ]\r\n self.tmp_y = [Lx(i) for i in self.tmp_x]\r\n self.ax1.plot(\r\n self.tmp_x, self.tmp_y, linestyle='--', marker='', label=e)\r\n else:\r\n self.ax1.plot(sr_x, sr_fx, linestyle='--', marker='o', label=e)\r\n\r\n progress_value += 1\r\n self.progressBar.setValue(\r\n float(progress_value) / len(self.called_func_route) * 100)\r\n\r\n self.ax1.legend(loc='best')\r\n\r\n # refresh canvas\r\n self.canvas.draw()", "def plot(self):\n\t\tself.plotOfIP().plot()", "def polynomialInterpolation2D(self,graph,T):\n x=[graph[i][0] for i in range(len(graph))]\n y=[graph[i][1] for i in range(len(graph))]\n return lambda t:(self.polynomialInterpolation(x)(t),self.polynomialInterpolation(y)(t))", "def cubic_interpol(X_P, Y_P):\r\n y_derivs = derivatives( X_P, Y_P ).flatten() # flatten as FB_sub returns 2d array\r\n \r\n for j in np.arange( X_P.shape[0] - 1 ): # for every x[i] and x[i+1] pair\r\n plot_points = np.linspace( X_P[j], X_P[j+1], 20) # points to plot in the interval\r\n params = [ X_P[j], X_P[j+1], Y_P[j], Y_P[j+1],\r\n y_derivs[j], y_derivs[j+1]]\r\n f_points = f(plot_points, params)\r\n plt.plot(plot_points, f_points, 'b-', ms = .5, label = 'Cubic'if j==0 else \"\") # only label one plot\r", "def plot(self, X, sids, nids):\n X = tocontig(X) # ensure it's contig\n gw = self.glWidget\n gw.points = X\n gw.npoints = len(X)\n gw.sids = sids\n gw.nids = nids\n gw.color() # set colors\n gw.updateGL()", "def plot(self, X, sids, nids):\n X = tocontig(X) # ensure it's contig\n gw = self.glWidget\n gw.points = X\n gw.npoints = len(X)\n gw.sids = sids\n gw.nids = nids\n gw.color() # set colors\n gw.updateGL()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def plot_graph(self) -> None:", "def update_interpolated_and_dots(function_selector, discretization_Slider,\n interpolated_values):\n # Each base point (left or right foot and middle node) is shared by three\n # Finite Elements aside from the two the most 
right and the most left\n number_points = discretization_slider.value + 2\n\n x = np.linspace(LEFT_X, RIGHT_X, number_points)\n\n y = functions[function_selector.active](x)\n\n interpolated_values.data = {\n \"x\": x,\n \"y\": y\n }", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', 
ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def plot_nodes(self, node_list):\n points = Marker()\n #visualizations points and lines..\n points.header.frame_id = \"map\"\n points.header.stamp = rospy.get_rostime()\n points.ns = \"markers\"\n points.id = 0\n points.type = points.POINTS\n points.action = points.ADD\n points.pose.orientation.w = 1.0\n points.scale.x = 2*self.rviz_tuning_plt\n points.scale.y = 2*self.rviz_tuning_plt\n points.color.r = 0.0\n points.color.g = 1.0\n points.color.b = 0.0\n points.color.a = 1.0\n points.lifetime = rospy.Duration()\n\n for node in node_list:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.01\n points.points.append(p1)\n \n self.pub_nodes.publish(points)", "def test_plot1(plot=1, version='scalar'):\n Lx = 10\n Ly = 10\n c = 1.0\n\n def I2(x, y):\n return exp(-(x-Lx/2.0)**2/2.0 -(y-Ly/2.0)**2/2.0)\n def f(x, y, t):\n return 0.0\n def bc(x, y, t):\n return 0.0\n\n I2 = StringFunction('exp(-(x-Lx/2.0)**2/2.0 -(y-Ly/2.0)**2/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('0.0', independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('0.0', independent_variables=('x', 'y', 't'),\n globals=globals())\n if plot:\n g = Gnuplot.Gnuplot(persist=1)\n g('set parametric')\n g('set data style lines')\n g('set hidden')\n g('set contour base')\n g('set zrange [-0.7:0.7]') # nice plot...\n \n def action(u, xv, yv, t):\n #print 'action, t=',t,'\\nu=',u, '\\nx=',x, '\\ny=', y\n if plot:\n data = Gnuplot.GridData(u, xv[:,0], yv[0,:], binary=0)\n g.splot(data)\n g('set title \"t=%g\"' % t)\n if plot == 2:\n g.hardcopy(filename='tmp_%020f.ps' % t, enhanced=1, mode='eps',\n color=0, fontname='Times-Roman', fontsize=14)\n time.sleep(1)\n time.sleep(0.2) # pause between frames\n\n implementation = {'ic': version, 'inner': version, 'bc': version}\n nx = 40; ny = 40; tstop = 20 # tstop = 700\n print 'test_plot1:', f, bc, I2\n dt, t_ic, t_inner, t_bc = \\\n solver(I2, f, c, bc, Lx, Ly, nx, ny, 0, tstop,\n user_action=action, implementation=implementation)\n print 'time ic: %s, time scheme: %s, time bc: %s' % (t_ic, t_inner, t_bc)\n time.sleep(3)", "def drawPlotParts(x, y, xlabel, ylabel, nparts):\n\tle = len(y)\n\tif x is None:\n\t\tx = list(range(le))\n\tstep = int(le / nparts)\n\tfor i in range(nparts):\n\t\tbeg = i * step\n\t\tend = le if i == nparts - 1 else beg + step\n\t\tdrawPlot(x[beg:end], y[beg:end], xlabel, ylabel)", "def visualize(nodes, weights, new_corners):\n import matplotlib.pyplot as plt\n\n if nodes.shape[1] == 1:\n plt.scatter(nodes[:], np.zeros(nodes.shape), s=weights * 40)\n plt.plot(\n [\n new_corners[0, 0],\n new_corners[0, 0],\n new_corners[1, 0],\n new_corners[1, 0],\n new_corners[0, 0],\n ],\n [0.5, 
-0.5, -0.5, 0.5, 0.5],\n )\n plt.show()\n elif nodes.shape[1] == 2:\n plt.scatter(nodes[:, 0], nodes[:, 1], s=weights * 40)\n plt.plot(\n [\n new_corners[0, 0],\n new_corners[1, 0],\n new_corners[2, 0],\n new_corners[0, 0],\n ],\n [\n new_corners[0, 1],\n new_corners[1, 1],\n new_corners[2, 1],\n new_corners[0, 1],\n ],\n )\n plt.show()\n elif nodes.shape[1] == 3:\n from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n ax.scatter(nodes[:, 0], nodes[:, 1], nodes[:, 2], s=weights * 40)\n\n p = new_corners\n for i in range(4):\n verts = [[p[(i) % 4, :], p[(i + 1) % 4, :], p[(i + 2) % 4, :]]]\n srf = Poly3DCollection(verts, alpha=0.25, facecolor=\"#800000\")\n plt.gca().add_collection3d(srf)\n plt.show()", "def polynomialInterpolation(self,s):\n #print(s)\n #s[i]=xi,s[j]=xj\n return Polynomial.createFromInterpolation(s,range(len(s)))\n #return Polynomial(s,T)", "def lin_interpol(x_p, y_p):\r\n f = np.zeros([ x_p.shape[0] - 1 , 4 ]) # Coefficents and interval array\r\n \r\n for i in range( x_p.shape[0] - 1 ): # for every x[i], x[i+1] pair\r\n \r\n x_coeff = (y_p[i+1] - y_p[i]) / (x_p[i+1] - x_p[i])\r\n const = (x_p[i+1]*y_p[i] - x_p[i]*y_p[i+1] ) / (x_p[i+1] - x_p[i])\r\n \r\n # save the x coefficent, constant and the interval for this line\r\n f[i,:] = x_coeff, const, x_p[i], x_p[i+1]\r\n \r\n for a, b, start, end in f: # for every line fitted\r\n line_x = np.linspace( start, end, 3) # points to plot in x_range\r\n line_y = line_x * a + b # find the fitted line value at these points\r\n plt.plot(line_x,line_y,'k--', lw = 1, label = 'Linear' if a==f[0][0] else \"\") # only label one plot\r", "def plot_planned_trajectory(ax, xs, ys, headings, steers, physical_params, interval = 20):\n ax.plot(xs, ys, color=\"r\")\n for i in range(len(steers)):\n # ellipse = Ellipse(xy = (x, y), width = x_length, height = y_length, angle = np.rad2deg(heading), alpha = 0.4, ec = \"k\", fc = fc)\n # ax.add_patch(ellipse)\n if i % interval == 0:\n plot_vehicle(ax, xs[i], ys[i], headings[i], steers[i], 0.7, 0.7, physical_params.wheel_length, physical_params.wheel_width)\n ax.set_xlabel(\"X Position\")\n ax.set_ylabel(\"Y Position\")\n ax.axis('equal')", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def draw_points():\n\n for node in self._nodes:\n\n x = node_properties[\"node_x\"][node]\n y = node_properties[\"node_y\"][node]\n ax.scatter(\n x,\n y,\n zorder=10,\n edgecolors=\"k\",\n linewidths=0.5,\n **self.get_node_data(node),\n )\n\n for label in self._nodes:\n\n x = node_properties[\"label_x\"][label]\n y = node_properties[\"label_y\"][label]\n rotation = node_properties[\"rotation\"][label]\n ha = node_properties[\"ha\"][label]\n\n attr = {**dict(backgroundcolor=\"white\"), **text_attr}\n ax.text(\n x,\n 
y,\n textwrap.shorten(text=label, width=TEXTLEN),\n rotation=rotation,\n ha=ha,\n va=\"center\",\n rotation_mode=\"anchor\",\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n zorder=11,\n **attr,\n )", "def plot():\n pass", "def PlotMeshNumbering(self, figure=None, show_plot=True):\n\n self.__do_essential_memebers_exist__()\n\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n\n if self.element_type == \"tri\":\n\n if figure is None:\n figure = plt.figure()\n plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3])\n plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3], np.ones(self.points.shape[0]), 100,alpha=0.3)\n\n for i in range(0,self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"quad\":\n\n if figure is None:\n figure = plt.figure()\n point_radius = 3.\n\n C = self.InferPolynomialDegree() - 1\n\n edge_elements = self.GetElementsEdgeNumberingQuad()\n reference_edges = NodeArrangementQuad(C)[0]\n reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)\n reference_edges = np.delete(reference_edges,1,1)\n\n self.GetEdgesQuad()\n x_edges = np.zeros((C+2,self.all_edges.shape[0]))\n y_edges = np.zeros((C+2,self.all_edges.shape[0]))\n\n BasesOneD = np.eye(2,2)\n for iedge in range(self.all_edges.shape[0]):\n ielem = edge_elements[iedge,0]\n edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]\n x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T\n\n\n plt.plot(x_edges,y_edges,'-k')\n\n for i in range(self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n\n import matplotlib as mpl\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n if figure is None:\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(800,600))\n view = mlab.view()\n figure.scene.disable_render = True\n\n color = mpl.colors.hex2color('#F88379')\n\n linewidth = 3.\n # trimesh_h = mlab.triangular_mesh(self.points[:,0],\n # self.points[:,1], self.points[:,2], self.faces[:,:3],\n # line_width=linewidth,tube_radius=linewidth,color=(0,0.6,0.4),\n # representation='wireframe') # representation='surface'\n\n # # CHANGE LIGHTING OPTION\n # trimesh_h.actor.property.interpolation = 'phong'\n # trimesh_h.actor.property.specular = 0.1\n # trimesh_h.actor.property.specular_power = 5\n\n # PLOTTING EDGES\n from Florence.PostProcessing import PostProcess\n tmesh = PostProcess(3,3).Tessellate(self, np.zeros_like(self.points), interpolation_degree=0,\n plot_points=True, plot_edges=True, plot_surfaces=False)\n\n x_edges = tmesh.x_edges\n y_edges = tmesh.y_edges\n z_edges = tmesh.z_edges\n connections = tmesh.connections\n\n 
src = mlab.pipeline.scalar_scatter(x_edges.T.copy().flatten(), y_edges.T.copy().flatten(), z_edges.T.copy().flatten())\n src.mlab_source.dataset.lines = connections\n h_edges = mlab.pipeline.surface(src, color = (0,0.6,0.4), line_width=linewidth)\n # AVOID WARNINGS\n # lines = mlab.pipeline.stripper(src)\n # h_edges = mlab.pipeline.surface(lines, color = (0,0.6,0.4), line_width=linewidth)\n\n # ELEMENT NUMBERING\n # for i in range(0,self.elements.shape[0]):\n # coord = self.points[self.elements[i,:],:]\n # x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n # y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n # z_avg = np.sum(coord[:,2])/self.elements.shape[1]\n\n # # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=color)\n # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=(0,0,0.),scale=2)\n\n # POINT NUMBERING\n for i in range(self.elements.shape[0]):\n for j in range(self.elements.shape[1]):\n text_obj = mlab.text3d(self.points[self.elements[i,j],0],\n self.points[self.elements[i,j],1],self.points[self.elements[i,j],2],str(self.elements[i,j]),\n color=(0,0,0.),scale=0.05)\n\n\n figure.scene.disable_render = False\n\n if show_plot:\n # mlab.view(*view)\n mlab.show()", "def redraw_whole_plot(self):\n pcent_rand = self.rand\n pcent_decimal = pcent_rand/100\n self.x = np.array([\n n*np.random.uniform(low=1-pcent_decimal, high=1+pcent_decimal) \n for n in np.linspace(3, 9, self.num_points)\n ])\n self.y = np.array([\n n*np.random.uniform(low=1-pcent_decimal, high=1+pcent_decimal)\n for n in np.linspace(3, 9, self.num_points)\n ])\n self.redraw_slope()" ]
[ "0.61234254", "0.5854555", "0.58188057", "0.5757321", "0.57179767", "0.5697689", "0.5670117", "0.56530726", "0.56499636", "0.5633391", "0.56109315", "0.56016254", "0.5506603", "0.5506603", "0.5494239", "0.5491141", "0.5464488", "0.54549974", "0.5449367", "0.5433634", "0.5379167", "0.5357248", "0.5311231", "0.52913535", "0.52781713", "0.5265587", "0.524423", "0.52378047", "0.5233742", "0.522808" ]
0.7956468
0
This method generates a header file containing the data contained in the numpy array provided. It is used to capture the tensor data (for both inputs and expected outputs) to be bundled into the standalone application.
def _create_header_file(tensor_name, npy_data, output_path, data_linkage): file_path = pathlib.Path(f"{output_path}/" + tensor_name).resolve() # create header file raw_path = file_path.with_suffix(".h").resolve() with open(raw_path, "w") as header_file: header_file.write("#include <stddef.h>\n") header_file.write("#include <stdint.h>\n") header_file.write("#include <dlpack/dlpack.h>\n") header_file.write(f"const size_t {tensor_name}_len = {npy_data.size};\n") _emit_data_linkage(header_file, data_linkage) header_file.write(f"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =") header_file.write("{") for i in np.ndindex(npy_data.shape): header_file.write(f"{npy_data[i]}, ") header_file.write("};\n\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_header(_metadata, rename_padding=False):\n template = \"\"\"\\\n VERSION {version}\n FIELDS {fields}\n SIZE {size}\n TYPE {type}\n COUNT {count}\n WIDTH {width}\n HEIGHT {height}\n VIEWPOINT {viewpoint}\n POINTS {points}\n DATA {data}\n \"\"\"\n str_metadata = _metadata.copy()\n\n if not rename_padding:\n str_metadata['fields'] = ' '.join(_metadata['fields'])\n else:\n new_fields = []\n for f in _metadata['fields']:\n if f == '_':\n new_fields.append('padding')\n else:\n new_fields.append(f)\n str_metadata['fields'] = ' '.join(new_fields)\n str_metadata['size'] = ' '.join(map(str, _metadata['size']))\n str_metadata['type'] = ' '.join(_metadata['type'])\n str_metadata['count'] = ' '.join(map(str, _metadata['count']))\n str_metadata['width'] = str(_metadata['width'])\n str_metadata['height'] = str(_metadata['height'])\n str_metadata['viewpoint'] = ' '.join(map(str, _metadata['viewpoint']))\n str_metadata['points'] = str(_metadata['points'])\n tmpl = template.format(**str_metadata)\n return tmpl", "def generate_header(value_type, num_elements, element_multiplier, imag, name_length, name):\n result = []\n\n result += Ensemble.int32_to_bytes(value_type) # Value Type\n result += Ensemble.int32_to_bytes(num_elements) # Number of elements\n result += Ensemble.int32_to_bytes(element_multiplier) # Element Multiplier\n result += Ensemble.int32_to_bytes(imag) # Image\n result += Ensemble.int32_to_bytes(name_length) # Name Length\n result += name.encode() # Name\n\n return result", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: {nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} 
loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is not in the same directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def write_cpp_header(self):\n prefix = \"#include <frc/controller/\"\n headers = []\n headers.append(prefix + self.plant_coeffs_header + \".h>\")\n headers.append(prefix + self.ctrl_coeffs_header + \".h>\")\n headers.append(prefix + self.obsv_coeffs_header + \".h>\")\n headers.append(prefix + self.loop_header + \".h>\")\n\n with open(\n self.class_name + \"Coeffs.\" + self.header_extension, \"w\"\n ) as header_file:\n print(\"#pragma once\" + os.linesep, file=header_file)\n for header in sorted(headers):\n print(header, file=header_file)\n header_file.write(os.linesep)\n self.__write_cpp_func_name(\n header_file, self.plant_coeffs_type, \"PlantCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.ctrl_coeffs_type, \"ControllerCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.obsv_coeffs_type, \"ObserverCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.loop_type, \"Loop\", in_header=True\n )", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.txt\", \"w\")\n file.write(str(\"\\t\".join(hdata)) + \"\\n\")", "def IIR_sos_header(fname_out,SOS_mat):\r\n Ns,Mcol = SOS_mat.shape\r\n f = open(fname_out,'wt')\r\n f.write('//define a IIR SOS CMSIS-DSP coefficient array\\n\\n')\r\n f.write('#include <stdint.h>\\n\\n')\r\n f.write('#ifndef STAGES\\n')\r\n f.write('#define STAGES %d\\n' % Ns)\r\n f.write('#endif\\n')\r\n f.write('/*********************************************************/\\n');\r\n f.write('/* IIR SOS Filter Coefficients */\\n');\r\n f.write('float32_t ba_coeff[%d] = { //b0,b1,b2,a1,a2,... 
by stage\\n' % (5*Ns))\r\n for k in range(Ns):\r\n if (k < Ns-1):\r\n f.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n f.write(' %+-13e, %+-13e,\\n' % \\\r\n (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n else:\r\n f.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n f.write(' %+-13e, %+-13e\\n' % \\\r\n (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # for k in range(Ns):\r\n # if (k < Ns-1):\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f,\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # else:\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n f.write('};\\n')\r\n f.write('/*********************************************************/\\n')\r\n f.close()", "def write_header(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"w+\")\r\n self.file.write(self.version)\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line)\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line)\r\n print(self.body_header_line.line)", "def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += \" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file", "def generateHeader(param_dict, filename_out, test_mode=False, template=\"uvfits_headers/header.tpl\"):\n findAndReplace(param_dict, template,filename_out, test_mode)", "def write_sim_header_data(self, output):\n for section in output:\n if section[0] == Datatype.title:\n self._writer.writerow([section[1]])\n if section[0] == Datatype.param_list:\n for field in section[1]:\n self._writer.writerow([field[0], 
field[1]])", "def _write_header(self, header):\n # write out telescope and source information\n header[\"latitude\"] = self.telescope_location_lat_lon_alt_degrees[0]\n header[\"longitude\"] = self.telescope_location_lat_lon_alt_degrees[1]\n header[\"altitude\"] = self.telescope_location_lat_lon_alt_degrees[2]\n header[\"telescope_name\"] = np.string_(self.telescope_name)\n header[\"instrument\"] = np.string_(self.instrument)\n header[\"object_name\"] = np.string_(self.object_name)\n\n # write out required UVParameters\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"Nfreqs\"] = self.Nfreqs\n header[\"Npols\"] = self.Npols\n header[\"Nspws\"] = self.Nspws\n header[\"Ntimes\"] = self.Ntimes\n header[\"antenna_numbers\"] = self.antenna_numbers\n header[\"uvw_array\"] = self.uvw_array\n header[\"vis_units\"] = np.string_(self.vis_units)\n header[\"channel_width\"] = self.channel_width\n header[\"time_array\"] = self.time_array\n header[\"freq_array\"] = self.freq_array\n header[\"integration_time\"] = self.integration_time\n header[\"lst_array\"] = self.lst_array\n header[\"polarization_array\"] = self.polarization_array\n header[\"spw_array\"] = self.spw_array\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"antenna_positions\"] = self.antenna_positions\n\n # handle antenna_names; works for lists or arrays\n header[\"antenna_names\"] = np.asarray(self.antenna_names, dtype=\"bytes\")\n\n # write out phasing information\n header[\"phase_type\"] = np.string_(self.phase_type)\n if self.phase_center_ra is not None:\n header[\"phase_center_ra\"] = self.phase_center_ra\n if self.phase_center_dec is not None:\n header[\"phase_center_dec\"] = self.phase_center_dec\n if self.phase_center_epoch is not None:\n header[\"phase_center_epoch\"] = self.phase_center_epoch\n if self.phase_center_frame is not None:\n header[\"phase_center_frame\"] = np.string_(self.phase_center_frame)\n\n # write out optional parameters\n if self.dut1 is not None:\n header[\"dut1\"] = self.dut1\n if self.earth_omega is not None:\n header[\"earth_omega\"] = self.earth_omega\n if self.gst0 is not None:\n header[\"gst0\"] = self.gst0\n if self.rdate is not None:\n header[\"rdate\"] = np.string_(self.rdate)\n if self.timesys is not None:\n header[\"timesys\"] = np.string_(self.timesys)\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n if self.blt_order is not None:\n header[\"blt_order\"] = np.string_(\", \".join(self.blt_order))\n if self.antenna_diameters is not None:\n header[\"antenna_diameters\"] = self.antenna_diameters\n if self.uvplane_reference_time is not None:\n header[\"uvplane_reference_time\"] = self.uvplane_reference_time\n if self.eq_coeffs is not None:\n header[\"eq_coeffs\"] = self.eq_coeffs\n if self.eq_coeffs_convention is not None:\n header[\"eq_coeffs_convention\"] = np.string_(self.eq_coeffs_convention)\n\n # write out extra keywords if it exists and has elements\n if self.extra_keywords:\n extra_keywords = header.create_group(\"extra_keywords\")\n for k in self.extra_keywords.keys():\n if isinstance(self.extra_keywords[k], str):\n extra_keywords[k] = np.string_(self.extra_keywords[k])\n else:\n extra_keywords[k] = self.extra_keywords[k]\n\n # write out history\n header[\"history\"] = np.string_(self.history)\n\n return", "def writeHeader( self ):\n for k in self.secondaryTargets.keys():\n 
fileName = self.treyGene[k] + \"-GenesinCommon.txt\" \n with open( fileName, 'w' ) as out:\n out.write(\"%s\\t%s\\t%s\\n\" %(\"Gene_trey\", \"Gene\", \"Gene_inCommon\" ))\n out.close()", "def create_header(analysis_outdir, metadata, rg_dict, specimen_dict, logger=default_logger):\n\n rgid = rg_dict[\"ID\"].replace(\".\", \"_\")\n header = \"%s/header-%s.sam\" %(analysis_outdir, rg_dict[\"ID\"])\n header_file = open(header, \"w\")\n header_file.write(\"@HD\\tVN:1.4\\n\")\n PI_STR = \"\"\n if len(rg_dict[\"PI\"]):\n PI_STR=\"PI:%s\\t\" % (rg_dict[\"PI\"])\n header_file.write(\"@RG\\tID:%s:%s\\tCN:%s\\tPL:%s\\tPM:%s\\tLB:%s:%s:%s\\t%sSM:%s\\tPU:%s:%s\\tDT:%s\\n\"\n %(metadata[\"center_name\"], rgid,metadata[\"center_name\"], metadata[\"platform\"],metadata[\"platform_model\"], metadata[\"seqtype\"],\n metadata[\"center_name\"], rg_dict[\"LB\"], PI_STR, metadata[\"aliquot_id\"], rg_dict[\"CN\"], rg_dict[\"PU\"], getUTCDate(rg_dict[\"DT\"])))\n header_file.write(\"@CO\\tdcc_project_code:%s-US\\n\" %metadata[\"disease\"])\n header_file.write(\"@CO\\tsubmitter_donor_id:%s\\n\" %metadata[\"participant_id\"])\n header_file.write(\"@CO\\tsubmitter_specimen_id:%s\\n\" %metadata[\"sample_id\"])\n header_file.write(\"@CO\\tsubmitter_sample_id:%s\\n\" %metadata[\"aliquot_id\"])\n\n if metadata[\"sample_type\"] not in specimen_dict:\n msg = \"sample_type %s not found in specimen mapping\" % metadata[\"sample_type\"]\n logger.error(msg)\n if not FORCE_RUN:\n raise HeaderException(msg)\n\n if \"sample_type\" in metadata and metadata[\"sample_type\"] in specimen_dict:\n (icgc_type, sample_class) = specimen_dict[metadata[\"sample_type\"]]\n else:\n icgc_type = \"unknown\"\n sample_class = \"unknown\"\n\n #Sanity check about use_cntl\n if \"use_cntl\" in metadata:\n if metadata[\"use_cntl\"] == \"N/A\" and sample_class == \"tumour\":\n msg = \"Tumour sample requires use_cntl, set to %s. Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n if sample_class == \"normal\" and metadata[\"use_cntl\"] != \"N/A\":\n msg = \"Normal sample requires N/A use_cntl, set to %s. 
Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n\n header_file.write(\"@CO\\tdcc_specimen_type:%s\\n\" % icgc_type)\n header_file.write(\"@CO\\tuse_cntl:%s\\n\" %(metadata.get(\"use_cntl\", \"NA\")))\n header_file.close()\n return header", "def csv_make_header(self, fileobj, title, comment=\"\"):\n fileobj.write(csv_line( [\"#Title:\", title] ) )\n fileobj.write(csv_line( [\"#Comment:\", comment] ) )\n #Any other useful comment s trings?\n fileobj.write('#\"First column is the sample phi motor rotation, in radians\"\\n' )\n fileobj.write('#\"Next 6 columns are the XY leg positions in mm, relative to the central (neutral) position.\"\\n' )\n fileobj.write('#\"Next are 2 columns for the stopping criterion parameters.\"\\n' )\n #Line of header info\n fileobj.write(csv_line( ['Phi', 'LegA_X', 'LegA_Y', 'LegB_X', 'LegB_Y', 'LegC_X', 'LegC_Y', 'CountFor', 'CountValue', 'Comment'] ) )", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n \n fileobj.write(csv_line( ['Notes'] + [x.name for x in self.angles] + ['Wait For/n', 'Value'] ) )", "def get_header(self, root):\n header = etree.SubElement(root, \"FileHeader\")\n header.set(\"revMajor\", \"1\")\n header.set(\"revMinor\", \"0\")\n header.set(\"date\", datetime.today().strftime(\"%Y-%m-%dT%H:%M:%S\"))\n header.set(\"description\", \"Generated OpenSCENARIO File\")\n header.set(\"author\", \"QGIS OSCGenerator Plugin\")", "def FIR_header(fname_out,h):\r\n M = len(h)\r\n N = 3 # Coefficients per line\r\n f = open(fname_out,'wt')\r\n f.write('//define a FIR coefficient Array\\n\\n')\r\n f.write('#include <stdint.h>\\n\\n')\r\n f.write('#ifndef M_FIR\\n')\r\n f.write('#define M_FIR %d\\n' % M)\r\n f.write('#endif\\n')\r\n f.write('/************************************************************************/\\n');\r\n f.write('/* FIR Filter Coefficients */\\n');\r\n f.write('float32_t h_FIR[M_FIR] = {')\r\n kk = 0;\r\n for k in range(M):\r\n #k_mod = k % M\r\n if (kk < N-1) and (k < M-1):\r\n f.write('%15.12f,' % h[k])\r\n kk += 1\r\n elif (kk == N-1) & (k < M-1):\r\n f.write('%15.12f,\\n' % h[k])\r\n if k < M:\r\n f.write(' ')\r\n kk = 0\r\n else:\r\n f.write('%15.12f' % h[k]) \r\n f.write('};\\n')\r\n f.write('/************************************************************************/\\n')\r\n f.close()", "def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'", "def edf_write(data, file_name, header_size=1024):\n # get current time\n from time import gmtime, strftime\n today = strftime('%d-%b-%Y', gmtime())\n size = np.shape(data)\n print('data size in pixels is ', size)\n nbytes = np.prod(size) * data.dtype.itemsize\n print('opening', file_name, 'for writing')\n # craft an ascii header of the appropriate size\n f = open(file_name, 'wb')\n head = '{\\n'\n head += 'HeaderID = EH:000001:000000:000000 ;\\n'\n head += 'Image = 1 ;\\n'\n head += 'ByteOrder = LowByteFirst ;\\n'\n head += 'DataType = %13s;\\n' % numpy_to_esrf_datatype(data.dtype)\n print('using data type %s' % numpy_to_esrf_datatype(data.dtype))\n head += 'Dim_1 = %4s;\\n' % size[0]\n if len(size) > 1: head += 'Dim_2 = %4s;\\n' % size[1]\n if len(size) > 2: head += 'Dim_3 = %4s;\\n' % 
size[2]\n head += 'Size = %9s;\\n' % nbytes\n head += 'Date = ' + today + ' ;\\n'\n for i in range(header_size - len(head) - 2):\n head += ' '\n head += '}\\n'\n f.write(head.encode('utf-8'))\n if len(data.shape) == 3:\n s = np.ravel(data.transpose(2, 1, 0)).tostring()\n elif len(data.shape) == 2:\n s = np.ravel(data.transpose(1, 0)).tostring()\n else:\n s = np.ravel(data).tostring()\n f.write(s)\n f.close()", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n fileobj.write(csv_line( ['Comment'] + [x.name.lower() for x in self.angles] + ['Wait For', 'Value'] ) )", "def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"", "def generate_headerfile(template, n_division=10000, df=6, start_chi=25, filepath=\"Chi2PLookup.h\", verbose=False):\n divisor = \"const int Chi2PLookup::divisor = {};\".format(n_division)\n\n names = []\n cutoff = []\n p_values_arrays = []\n degrees_of_freedom = range(1, df+1)\n\n if verbose:\n print(\"Generating p-value arrays...\")\n print(\" df={}\".format(df))\n print(\" precision={}\".format(n_division))\n\n for df in degrees_of_freedom:\n var_name = \"pValues_{}\".format(df)\n names.append(var_name)\n max_chi = max_chi_value(df=df, start_chi=start_chi)\n cutoff.append(max_chi)\n n_elements = max_chi * n_division\n\n chi_values = (val / n_division for val in range(0, n_elements + 1))\n p_values = (str(1 - chi2.cdf(val, df)) for val in chi_values)\n\n if verbose:\n print(\"\\tAdding p-values array to template for degree of freedom = {} ...\".format(df))\n\n p_values_arrays.append(\"double {}[] = {{{}}};\".format(var_name, \", \".join(p_values)))\n\n cutoff_array = \"const int Chi2PLookup::cutoff[] = {{{}}};\".format(\", \".join([str(i) for i in cutoff]))\n p_values_array_of_arrays = \"const double * Chi2PLookup::pValues[] = {{{}}};\\n\".format(\", \".join(names))\n\n template = template.format(divisor, cutoff_array, \"\\n\".join(p_values_arrays), p_values_array_of_arrays)\n\n if verbose:\n print(\"Saving file to: {}\".format(os.path.abspath(filepath)))\n\n with open(filepath, \"w\") as outfile:\n outfile.write(template)\n\n return template", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def WriteHeader(self):\n return", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.html\", \"w\")\n file.write(\"<html>\\n\\t<head>\\n\\t<style>\\n\" +\n \"\\t\\t\\ttable, th, td {border: 1px solid\\n\" +\n \"\\t\\t\\tblack;border-collapse: collapse;}\" +\n \"\\n\\t</style>\\n\" +\n \"\\t</head>\\n\\t<body>\\n\\t\\t<table style=\\\"width:100%\\\">\\n\")\n file.write(\"\\t\\t\\t<tr>\\n\")\n for line in hdata:\n file.write(\n \"\\t\\t\\t\\t\\t<th>\\n\\t\\t\\t\\t\\t\\t\"\n + str(line) + \"\\n\\t\\t\\t\\t\\t</th>\\n\")\n file.write(\"\\t\\t\\t</tr>\\n\")", "def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n joined_sam = open(os.path.join(args.output_dir, 'watson_joinedAligned.out.sam'))\n merged_sam = open(os.path.join(args.output_dir, 'watson_mergedAligned.out.sam'))\n for line in joined_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n else:\n break\n for line in merged_sam:\n if 
line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args", "def generate_output_file(data, extension, headers):\n output_data = _replace_boolean(data)\n output_name = _generate_output_name(extension)\n with open(output_name, 'a', newline='') as file:\n _file_writer(file, extension, output_data, headers)", "def _write_header(self):\n # The last line here must not have a trailing \\n\n self.buffer.write_line(\"def template(self, __io, model=None):\")\n self.buffer.scope_line(\"view = self\")", "def write_header(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\tDB = db_open_dict(filename)\n\t\tDB.set_header(lima, data)\n\telif ftp == \"hdf\":\n\t\tdata.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def writePOVRAYHeader(self, fh):\n settings = self.mainWindow.preferences.povrayForm\n\n focalPoint = self.camera.GetFocalPoint()\n campos = self.camera.GetPosition()\n viewup = self.camera.GetViewUp()\n angle = settings.viewAngle\n if settings.shadowless:\n shadowless = \"shadowless \"\n else:\n shadowless = \"\"\n\n if self.parent.blackBackground:\n rval = gval = bval = 0\n else:\n rval = gval = bval = 1\n\n fh.write(\"camera { perspective location <%f,%f,%f>\\n\" % (- campos[0], campos[1], campos[2]))\n fh.write(\" look_at <%f,%f,%f>\\n\" % (- focalPoint[0], focalPoint[1], focalPoint[2]))\n fh.write(\" angle %f\\n\" % angle)\n fh.write(\" sky <%f,%f,%f> }\\n\" % (- viewup[0], viewup[1], viewup[2]))\n fh.write(\"light_source { <%f,%f,%f> color rgb <1,1,1> %s }\\n\" % (- campos[0], campos[1], campos[2], shadowless))\n fh.write(\"background { color rgb <%f,%f,%f> }\\n\" % (rval, gval, bval))" ]
[ "0.62532175", "0.6250455", "0.6128656", "0.61236686", "0.61083066", "0.60900545", "0.6076962", "0.60498697", "0.6037466", "0.6030116", "0.5991017", "0.59467375", "0.5944712", "0.59186554", "0.5880443", "0.5877636", "0.587325", "0.5867404", "0.5816105", "0.58092946", "0.58088666", "0.5794874", "0.5794466", "0.5767885", "0.57637346", "0.57587516", "0.57574004", "0.5754054", "0.57498926", "0.56999576" ]
0.7628469
0
Convert a tflite model buffer into a Relay module
def convert_to_relay(tflite_model_buf, bind_params_by_name=True): # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1 try: import tflite.Model # pylint: disable=import-outside-toplevel tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite # pylint: disable=import-outside-toplevel tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except ImportError: raise ImportError("The tflite package must be installed") mod, params = relay.frontend.from_tflite(tflite_model) if bind_params_by_name: mod["main"] = relay.build_module.bind_params_by_name(mod["main"], params) return mod, params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_relay_module_and_inputs_from_tflite_file(tflite_model_file, bind_params_by_name=True):\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name)\n\n inputs = dict()\n for param in mod[\"main\"].params:\n name = str(param.name_hint)\n data_shape = [int(i) for i in param.type_annotation.shape]\n dtype = str(param.type_annotation.dtype)\n if np.issubdtype(dtype, np.floating):\n # Since np.random.uniform only allows the ranges of float32,\n # at first float16 is used and scaled afterwards, if necessary.\n in_min, in_max = (np.finfo(\"float16\").min, np.finfo(\"float16\").max)\n data = np.random.uniform(low=in_min, high=in_max, size=data_shape).astype(dtype)\n scale = np.finfo(dtype).min / np.finfo(\"float16\").min\n data *= scale\n elif np.issubdtype(dtype, np.integer):\n in_min, in_max = (np.iinfo(dtype).min, np.iinfo(dtype).max)\n data = np.random.randint(in_min, high=in_max, size=data_shape, dtype=dtype)\n else:\n raise TypeError(f\"Type {dtype} not supported\")\n inputs[name] = data\n\n return mod, inputs, params", "def from_tflite(model, prog_name): #, shape_dict, dtype_dict):\n try:\n import tflite.Model\n import tflite.SubGraph\n import tflite.BuiltinOperator\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n assert isinstance(model, tflite.Model.Model)\n\n # keep the same as tflite\n assert model.SubgraphsLength() == 1, \"only support one subgraph (main subgraph)\"\n subgraph = model.Subgraphs(0)\n\n # model inputs / outputs\n model_inputs = subgraph.InputsAsNumpy()\n model_outputs = subgraph.OutputsAsNumpy()\n assert model_inputs.size == 1, \"Model should have only one input\"\n assert model_outputs.size == 1, \"Model should have only one output\"\n\n # op code in model\n op_converter = OperatorConverter(model, subgraph, prog_name)\n op_converter.is_dequantize = False\n op_converter.check_unsupported_ops()\n\n in_tensor = op_converter.get_tensors(model_inputs)[0]\n out_tensor = op_converter.get_tensors(model_outputs)[0]\n\n op_converter.define_model_sizes(\"IN\", in_tensor)\n op_converter.define_model_sizes(\"OUT\", out_tensor)\n\n op_converter.nn_add_input(in_tensor)\n\n output_nodes = op_converter.convert_op_to_hexagon_nn()\n\n op_converter.nn_add_output(output_nodes)\n\n op_converter.print_nn_nodes()\n\n print(\"tensor_tab:\")\n print(op_converter.tensor_tab)\n\n op_converter.close()\n print(\"Converted Hexagon Model saved to {}\".format(op_converter.h_file.name))", "def convert_from_frozen_graph():\n input_arrays = [\"input\"]\n converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(\n graph_def_file='/media/main/Data/Task/RPiCigDetector/utils/test_model/frozen_inference_graph.pb',\n # both `.pb` and `.pbtxt` files are accepted.\n input_arrays=['input'],\n input_shapes={'input': [1, 224, 224, 3]},\n output_arrays=['MobilenetV1/Predictions/Softmax']\n )\n converter.allow_custom_ops = True\n # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\n # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_type = tf.lite.constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {input_arrays[0]: (128, 128)}\n tflite_model = converter.convert()\n\n # Save the model.\n with open('model.tflite', 'wb') as f:\n f.write(tflite_model)", "def convert_to_model(self, *args):", "def decode(self, model: bytes):\n _, path = tempfile.mkstemp()\n with open(path, \"wb\") as fd:\n fd.write(model)\n 
onnx_model = onnx.load(path)\n pytorch_model = ConvertModel(onnx_model)\n os.remove(path)\n return pytorch_model", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def load_vm_flatbuffer(\n vm_flatbuffer: bytes, *, driver: Optional[str] = None, backend: Optional[str] = None\n) -> BoundModule:\n config = _create_config(driver=driver, backend=backend)\n vm_module = _binding.VmModule.copy_buffer(config.vm_instance, vm_flatbuffer)\n return load_vm_module(vm_module, config)", "def model_wrapper(self):\n original = self.args.rnn_type\n if(self.args.rnn_type=='DeepCoNN'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='TRANSNET'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM_TNET'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='DATT'):\n self.args.rnn_type ='RAW_MSE_DUAL_DOT'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='MPCN'):\n self.args.rnn_type = 'RAW_MSE_MPCN_FN_FM'\n self.args.base_encoder = 'NBOW'\n\n print(\"Conversion to {} | base:{}\".format(\n self.args.rnn_type,\n self.args.base_encoder))", "def tflite_load_model(model_file):\n interpreter = tf.lite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter", "def unpack(self, buff, verbose=0):\n\n\n # See https://docs.python.org/3/library/struct.html#struct.pack\n # for struck pack format\n\n # Local methods to unpack numbers in little-endian format\n idx={'x':0}\n\n def read_uint8():\n idx['x']+=1\n return struct.unpack('<B', buf[idx['x']-1:idx['x']])[0]\n def read_uint32():\n idx['x']+=4\n return struct.unpack('<I', buf[idx['x']-4:idx['x']])[0]\n def read_float32():\n idx['x']+=4\n return struct.unpack('<f', buf[idx['x']-4:idx['x']])[0]\n\n # Return empty model in case the byte-array contains no information\n if len(buf) == 0:\n return None\n\n # Read global stddev and mean (not used in RQRMI version 1.1)\n _=read_float32()\n _=read_float32()\n\n num_of_stages=read_uint32()\n _log(verbose, 'Num of stages: %d' % num_of_stages)\n\n # Preallocate array\n trained_rqrmi=[None for _ in range(num_of_stages)]\n\n for s in range(num_of_stages):\n\n # Read the current stage\n num_of_models=read_uint32()\n\n _log(verbose, '\\nStage %d num of models: %d' % (s, num_of_models))\n\n # Preallocate net_list\n net_list=[None for _ in range(num_of_models)]\n\n for m in range(num_of_models):\n # Read version\n version=read_uint8()\n if version==0:\n _log(verbose, '\\nSkipping model <%d,%d>: model not compiled' % (s, m))\n continue\n elif version!=2:\n _log(verbose, '\\nUnsupported version for model <%d,%d>' % (s, m))\n continue\n\n _log(verbose, '\\nLoading model <%d, %d>: ' % (s,m))\n\n # Read model parameters\n mu=read_float32()\n sig=read_float32()\n fac=read_float32()\n omin=read_float32()\n num_of_layers=read_uint32()\n _log(verbose, 'layers: %d, ' % num_of_layers)\n\n # Preallocate net values\n net_values=[None for _ in range(2*num_of_layers-1)]\n\n # Read network structure\n structure=[None for _ in range(num_of_layers)]\n for l in range(num_of_layers):\n structure[l]=read_uint32()\n\n # Layer 0 bias\n net_values[0]=np.empty(structure[0])\n\n # Preallocate all other layers\n for l in 
range(1, num_of_layers):\n net_values[2*l-1]=np.empty(structure[l]) # Layer bias\n net_values[2*l-0]=np.empty([structure[l-1], structure[l]]) # Layer weights\n\n _log(verbose, 'structure: [%s]' % ','.join([str(x) for x in structure]))\n\n # Read values of first layer\n net_values[0][0]=read_float32()\n _=read_float32() # First layer weight is one (always)\n\n # Read values\n for l in range(1, num_of_layers):\n # Read bias\n for i in range(structure[l]):\n net_values[2*l-1][i]=read_float32()\n # Read weights\n for y in range(structure[l-1]):\n for x in range(structure[l]):\n net_values[2*l][y,x]=read_float32()\n\n # Update stage's net list\n net_list[m]=(mu, sig, fac, omin, net_values)\n\n # Update output with stage\n trained_rqrmi[s] = net_list\n\n # Read the maximum error of each last stage submodel\n self.error_list = []\n for e in range(len(self.trained_rqrmi[-1])):\n self.error_list.append(read_uint32())\n\n _log(verbose, '\\n')\n self.trained_rqrmi = trained_rqrmi", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs))\n _x = self.model.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model.id))\n _x = self.model.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs))\n _x = self.model.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model.track.id))\n length = len(self.model.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose:\n _v1 = val1.position\n _x = _v1\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v2 = val1.orientation\n _x = _v2\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v3 = val1.stamp\n _x = _v3\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_projected:\n _v4 = val1.position\n _x = _v4\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v5 = val1.orientation\n _x = _v5\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_resampled:\n _v6 = val1.position\n _x = _v6\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v7 = val1.orientation\n _x = _v7\n 
buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.model.track.pose_flags))\n length = len(self.model.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n _x = self\n buff.write(_struct_3I.pack(_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs))\n _x = self.data.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data.id))\n _x = self.data.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs))\n _x = self.data.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data.track.id))\n length = len(self.data.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose:\n _v8 = val1.position\n _x = _v8\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v9 = val1.orientation\n _x = _v9\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v10 = val1.stamp\n _x = _v10\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_projected:\n _v11 = val1.position\n _x = _v11\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v12 = val1.orientation\n _x = _v12\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_resampled:\n _v13 = val1.position\n _x = _v13\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v14 = val1.orientation\n _x = _v14\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.data.track.pose_flags))\n length = len(self.data.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or 
type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def run_model(model):\n\n model.create_initialised_input()\n\n model.run_from_buffer()\n\n output = model.output_parse()\n return output", "def convert_model(self, model: nn.Module) -> nn.Module:\n if self.sync_bn is not None:\n try:\n model = convert_sync_batchnorm(model, self.sync_bn)\n except ValueError as e:\n self.logger.error('cfg.sync_bn should be \"torch\" or '\n f'\"mmcv\", but got {self.sync_bn}')\n raise e\n\n return model", "def vid2tensor( self, current_frame):", "def convert_quantized_tflite_model(frozen_graph_file, tflite_file_path):\n # Convert the model.\n converter = tf.contrib.lite.TFLiteConverter.from_frozen_graph(\n graph_def_file=frozen_graph_file,\n input_arrays=[\"normalized_input_image_tensor\"],\n input_shapes={\"normalized_input_image_tensor\": [1, 300, 300, 3]},\n output_arrays=['TFLite_Detection_PostProcess',\n 'TFLite_Detection_PostProcess:1',\n 'TFLite_Detection_PostProcess:2',\n 'TFLite_Detection_PostProcess:3'],\n )\n converter.allow_custom_ops = True\n\n converter.quantized_input_stats = {\"normalized_input_image_tensor\": (0., 1.)}\n # mean, std_dev (input range is [-1, 1])\n converter.inference_type = tf.lite.constants.QUANTIZED_UINT8 # this is the recommended type.\n # converter.inference_input_type = tf.uint8 # optional\n # converter.inference_output_type = tf.uint8 # optional\n tflite_model = converter.convert()\n\n # Save the model.\n with open(tflite_file_path, 'wb') as f:\n f.write(tflite_model)", "def to_payload(self, model):\n return model", "def test_export_pytorch_model(self):\n pytorch_model = PyTorchLinear()\n dummy_input = torch.empty(10, 10)\n\n with io.BytesIO() as f:\n onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.model_aligned.header.seq, _x.model_aligned.header.stamp.secs, _x.model_aligned.header.stamp.nsecs))\n _x = self.model_aligned.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model_aligned.id))\n _x = self.model_aligned.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model_aligned.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.model_aligned.track.header.seq, _x.model_aligned.track.header.stamp.secs, _x.model_aligned.track.header.stamp.nsecs))\n _x = self.model_aligned.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model_aligned.track.id))\n length = len(self.model_aligned.track.pose)\n buff.write(_struct_I.pack(length))\n 
for val1 in self.model_aligned.track.pose:\n _v57 = val1.position\n _x = _v57\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v58 = val1.orientation\n _x = _v58\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v59 = val1.stamp\n _x = _v59\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model_aligned.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_projected:\n _v60 = val1.position\n _x = _v60\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v61 = val1.orientation\n _x = _v61\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_resampled:\n _v62 = val1.position\n _x = _v62\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v63 = val1.orientation\n _x = _v63\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.model_aligned.track.pose_flags))\n length = len(self.model_aligned.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n _x = self\n buff.write(_struct_3I.pack(_x.data_aligned.header.seq, _x.data_aligned.header.stamp.secs, _x.data_aligned.header.stamp.nsecs))\n _x = self.data_aligned.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data_aligned.id))\n _x = self.data_aligned.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_aligned.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.data_aligned.track.header.seq, _x.data_aligned.track.header.stamp.secs, _x.data_aligned.track.header.stamp.nsecs))\n _x = self.data_aligned.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data_aligned.track.id))\n length = len(self.data_aligned.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose:\n _v64 = val1.position\n _x = _v64\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v65 = val1.orientation\n _x = _v65\n 
buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v66 = val1.stamp\n _x = _v66\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_aligned.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_projected:\n _v67 = val1.position\n _x = _v67\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v68 = val1.orientation\n _x = _v68\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_resampled:\n _v69 = val1.position\n _x = _v69\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v70 = val1.orientation\n _x = _v70\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.data_aligned.track.pose_flags))\n length = len(self.data_aligned.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n buff.write(_struct_9d.pack(*self.R))\n buff.write(_struct_3d.pack(*self.T))\n _x = self\n buff.write(_struct_df.pack(_x.dist_rot, _x.dist_trans))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n transpose_out_name = node_entry[\"input_names\"][0]\n inter_output_names = [node_entry[\"output_names\"][0]]\n # axis==3 means channel is specified along the 3rd axis\n if attrs[\"axis\"] == 3:\n transpose_out_name = f\"transpose_{node_entry['name']}\"\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n [node_entry[\"input_names\"][0]],\n [transpose_out_name],\n perm=[0, 3, 1, 2],\n )\n model_container.add_nodes([node_transposed])\n inter_output_names = [f\"batch_norm_{node_entry['name']}\"]\n\n input_names = [transpose_out_name] + node_entry[\"input_names\"][1:]\n batch_norm_node = onnx.helper.make_node(\n cls.__name__, input_names, inter_output_names, epsilon=attrs[\"epsilon\"]\n )\n model_container.add_nodes([batch_norm_node])\n\n if attrs[\"axis\"] == 3:\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n inter_output_names,\n [node_entry[\"output_names\"][0]],\n perm=[0, 2, 3, 1],\n )\n model_container.add_nodes([node_transposed])", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def convert_model(self, backend, model, weight, **kwargs):\n om_save_path = 
kwargs[\"save_dir\"]\n input_shape = kwargs[\"input_shape\"]\n out_nodes = kwargs[\"out_nodes\"]\n log_save_path = os.path.dirname(model)\n command_line = [\"bash\", self.current_path + \"/model_convert.sh\", backend,\n model, weight, om_save_path, log_save_path, input_shape, out_nodes]\n try:\n subprocess.check_output(command_line)\n except subprocess.CalledProcessError as exc:\n logging.error(\"convert model to bolt failed. The return message is : {}.\".format(exc))", "def model_fn(model_dir):\n\n net = gluon.nn.SymbolBlock.imports('%s/model.json' % model_dir,\n ['data'], \n param_file='%s/model.params' % model_dir,\n ctx=mx.cpu())\n\n return net", "def test_forward_mobilenet_v1(accel_type=\"ethos-u55-256\"):\n np.random.seed(23)\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/\"\n \"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\",\n \"mobilenet_v1_1.0_224_quant.tflite\",\n )\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n input_tensor = \"input\"\n input_dtype = \"uint8\"\n input_shape = (1, 224, 224, 3)\n in_min, in_max = util.get_range_for_dtype_str(input_dtype)\n input_data = np.random.randint(in_min, high=in_max, size=input_shape, dtype=input_dtype)\n\n relay_mod, params = convert_to_relay(tflite_model_buf, input_data, \"input\")\n input_data = {input_tensor: input_data}\n output_data = generate_ref_data(relay_mod, input_data)\n\n mod = partition_for_ethosu(relay_mod, params)\n compiled_models = infra.build_source(\n mod, input_data, output_data, accel_type, output_tolerance=10\n )\n infra.verify_source(compiled_models, accel_type)", "def create_model(hparams, mode):\n\n graph = tf.Graph()\n\n with graph.as_default():\n with tf.name_scope(\"input_pipe\"):\n dataset = create_dataset(hparams, mode)\n iterator = dataset.make_initializable_iterator()\n model = LMandBDRNNModel(hparams=hparams,\n iterator=iterator,\n mode=mode)\n\n sess = tf.Session(graph=graph)\n\n modeltuple = ModelTuple(graph=graph, iterator=iterator,\n model=model, session=sess)\n\n return modeltuple", "def convert_layers(model):\n\n import logging\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n for name, module in model._modules.items():\n if len(list(module.children())) > 0:\n model._modules[name] = convert_layers(model=module)\n try:\n module_str = str(module)\n module_new = eval(module_str)\n try:\n module_new.weight = module.weight\n module_new.bias = module.bias\n except:\n pass\n model._modules[name] = module_new\n logger.info(\"Quantizing \" + str(name) + \" \" + str(module))\n except:\n pass\n return model", "def preprocess_module(mod):\n\n def alter_conv(attrs, inputs, tinfos, out_type):\n new_attrs = dict(attrs)\n data_info = tinfos[0]\n weight_info = tinfos[1]\n (desired_data_layout, desired_kernel_layout) = (\"NCHW\", \"OIHW\")\n new_attrs[\"data_layout\"] = desired_data_layout\n new_attrs[\"kernel_layout\"] = desired_kernel_layout\n\n if is_depthwise_conv2d(\n data_info.shape,\n attrs[\"data_layout\"],\n weight_info.shape,\n attrs[\"kernel_layout\"],\n attrs[\"groups\"],\n ):\n dkl = desired_kernel_layout\n new_attrs[\"kernel_layout\"] = dkl[1] + dkl[0] + dkl[2] + dkl[3]\n return relay.nn.conv2d(*inputs, **new_attrs)\n\n with OpAttrContext(\"nn.conv2d\", \"FTVMAlterOpLayout\", alter_conv):\n seq = tvm.transform.Sequential(\n [\n transform.ConvertLayout({\"nn.conv2d\": [\"NCHW\", \"OIHW\"]}),\n transform.ConvertLayout({\"nn.conv2d_transpose\": [\"NCHW\", \"OIHW\"]}),\n 
transform.AlterOpLayout(),\n transform.FoldConstant(),\n ]\n )\n with tvm.transform.PassContext(opt_level=3):\n preprocessed_mod = seq(mod)\n return preprocessed_mod", "def _get_model(self, model_path='model.tflite'):\n interpreter = tf.lite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n return interpreter", "def reconstruct_input_ext(self, model_in):", "def to_local(self, id_tensor):\n ...", "def load_model(model_file):\n # Load TFLite model and allocate tensors.\n interpreter = tflite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter" ]
[ "0.6981948", "0.6846798", "0.6283583", "0.61803657", "0.5930052", "0.5720828", "0.5706919", "0.56350565", "0.5599901", "0.5577139", "0.55073345", "0.55071956", "0.5415228", "0.53956825", "0.53790677", "0.5360498", "0.5250914", "0.52445364", "0.5237697", "0.52300274", "0.52069974", "0.5206539", "0.51938456", "0.5193306", "0.5192036", "0.5169634", "0.51580673", "0.51526356", "0.51375127", "0.5095351" ]
0.7927206
0
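A minimal usage sketch for the convert_to_relay helper in the record above, assuming the function is already in scope; the .tflite file name is illustrative:

```python
# Sketch only: the model file name is an assumption, and convert_to_relay is
# assumed to be importable from the test utilities that define it.
with open("mobilenet_v1_1.0_224_quant.tflite", "rb") as f:
    tflite_model_buf = f.read()

# Returns a Relay IRModule and its parameter dict; with bind_params_by_name=True
# the weights are folded into mod["main"] as named constants.
mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name=True)
print(mod["main"])
```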
Generate reference data by executing the Relay module
def generate_ref_data(mod, input_data, params=None, target="llvm"): with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): lib = relay.build(mod, target=target, params=params) lib_name = "mod.so" temp = utils.tempdir() lib_path = temp.relpath(lib_name) lib.export_library(lib_path) lib = tvm.runtime.load_module(lib_path) grt_mod = graph_executor.GraphModule(lib["default"](tvm.cpu())) grt_mod.set_input(**input_data) grt_mod.run() output_count = grt_mod.get_num_outputs() out = [grt_mod.get_output(i).numpy() for i in range(output_count)] if isinstance(mod, tvm.relay.Function): main = mod else: main = mod["main"] if main.attrs is None or main.attrs["output_tensor_names"] is None: output_tensor_names = ( ["output"] if output_count == 1 else [f"output{i}" for i in range(output_count)] ) else: output_tensor_names = main.attrs["output_tensor_names"] return dict(zip(output_tensor_names, out))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n self.parser.parse_args()\n\n sys.stdout.write(\"ref: %s\\n\\n\" % self.gen_ref())", "def make_reference(self):\n self.make_reference2()", "def generate(self):", "def genReferences( self, aWeb ):\n try:\n for t in self.commands:\n ref= t.ref( aWeb )\n if ref is not None:\n yield ref\n except Error as e:\n raise", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def script(self):", "def get_data():\n pass", "def use(self):", "def data(self):", "def _get_reference(self):\n super()._get_reference()\n\n # Additional object references from this env\n self.cube_body_id = self.sim.model.body_name2id(\"pot\")\n self.handle_1_site_id = self.sim.model.site_name2id(\"pot_handle_1\")\n self.handle_0_site_id = self.sim.model.site_name2id(\"pot_handle_2\")\n self.table_top_id = self.sim.model.site_name2id(\"table_top\")\n self.pot_center_id = self.sim.model.site_name2id(\"pot_center\")", "def get_data(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def call(self):", "def generate(self):\n pass" ]
[ "0.61367774", "0.573806", "0.56130826", "0.5543687", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.5495478", "0.5490769", "0.54810923", "0.544518", "0.5430902", "0.5426326", "0.5412864", "0.5412864", "0.5412864", "0.5412864", "0.5411444", "0.54028183", "0.54028183", "0.54028183", "0.53885436", "0.5386609" ]
0.61549
0
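A short sketch of how generate_ref_data from the record above can follow the TFLite conversion step; the input tensor name and shape are assumptions for illustration:

```python
import numpy as np

# `mod` and `params` are assumed to come from a previous convert_to_relay call;
# the input name "input" and the uint8 (1, 224, 224, 3) shape are illustrative.
input_data = {"input": np.random.randint(0, 256, size=(1, 224, 224, 3), dtype="uint8")}

# Builds the module for the CPU target, runs it once, and returns a dict
# mapping each output tensor name to its numpy result.
ref_outputs = generate_ref_data(mod, input_data, params=params, target="llvm")
for name, value in ref_outputs.items():
    print(name, value.shape, value.dtype)
```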
A helper function to create a Relay IRModule with inputs and params from a tflite file
def create_relay_module_and_inputs_from_tflite_file(tflite_model_file, bind_params_by_name=True): with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name) inputs = dict() for param in mod["main"].params: name = str(param.name_hint) data_shape = [int(i) for i in param.type_annotation.shape] dtype = str(param.type_annotation.dtype) if np.issubdtype(dtype, np.floating): # Since np.random.uniform only allows the ranges of float32, # at first float16 is used and scaled afterwards, if necessary. in_min, in_max = (np.finfo("float16").min, np.finfo("float16").max) data = np.random.uniform(low=in_min, high=in_max, size=data_shape).astype(dtype) scale = np.finfo(dtype).min / np.finfo("float16").min data *= scale elif np.issubdtype(dtype, np.integer): in_min, in_max = (np.iinfo(dtype).min, np.iinfo(dtype).max) data = np.random.randint(in_min, high=in_max, size=data_shape, dtype=dtype) else: raise TypeError(f"Type {dtype} not supported") inputs[name] = data return mod, inputs, params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tflite_load_model(model_file):\n interpreter = tf.lite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter", "def convert_to_relay(tflite_model_buf, bind_params_by_name=True):\n # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1\n try:\n import tflite.Model # pylint: disable=import-outside-toplevel\n\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)\n except AttributeError:\n import tflite # pylint: disable=import-outside-toplevel\n\n tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n mod, params = relay.frontend.from_tflite(tflite_model)\n if bind_params_by_name:\n mod[\"main\"] = relay.build_module.bind_params_by_name(mod[\"main\"], params)\n return mod, params", "def load_model(model_file):\n # Load TFLite model and allocate tensors.\n interpreter = tflite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def get_src(self):\n\n self.codegen = json.loads(self.cmod.get_source(\"json\"))\n self.sub_module_name = self.codegen[\"symbol\"]\n self.nodes = self.codegen[\"nodes\"]\n self.clml_code.append(self.MakeHeader.substitute(module=self.sub_module_name))\n\n def get_tensor_from_map(\n node_seq, shape=None, layout=\"CL_TENSOR_LAYOUT_OPTIMAL_QCOM\", dtype=\"float32\"\n ):\n if node_seq in self.node_map:\n return self.node_map[node_seq]\n else:\n node = self.nodes[node_seq]\n dtype = str(node[\"attrs\"][\"dtype\"][0][0])\n if node[\"op\"] == \"input\":\n self.clml_code.append(\"// Input Node\")\n node_out_name = self.sub_module_name + \"_\" + \"input_\" + str(node_seq)\n else:\n node_out_name = node[\"name\"]\n if shape is None:\n shape = str(tuple(node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n\n self.clml_code.append(\n self.MakeCLMLTensor.substitute(\n name=node_out_name, shape=shape, dtype=dtype, layout=layout\n )\n )\n self.clml_code.append(\n self.MapInsert.substitute(nid=node_out_name, tensor_desc=node_out_name)\n )\n if node[\"op\"] == \"input\":\n self.clml_code.append(\n Template(\"runner.inputs.push_back($clml_input);\").substitute(\n clml_input=node_out_name\n )\n )\n self.input_meta.append(\n self.MakeInputMetaInfo.substitute(\n in_name=node_out_name, dtype=dtype, shape=shape\n )\n )\n\n if self.nodes[node_seq][\"op\"] == \"const\":\n self.clml_code.append(\n Template('runner.consts.push_back(\"$nid\");').substitute(nid=node[\"name\"])\n )\n self.node_map[node_seq] = node_out_name\n return node_out_name\n\n def make_output_tensor(\n node, node_seq, shape=None, layout=\"CL_TENSOR_LAYOUT_OPTIMAL_QCOM\", dtype=\"float32\"\n ):\n if dtype is None:\n dtype = str(node[\"attrs\"][\"dtype\"][0][0])\n if shape is None:\n shape = str(tuple(node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n node_out_name = self.sub_module_name + \"_\" + \"layer_out_\" + str(node_seq)\n self.clml_code.append(\n self.MakeCLMLTensor.substitute(\n name=node_out_name,\n shape=shape,\n dtype=dtype,\n layout=layout,\n )\n )\n return node_out_name\n\n for node_seq, node in enumerate(self.nodes):\n if node[\"op\"] == 
\"kernel\":\n self.clml_code.append(\"// Kernel Node : \" + node[\"name\"])\n if node[\"name\"] == \"nn.conv2d\" or node[\"name\"] == \"nn.depthwise_conv2d\":\n if \"padding\" in node[\"attrs\"]:\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"padding\"][0]))[1:-1]\n else:\n padding = \"0, 0, 0, 0\"\n dilation = str(tuple(int(x) for x in node[\"attrs\"][\"dilation\"][0]))[1:-1]\n strides = str(tuple(int(x) for x in node[\"attrs\"][\"strides\"][0]))[1:-1]\n groups = node[\"attrs\"][\"groups\"][0][0]\n if node[\"name\"] == \"nn.conv2d\":\n mode = \"CL_CONVOLUTION_MODE_CONVOLUTION_QCOM\"\n else:\n mode = \"CL_CONVOLUTION_MODE_DEPTHWISE_QCOM\"\n activation = \"CL_ACTIVATION_RELU\"\n has_act = False\n if \"activation_type\" in node[\"attrs\"]:\n has_act = True\n activation = node[\"attrs\"][\"activation_type\"][0][0]\n if activation == \"relu\":\n activation = \"CL_ACTIVATION_RELU\"\n elif activation == \"relu6\":\n activation = \"CL_ACTIVATION_RELU6\"\n else:\n RuntimeError(\"Unknown activation:\" + activation)\n has_bias = bool((node[\"inputs\"] == 3) or (node[\"inputs\"] == 7))\n has_bn = bool((node[\"inputs\"] == 6) or (node[\"inputs\"] == 7))\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n weight_tensor = get_tensor_from_map(node[\"inputs\"][1][0])\n if not has_bias:\n bias_tensor = \"runner.unusedTensor\"\n else:\n bias_tensor = get_tensor_from_map(node[\"inputs\"][2][0])\n\n node_out_name = make_output_tensor(node, node_seq)\n\n if not has_bn:\n self.clml_code.append(\n self.MakeConv2D.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n bias_tensor=bias_tensor,\n output_tensor=node_out_name,\n padding=padding,\n dilation=dilation,\n strides=strides,\n groups=groups,\n mode=mode,\n activation=activation,\n has_bias=\"true\" if has_bias else \"false\",\n has_act=\"true\" if has_act else \"false\",\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n else:\n bn_index = 3 if has_bias else 2\n bn_attrs = tuple(node[\"attrs\"][\"batchnorm\"][0][0])\n axis = bn_attrs[0]\n bn_shape = [1, 1, 1, 1]\n bn_node = self.nodes[node[\"inputs\"][bn_index][0]]\n bn_shape[axis] = bn_node[\"attrs\"][\"shape\"][0][0]\n dtype = bn_node[\"attrs\"][\"dtype\"][0][0]\n\n bn_scale_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_bias_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 1][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_mean_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 2][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_var_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 3][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n self.clml_code.append(\n self.MakeConv2DWithBN.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n bias_tensor=bias_tensor,\n output_tensor=node_out_name,\n bn_scale_tensor=bn_scale_tensor,\n bn_bias_tensor=bn_bias_tensor,\n bn_mean_tensor=bn_mean_tensor,\n bn_var_tensor=bn_var_tensor,\n bn_attrs=str(bn_attrs)[1:-1],\n padding=padding,\n dilation=dilation,\n strides=strides,\n groups=groups,\n mode=mode,\n activation=activation,\n has_bias=\"true\" if has_bias else \"false\",\n has_act=\"true\" if has_act else \"false\",\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.relu6\" or node[\"name\"] == \"nn.relu\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n 
relu_type = (\n \"CL_ACTIVATION_RELU\" if node[\"name\"] == \"nn.relu\" else \"CL_ACTIVATION_RELU6\"\n )\n self.clml_code.append(\n self.MakeRelu.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n relu_type=relu_type,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.batch_norm\":\n bn_attrs = tuple(node[\"attrs\"][\"batchnorm\"][0][0])\n axis = bn_attrs[0]\n bn_shape = [1, 1, 1, 1]\n bn_node = self.nodes[node[\"inputs\"][0][0]]\n bn_shape[axis] = bn_node[\"attrs\"][\"shape\"][0][0]\n dtype = bn_node[\"attrs\"][\"dtype\"][0][0]\n bn_scale_tensor = get_tensor_from_map(\n node[\"inputs\"][0][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_bias_tensor = get_tensor_from_map(\n node[\"inputs\"][1][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_mean_tensor = get_tensor_from_map(\n node[\"inputs\"][2][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_var_tensor = get_tensor_from_map(\n node[\"inputs\"][3][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n\n self.clml_code.append(\n self.MakeBN.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n bn_scale_tensor=bn_scale_tensor,\n bn_bias_tensor=bn_bias_tensor,\n bn_mean_tensor=bn_mean_tensor,\n bn_var_tensor=bn_var_tensor,\n bn_attrs=str(bn_attrs)[1:-1],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\"nn.max_pool2d\", \"nn.avg_pool2d\", \"nn.l2_pool2d\"]:\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n pool_size = str(tuple(int(x) for x in node[\"attrs\"][\"pool_size\"][0]))[1:-1]\n strides = str(tuple(int(x) for x in node[\"attrs\"][\"strides\"][0]))[1:-1]\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"padding\"][0]))[1:-1]\n self.clml_code.append(\n self.MakePool2D.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n pool_type=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\"nn.global_max_pool2d\", \"nn.global_avg_pool2d\"]:\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n in_node = self.nodes[node[\"inputs\"][0][0]]\n in_shape = str(tuple(in_node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n self.clml_code.append(\n self.MakeGlobalPool2D.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n in_shape=in_shape,\n pool_type=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"reshape\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeReshape.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"concatenate\":\n input_len = len(node[\"inputs\"])\n in_list = str(\n [get_tensor_from_map(node[\"inputs\"][x][0]) for x in range(input_len)]\n )[1:-1]\n node_out_name = make_output_tensor(node, node_seq)\n axis = node[\"attrs\"][\"axis\"][0][0]\n self.clml_code.append(\n self.MakeConcatenate.substitute(\n in_list=in_list,\n output_tensor=node_out_name,\n axis=axis,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.dense\":\n in_node = 
self.nodes[node[\"inputs\"][0][0]]\n in_shape = tuple(in_node[\"attrs\"][\"shape\"][0][0])\n wt_shape = tuple(in_node[\"attrs\"][\"shape\"][0][0])\n input_tensor = get_tensor_from_map(\n node[\"inputs\"][0][0], layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\"\n )\n weight_tensor = get_tensor_from_map(\n node[\"inputs\"][1][0],\n shape=str(tuple([1, 1, wt_shape[0], wt_shape[1]]))[1:-1],\n layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\",\n )\n node_out_name = make_output_tensor(\n node,\n node_seq,\n shape=str(tuple([in_shape[0], wt_shape[0], 1, 1]))[1:-1],\n layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\",\n )\n self.clml_code.append(\n self.MakeDense.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n output_tensor=node_out_name,\n in_shape=str(in_shape)[1:-1],\n wt_shape=str(wt_shape)[1:-1],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.softmax\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeSoftMax.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.pad\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n pad_mode = node[\"attrs\"][\"pad_mode\"][0][0]\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"pad_width\"][0]))[1:-1]\n self.clml_code.append(\n self.MakePad.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n pad_mode=pad_mode,\n padding=padding,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.batch_flatten\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeBatchFlatten.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"clip\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n a_max = node[\"attrs\"][\"a_max\"][0][0]\n a_min = node[\"attrs\"][\"a_min\"][0][0]\n self.clml_code.append(\n self.MakeClip.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n a_max=a_max,\n a_min=a_min,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\n \"add\",\n \"subtract\",\n \"multiply\",\n \"minimum\",\n \"maximum\",\n \"divide\",\n ]:\n input_a = get_tensor_from_map(node[\"inputs\"][0][0])\n input_b = get_tensor_from_map(node[\"inputs\"][1][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeBinaryOp.substitute(\n input_a=input_a,\n input_b=input_b,\n output_tensor=node_out_name,\n op=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n else:\n RuntimeError(\"Unsupported Op:\" + node[\"name\"])\n self.clml_code.append(\n self.MapInsert.substitute(nid=node_out_name, tensor_desc=node_out_name)\n )\n self.node_map[node_seq] = node_out_name\n\n elif node[\"op\"] not in [\"const\", \"input\"]:\n print(\"Unknown Node type:\", node[\"op\"])\n\n # Populate outputs\n out_nodes = self.codegen[\"heads\"]\n self.clml_code.append(\"// Populate outputs\")\n for nid_triple in out_nodes:\n nid = nid_triple[0]\n out_node = self.nodes[nid]\n dtype = str(out_node[\"attrs\"][\"dtype\"][0][0])\n shape = str(tuple(out_node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n out_name = self.sub_module_name + \"_\" + 
\"layer_out_\" + str(nid)\n self.clml_code.append(\n Template(\n 'runner.outputs.insert({\"$out_name\", runner.storage_map[\"$out_name\"]});'\n ).substitute(out_name=out_name)\n )\n self.clml_code.append(\n Template('runner.outputs_dtypes.insert({\"$out_name\", \"$dtype\"});').substitute(\n out_name=out_name, dtype=dtype\n )\n )\n self.clml_code.append(\n Template(\n \"runner.outputs_shapes.insert\" '({\"$out_name\", std::vector<size_t>({$shape})});'\n ).substitute(out_name=out_name, shape=shape)\n )\n self.output_meta.append(\n self.MakeOutputMetaInfo.substitute(out_name=out_name, dtype=dtype, shape=shape)\n )\n\n # Mem allocation & Param copy\n self.clml_code.append(\"// Allocate Tensor Memory and copy params\")\n self.clml_code.append(\"runner.AllocateMemAndPopulateParams();\")\n\n # Meta data preparation\n self.clml_code.append(\n self.MakeMetaInfo.substitute(\n name=self.sub_module_name,\n input_count=len(self.input_meta),\n output_count=len(self.output_meta),\n input_meta=\"\\\\\\n\".join(self.input_meta),\n output_meta=\"\\\\\\n\".join(self.output_meta),\n )\n )\n\n self.clml_code.append(self.MakeFooter.substitute())\n return (self.sub_module_name, self.clml_code)", "def from_tensorflow(self, graph, layout=\"NHWC\", shape=None, outputs=None):\n func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs)\n self._mod[\"main\"] = func\n return self._mod, self._params", "def from_tflite(model, prog_name): #, shape_dict, dtype_dict):\n try:\n import tflite.Model\n import tflite.SubGraph\n import tflite.BuiltinOperator\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n assert isinstance(model, tflite.Model.Model)\n\n # keep the same as tflite\n assert model.SubgraphsLength() == 1, \"only support one subgraph (main subgraph)\"\n subgraph = model.Subgraphs(0)\n\n # model inputs / outputs\n model_inputs = subgraph.InputsAsNumpy()\n model_outputs = subgraph.OutputsAsNumpy()\n assert model_inputs.size == 1, \"Model should have only one input\"\n assert model_outputs.size == 1, \"Model should have only one output\"\n\n # op code in model\n op_converter = OperatorConverter(model, subgraph, prog_name)\n op_converter.is_dequantize = False\n op_converter.check_unsupported_ops()\n\n in_tensor = op_converter.get_tensors(model_inputs)[0]\n out_tensor = op_converter.get_tensors(model_outputs)[0]\n\n op_converter.define_model_sizes(\"IN\", in_tensor)\n op_converter.define_model_sizes(\"OUT\", out_tensor)\n\n op_converter.nn_add_input(in_tensor)\n\n output_nodes = op_converter.convert_op_to_hexagon_nn()\n\n op_converter.nn_add_output(output_nodes)\n\n op_converter.print_nn_nodes()\n\n print(\"tensor_tab:\")\n print(op_converter.tensor_tab)\n\n op_converter.close()\n print(\"Converted Hexagon Model saved to {}\".format(op_converter.h_file.name))", "def _get_model(self, model_path='model.tflite'):\n interpreter = tf.lite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n return interpreter", "def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def compile_ir(engine, llvm_ir):\n # Create a LLVM module object from the IR\n mod = llvm.parse_assembly(llvm_ir)\n mod.verify()\n # Now add the module and make sure it is ready for execution\n engine.add_module(mod)\n engine.finalize_object()\n return mod", "def __init__(self, model_name: str, label_file: str) -> None:\n\n # Append TFLITE extension to 
model_name if there's no extension\n _, ext = os.path.splitext(model_name)\n if not ext:\n model_name += '.tflite'\n\n # Initialize the TFLite model.\n interpreter = Interpreter(model_path=model_name, num_threads=4)\n interpreter.allocate_tensors()\n\n self._input_index = interpreter.get_input_details()[0]['index']\n self._output_index = interpreter.get_output_details()[0]['index']\n self._interpreter = interpreter\n\n self.pose_class_names = self._load_labels(label_file)", "def read_module(stream):\n data = stream.read()\n if len(data) % 4 != 0:\n raise ParseError('File length is not divisible by 4')\n words = array.array('I', data)\n binary = SpirvBinary(words)\n\n module = ir.Module()\n module.value_to_id = {}\n try:\n parse_global_instructions(binary, module)\n parse_functions(binary, module)\n return module\n finally:\n del module.value_to_id", "def file_based_input_fn_builder(input_file):\n # 存放解析自TFRecord文件的数据\n name_to_features = {\n \"input_q\":tf.FixedLenFeature([shape],tf.int64),\n \"input_K\":tf.FixedLenFeature([],tf.float32),\n \"input_v\":tf.FixedLenFeature([],tf.float32),\n }\n\n def _decode_record(record,name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record,name_to_features)\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size = 100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record:_decode_record(record, name_to_features),\n batch_size = batch_size,\n drop_remainder=drop_remainder))\n return d\n return input_fn", "def create_model_from_file(\n weights_fn: Path, gpu: bool = True, device_num: int = 0,\n) -> Tuple[torch.nn.Module, int, dict]:\n if gpu:\n map_location = f\"cuda:{device_num}\"\n else:\n map_location = \"cpu\"\n weights_fn = weights_fn.resolve()\n logging.info(\"Loading model dictionary from file.\")\n model_dict = torch.load(weights_fn, map_location=map_location)\n model = create_model_on_device(device_num, model_dict[\"model_struc_dict\"])\n logging.info(\"Loading in the saved weights.\")\n model.load_state_dict(model_dict[\"model_state_dict\"])\n return model, model_dict[\"model_struc_dict\"][\"classes\"], model_dict[\"label_codes\"]", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, 
[int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def load_module(id=None, datatype=None, action=None,\n version='0.0', fields=[]):\n\n icon = {\n 'URI': config.IMAGES + \"load.png\",\n 'terminals': {\n 'output': (20, 10, 1, 0),\n }\n }\n \n terminals = [\n dict(id='output',\n datatype=datatype,\n use='out',\n description='data',\n ),\n ]\n\n files_field = {\n \"type\":\"[file]\",\n \"label\": \"Files\",\n \"name\": \"files\",\n \"value\": '',\n }\n intent_field = {\n \"type\":\"string\",\n \"label\":\"Intent\",\n \"name\": \"intent\",\n \"value\": '',\n }\n \n # Combine everything into a module.\n module = Module(id=id,\n name='Load',\n version=version,\n description=action.__doc__,\n #icon=icon,\n terminals=terminals,\n fields=[files_field, intent_field] + fields,\n action=action,\n )\n\n return module", "def create_model(hparams, mode):\n\n graph = tf.Graph()\n\n with graph.as_default():\n with tf.name_scope(\"input_pipe\"):\n dataset = create_dataset(hparams, mode)\n iterator = dataset.make_initializable_iterator()\n model = LMandBDRNNModel(hparams=hparams,\n iterator=iterator,\n mode=mode)\n\n sess = tf.Session(graph=graph)\n\n modeltuple = ModelTuple(graph=graph, iterator=iterator,\n model=model, session=sess)\n\n return modeltuple", "def create_graph():\n with gfile.FastGFile(os.path.join(\n FLAGS.model_dir, FLAGS.model_name), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def build_model(cls, 
args, task):\n\n # make sure all arguments are present in older models\n base_lm_architecture(args)\n\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = getattr(\n args, \"tokens_per_sample\", DEFAULT_MAX_TARGET_POSITIONS\n )\n\n if args.character_embeddings:\n embed_tokens = CharacterTokenEmbedder(\n task.source_dictionary,\n eval(args.character_filters),\n args.character_embedding_dim,\n args.decoder_embed_dim,\n args.char_embedder_highway_layers,\n )\n elif args.adaptive_input:\n embed_tokens = AdaptiveInput(\n len(task.source_dictionary),\n task.source_dictionary.pad(),\n args.decoder_input_dim,\n args.adaptive_input_factor,\n args.decoder_embed_dim,\n options.eval_str_list(args.adaptive_input_cutoff, type=int),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n embed_tokens = cls.build_embedding(\n args, task.source_dictionary, args.decoder_input_dim\n )\n\n if args.tie_adaptive_weights:\n assert args.adaptive_input\n assert args.adaptive_input_factor == args.adaptive_softmax_factor\n assert (\n args.adaptive_softmax_cutoff == args.adaptive_input_cutoff\n ), \"{} != {}\".format(\n args.adaptive_softmax_cutoff, args.adaptive_input_cutoff\n )\n assert args.decoder_input_dim == args.decoder_output_dim\n\n decoder = LinearTransformerDecoder(\n args, task.target_dictionary, embed_tokens, no_encoder_attn=True\n )\n return cls(decoder)", "def from_program(self, program, shape_dict, scope):\n\n self.shape_dict = shape_dict\n if scope is None:\n import paddle\n\n scope = paddle.fluid.global_scope()\n self.check_unsupported_ops(program)\n self.extract_parameters(program, scope)\n self.ops_to_relay(program)\n\n output_names = list()\n for block in program.blocks:\n for op in block.ops:\n if op.type == \"fetch\":\n output_names.append(op.input(\"X\")[0])\n\n outputs = [self.nodes[name] for name in output_names]\n outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)\n\n free_vars = analysis.free_vars(outputs)\n func = _function.Function(free_vars, outputs)\n mod = IRModule.from_expr(func)\n return mod, self.params", "def test_forward_mobilenet_v1(accel_type=\"ethos-u55-256\"):\n np.random.seed(23)\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/\"\n \"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\",\n \"mobilenet_v1_1.0_224_quant.tflite\",\n )\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n input_tensor = \"input\"\n input_dtype = \"uint8\"\n input_shape = (1, 224, 224, 3)\n in_min, in_max = util.get_range_for_dtype_str(input_dtype)\n input_data = np.random.randint(in_min, high=in_max, size=input_shape, dtype=input_dtype)\n\n relay_mod, params = convert_to_relay(tflite_model_buf, input_data, \"input\")\n input_data = {input_tensor: input_data}\n output_data = generate_ref_data(relay_mod, input_data)\n\n mod = partition_for_ethosu(relay_mod, params)\n compiled_models = infra.build_source(\n mod, input_data, output_data, accel_type, output_tolerance=10\n )\n infra.verify_source(compiled_models, accel_type)", "def __compile_ir(self):\n self.builder.ret_void()\n llvm_ir = str(self.module)\n mod = self.binding.parse_assembly(llvm_ir)\n mod.verify()\n\n self.engine.add_module(mod)\n self.engine.finalize_object()\n self.engine.run_static_constructors()\n return mod", "def _make_model_v2():\n class 
CustomModule(tf.Module):\n\n def __init__(self):\n super().__init__()\n self.m = tf.Variable([1.0, 1.0, 1.0], name='slope')\n\n @tf.function\n def __call__(self, x):\n y = self.m * x + 1\n return y\n\n @tf.function(input_signature=[tf.TensorSpec((None, 3), tf.float32)])\n def length(self, x):\n return tf.reduce_sum(self(x) - x, name='length')\n\n @tf.function(input_signature=[tf.TensorSpec([], tf.float32),\n tf.TensorSpec((None, 3), tf.float32)])\n def scalar_multiply(self, z, x):\n return tf.multiply(z, x, name='scale_mult')\n\n module = CustomModule()\n\n # Make a concrete version of __call__\n call = module.__call__.get_concrete_function(tf.TensorSpec((None, 3)))\n\n tf.saved_model.save(\n module, tf_export_path, signatures={\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: call,\n 'length': module.length,\n 'scalar_multiply': module.scalar_multiply\n }\n )", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(sys.argv[1], 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create_inputs(params):\n sess = tf.Session()\n\n lr_images, hr_labels = [], []\n training_dir = params['training_dir'].format(params['ratio'])\n\n # Raise exception if user has not ran prepare_data.py yet\n if not os.path.isdir(training_dir):\n raise Exception(\"You must first run prepare_data.py before you can train\")\n\n lr_shape = (params['lr_size'], params['lr_size'], 3)\n hr_shape = output_shape = (params['lr_size'] - params['edge'], params['lr_size'] - params['edge'], 3 * params['ratio']**2)\n for file in os.listdir(training_dir):\n train_file = open(\"{}/{}\".format(training_dir, file), \"rb\")\n train_data = np.fromfile(train_file, dtype=np.uint8)\n\n lr_image = train_data[:17 * 17 * 3].reshape(lr_shape)\n lr_images.append(lr_image)\n\n hr_label = train_data[17 * 17 * 3:].reshape(hr_shape)\n hr_labels.append(hr_label)\n\n return lr_images, hr_labels", "def create_module(sbml_model_file, model_name, model_output_dir, condition_df,\n observable_df):\n\n from amici.petab_import import import_model\n import_model(sbml_model=sbml_model_file, observable_table=observable_df,\n model_name=model_name, model_output_dir=model_output_dir,\n verbose=True, condition_table=condition_df)", "def compile_IR(ir):\n triple = re.search(\n r'target\\s+triple\\s*=\\s*\"(?P<triple>[-\\d\\w\\W_]+)\"\\s*$',\n ir, re.M).group('triple')\n\n # Create execution engine\n target = llvm.Target.from_triple(triple)\n target_machine = target.create_target_machine()\n backing_mod = llvm.parse_assembly(\"\")\n engine = llvm.create_mcjit_compiler(backing_mod, target_machine)\n\n # Create LLVM module and compile\n mod = llvm.parse_assembly(ir)\n mod.verify()\n engine.add_module(mod)\n engine.finalize_object()\n engine.run_static_constructors()\n\n return engine", "def run_module(args, module_path, workspace, module_data):\n\n mod_path = module_path.replace('./', '')\n curr_path = os.getcwd()\n tfvar_path = module_path.replace('./components/', '')\n print(\"curr_path = {0}\".format(curr_path))\n print(\"DEBUG module_path = {0}\".format(module_path))\n module_name = module_path.split('/')[-1]\n print(\"DEBUG module_name = {0}\".format(module_name))\n\n key_config = \"\\\"key={0}/terraform.tfstate\\\"\".format(module_name)\n bucket_region_config = \"\\\"region={0}\\\"\".format(module_data[\"bucket_region\"])\n bucket_config = \"\\\"bucket={0}\\\"\".format(module_data[\"bucket\"])\n dynamodb_config = 
\"\\\"dynamodb_table={0}\\\"\".format(module_data[\"dynamodb\"])\n\n plan_output_file = \"plan.out\"\n tf_varfile = f\"{curr_path}/tfvars/{tfvar_path}/{workspace}.tfvars\"\n tf_varfile_common = f\"{curr_path}/tfvars/terraform.tfvars\"\n tf_varfile_tags = f\"{curr_path}/tfvars/core/taggings/{workspace}.tfvars\"\n backend_override = f\"{curr_path}/variables/config/backend_override.tf\"\n providers_override = f\"{curr_path}/variables/config/providers_override.tf\"\n\n softlinking_files(mod_path)\n\n remove_prev_run = f\"cd {module_path} && rm -f {plan_output_file} && rm -rf .terraform\"\n cp_override_cmd = f\"cd {module_path} && cp {backend_override} . && cp {providers_override} .\"\n\n tf_plan_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform plan -out {plan_output_file} --var-file {tf_varfile} --var-file {tf_varfile_common} --var-file {tf_varfile_tags}\"\n tf_plan_destroy_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform plan -destroy --var-file {tf_varfile} --var-file {tf_varfile_common} --var-file {tf_varfile_tags} -out {plan_output_file}\"\n tf_apply_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform apply {plan_output_file}\"\n tf_init_cmd = f\"cd {module_path} && terraform init --backend-config={key_config} --backend-config={bucket_region_config} --backend-config={dynamodb_config} --backend-config={bucket_config} && terraform workspace new {workspace} || terraform workspace select {workspace}\"\n print(tf_init_cmd) # let's leave this in\n\n os.system(remove_prev_run)\n os.system(cp_override_cmd)\n os.system(tf_init_cmd)\n\n if args.action.lower() == 'plan':\n # always auto approve 'plan' action\n os.system(tf_plan_cmd)\n elif args.action.lower() == 'plan-destroy':\n # always auto approve 'plan' action\n os.system(tf_plan_destroy_cmd)\n elif args.action.lower() == 'apply':\n if args.approve:\n # auto-approve flag enabled so skip user confirmation\n os.system(tf_plan_cmd)\n os.system(tf_apply_cmd)\n else:\n os.system(tf_plan_cmd)\n # confirm with user first\n if user_confirmation(\"Sure you want to APPLY {0}\".format(module_path)):\n os.system(tf_apply_cmd)\n else:\n print(\"User aborting...\")\n elif args.action.lower() == 'apply-destroy':\n if args.approve:\n os.system(tf_plan_cmd)\n os.system(tf_apply_cmd)\n else:\n # confirm with user first\n os.system(tf_plan_destroy_cmd)\n if user_confirmation(\"Sure you want to APPLY DESTROY {0}\".format(module_path)):\n os.system(tf_apply_cmd)\n else:\n print(\"User aborting...\")", "def _create_string_input_trainable_model():\n\n class BlockWithStringInputs(onnxblock.ForwardBlock):\n def __init__(self):\n super().__init__()\n self.cast = onnxblock.blocks.Cast(to=onnx.TensorProto.FLOAT)\n self.linear = onnxblock.blocks.Linear(4, 2)\n\n def build(self, string_input):\n return self.linear(self.cast(string_input))\n\n string_block = BlockWithStringInputs()\n with onnxblock.empty_base() as model_accessor:\n model_accessor.model.graph.input.extend(\n [\n onnx.helper.make_tensor_value_info(\"input\", onnx.TensorProto.STRING, [1, 4]),\n ]\n )\n _ = string_block(\"input\")\n\n return string_block.to_model_proto()" ]
[ "0.63068986", "0.6023051", "0.5813477", "0.5714911", "0.5690382", "0.5531973", "0.55067134", "0.5490141", "0.5472677", "0.54595083", "0.5441401", "0.5435935", "0.5368517", "0.5319013", "0.5307069", "0.5298142", "0.5294426", "0.5289352", "0.52519286", "0.5250053", "0.5224872", "0.5201036", "0.5175264", "0.51663953", "0.51645976", "0.5154294", "0.5152527", "0.5149556", "0.51124364", "0.5108633" ]
0.8060826
0
2. SELECTION PHASE. If a tree does not reproduce, it becomes extinct. This creates the need for competitive exclusion to eliminate trees with lower metric values and thereby limit the maximum number of trees in the forest. Initially, fast reproduction of trees takes place and all of them are included in the forest. The fitter trees reproduce more than the undesirable ones; here, "fitter" is measured either in terms of the objective or of novelty (in novelty search). The elimination mechanism is activated when the population exceeds the preselected maximum number of trees in the forest. To do so, the trees and their seeds are ranked, and those with lower fitness values are removed to keep the tree population manageable.
def select(self): def truncate(self): """ Truncates forest to maximum number of trees. """ self.population = self.population[:self.max_number_trees] def SortOnItem(list_, item_loc): """ Sorts based on a given item. """ templist = [elmt[item_loc] for elmt in list_] index = np.argsort(templist) return [list_[i] for i in index] # adds current seedlings to forest for tree in self.seedlings: # if tree does not competes with another existing one, adds it if tree not in self.population: self.population.append(tree) # sorts the trees of the forest in ascending values - minimization self.population = SortOnItem(self.population, item_loc=0) # removes unfit trees from forest truncate(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prune( tree, impurity_crit, dataSet, treeSeq ):\n\n\t\tsaved = {}\n\n\t\ttotal_leaf_impurity, num_leaves = DecisionTree._fetch(tree, impurity_crit, dataSet, saved)\n\n\t\tnodes, sets, G = saved['node'], saved['set'], saved['G']\n\n\t\t# choose TreeNode such that g is minimum to prune\n\t\tmin_g_ind = np.argmin(G)\n\t\tnode2Prune = nodes[min_g_ind]\n\t\tnode2Prune.value = DecisionTree._make_leaf(sets[min_g_ind], impurity_crit)\n\t\tnode2Prune.cut_off = None\n\n\t\t# get a new tree pruned\n\t\ttreeSeq['alpha'].append(G[min_g_ind])\n\t\ttreeSeq['tree'].append(tree)\n\t\ttreeSeq['num_leaves'].append(num_leaves-node2Prune.leaves()+1)\n\n\t\tif not (tree.left.cut_off is None and tree.right.cut_off is None):\n\n\t\t\tDecisionTree._prune(deepcopy(tree), impurity_crit, dataSet, treeSeq )\n\t\telse:\n\t\t\treturn", "def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. 
See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def forestPandas(data, resCol, maxDepth=None, percentage=70, numfeats = 15, fsize=5, selected=None):\n indices = data.index.tolist()\n trainingSets = {}\n percent = float(percentage)/100\n split = int(percent * len(indices) + 0.5)\n cols = data.columns.tolist() \n for i in range(fsize + 1):\n if selected == None:\n np.random.shuffle(cols)\n selected = cols[:15]\n selected.append(\"spam\")\n np.random.shuffle(indices)\n trainingSets[i] = {}\n trainingSets[i][\"data\"]= data[selected].loc[indices[:split + 1]]\n trainingSets[i][\"tree\"]= buildTreePandas(trainingSets[i][\"data\"], resCol, maxDepth=maxDepth) \n return trainingSets", "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores 
lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def test_rand_100_depth_remains_less_than_8():\n from bbst import Bst\n from random import shuffle\n max_depth = 0\n for x in range(10):\n rando = [x for x in range(100)]\n shuffle(rando)\n tree = Bst(rando)\n tree_depth = tree.depth()\n if tree_depth > max_depth:\n max_depth = tree_depth\n assert max_depth == 8", "def extra_trees_test(n_jobs=1):\n # model = models.RandomForest.ExtraTreesModel()\n # model.run('cv')\n\n # tune the model - 15 trees already gives .13 RMSE, I think that's slightly better than RF with that number of trees\n params = {\n 'n_estimators': [15, 50, 100, 250]\n }\n model = models.RandomForest.ExtraTreesModel(\n grid_search_parameters=params,\n grid_search_sample=0.5,\n n_jobs=n_jobs\n )\n model.run('grid_search', refit=True)\n # 2014-01-21 05:45:28 - Base - INFO - Found best parameters:\n # 2014-01-21 05:45:28 - Base - INFO - {'n_estimators': 250}\n # 2014-01-21 05:45:28 - Base - INFO - Predicting on holdout set\n # 2014-01-21 05:45:41 - classes - INFO - RMSE: 0.124530683233\n # 2014-01-21 05:45:41 - Base - INFO - RMSE on holdout set: 0.124530683233\n # 2014-01-21 05:45:41 - Base - INFO - Grid search completed in 8916.21896791\n # 2014-01-21 05:45:41 - Base - INFO - Model completed in 9332.45440102\n\n # As expected, more trees = better performance. Seems like the performance is on par/slightly better than random forest", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. 
\"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def improve_tree(tree, freq_dict):\n # todo", "def fit_tree_stump_forest(X_train: np.ndarray, y_train: np.ndarray, n_estimators: int) -> RandomForestClassifier:\n clf = RandomForestClassifier(n_estimators=n_estimators)\n clf = clf.fit(X_train, y_train)\n return clf", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n 
gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def MaxParsimonyNoTable(X, Tree, tip_row_dict, do_tips=False, naming=False):\n # 2 represents {0,1} set\n sp_to_arr = lambda sp_arr: np.array(sp_arr.todense().astype(np.int8))[0]\n wrap = lambda x: sp_to_arr(X[tip_row_dict[x.name]]) if x.is_leaf() and not do_tips else sp_to_arr(x.genotype)\n tree_len = 0\n for _ in Tree.traverse(): tree_len += 1\n for i, node in tqdm.tqdm(enumerate(Tree.traverse('postorder')), total=tree_len,\n desc='Ancestral Reconstruction: 1st pass'):\n if node.is_leaf():\n if not do_tips:\n node.genotype = X[tip_row_dict[node.name]]\n continue\n if naming: node.name = i\n children = [wrap(c) for c in node.children]\n res = children[0].copy()\n eq = np.equal(*children)\n res[children[0] == 2] = children[1][children[0] == 2] # 2 is the union {0,1}\n res[children[1] == 2] = children[0][children[1] == 2]\n res[(children[0] != 2) & (children[1] != 2) & ~eq] = 2\n node.genotype = sp.csr_matrix(res)\n\n post = Tree.traverse('preorder')\n root = next(post)\n root.random = (wrap(root) == 2)\n root.genotype[root.genotype == 2] = np.random.choice([1, 0], size=(root.genotype == 2).sum())\n for node in tqdm.tqdm(post, total=tree_len - 1, desc='Ancestral Reconstruction: 2nd pass'):\n if node.is_leaf(): continue\n parent_ = wrap(node.up)\n node_ = wrap(node)\n res = node_.copy()\n res[node_ == 2] = parent_[node_ == 2]\n node.random = (node.up.random) & (node_ == 2) # these are unstable positions - will not be counted\n node.genotype = sp.csr_matrix(res)\n\n return Tree", "def __init__(self, n_trees=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, \n max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,\n verbose=0, min_density=None, compute_importances=None): \n self.random_forest = RandomForestClassifier(n_trees, criterion, max_depth, 
min_samples_split, min_samples_leaf, \n max_features, max_leaf_nodes, bootstrap, oob_score, n_jobs, random_state,\n verbose, min_density, compute_importances)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def test_with_data(data):\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n unpruned = induce_node_tree(training_set, original_issues, \"D\", -1)\r\n pruned = prune_tree(unpruned, tuning_set)\r\n\r\n return pruned", "def max_depth_forest(self):\n return max(x.tree_.max_depth for x in self.result.estimators_)", "def decision(grid):\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()", "def recursive_feature_elimination(self):\n\t\tsvc = SVC(kernel=\"linear\")\n\t\tself.model = Pipeline([\n\t\t\t('feature_selection', RFE(estimator=svc, n_features_to_select=8, step=10)),\n\t\t\t('classification', self.model)\n\t\t\t])", "def __init__(self,num_trees=100, depth_limit=5, example_subsample_rate=0.4,\n attr_subsample_rate=0.4):\n\n # TODO: finish this.\n self.num_trees = num_trees\n self.depth_limit = depth_limit\n self.example_subsample_rate = example_subsample_rate\n self.attr_subsample_rate = attr_subsample_rate\n self.classifier = RandomForest(self.num_trees, self.depth_limit, self.example_subsample_rate,\n self.attr_subsample_rate)", "def guessTreeOpt(train, test, valid):\n best = findApproxDepth(train, valid, 5, 5)\n tree = DecisionTree(train)\n print(\"building tree from full set\")\n tree.buildTree(best[0], best[1], True)\n print(\"tree built, testing tree\")\n acc = testTreeF(tree, test)\n print(\"accuracy of:\", \"%.2f\" % (acc * 100))\n return tree", "def __init__(self, max_depth=None, 
criterion='gini', random_state=0):\n print(\"Initialize the model Decision Tree Classifier... \")\n self.random_state = random_state\n self.model = tree.DecisionTreeClassifier(max_depth=max_depth, criterion=criterion, random_state=random_state)", "def step_tree(self):\n if random.random() < self.world.f or self.any_neighbor_burning():\n self.set_state(\"orange\")", "def _next_to_prune(tree, children=None):\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)" ]
[ "0.62971836", "0.62759644", "0.6227639", "0.6085251", "0.6084228", "0.6072655", "0.6057905", "0.6017821", "0.6007997", "0.5916229", "0.58216655", "0.5750822", "0.5743819", "0.5695105", "0.5636572", "0.56322336", "0.5611677", "0.5590373", "0.5590373", "0.55826205", "0.55778784", "0.5545633", "0.55383354", "0.55381334", "0.5524706", "0.5510218", "0.5496254", "0.54962456", "0.5489816", "0.54662806" ]
0.7099714
0
Truncates forest to maximum number of trees.
def truncate(self): self.population = self.population[:self.max_number_trees]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_depth_forest(self):\n return max(x.tree_.max_depth for x in self.result.estimators_)", "def reset_max_depth(self) -> None:\n # The max depth is now calculated on the fly, so this is a no-op.\n pass", "def truncate_features(self):\n num_variable = len(self.Train_data['X'][0])\n for i in xrange(len(self.Train_data['X'])):\n num_variable = min([num_variable, len(self.Train_data['X'][i])])\n # truncate train, validation and test\n for i in xrange(len(self.Train_data['X'])):\n self.Train_data['X'][i] = self.Train_data['X'][i][0:num_variable]\n for i in xrange(len(self.Validation_data['X'])):\n self.Validation_data['X'][i] = self.Validation_data['X'][i][0:num_variable]\n for i in xrange(len(self.Test_data['X'])):\n self.Test_data['X'][i] = self.Test_data['X'][i][0:num_variable]\n return num_variable", "def set_tree_limit(self, n):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n _check_call(_LIB.TreeliteSetTreeLimit(self.handle, ctypes.c_size_t(n)))", "def deleteMaxTree(root):\n try:\n if (isRed(root['left'])):\n root = rotateRight(root)\n\n if (root['right'] is None):\n return None\n\n if ((not isRed(root['right'])) and\n ((not isRed(root['right']['left'])))):\n\n root = moveRedRight(root)\n\n root['right'] = deleteMaxTree(root['right'])\n root = balance(root)\n return root\n\n except Exception as exp:\n error.reraise(exp, 'RBT:deleteMinTree')", "def maximumDistance(self):\n from ete2 import Tree\n t = Tree(name='LUCA_root')\n empty_forest = {'sp':t,'gns':t,'fam':t,'ord':t,'cls':t,'phy':t,'kng':t}\n return self.distanceToTree(empty_forest,update_inner_attributes=False)", "def prune(self, n_leaves):\n self.tree_ = prune(self.tree_, n_leaves)\n return self", "def _subtree_below_maximum_leaves(self, root, threshold):\r\n\r\n nodes = root.get_terminals()\r\n return len(nodes) <= threshold", "def test_trunc_depth(self):\n from supvisors.statistics import StatisticsInstance\n instance = StatisticsInstance(12, 5)\n # test that the truc_depth method does nothing when less than 5 elements in list\n test_list = [1, 2, 3, 4]\n instance.trunc_depth(test_list)\n self.assertListEqual([1, 2, 3, 4], test_list)\n # test that the truc_depth method keeps only the last 5 elements in list\n test_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n instance.trunc_depth(test_list)\n self.assertListEqual([6, 7, 8, 9, 10], test_list)", "def _prune(self):\n while len(self.data) > self.limit:\n self.data.popleft()", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. 
\"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def _truncate(self):\n dif = len(self) - self._maxLen\n if dif > 0:\n #return\n self[:dif] = []", "def _trim_tree(state):\n for n in list(state.tree.leaf_node_gen):\n if n.type_str == TYPE_NODE_TAG:\n n.parent.child_list.remove(n)\n return _trim_tree(state)", "def resize_to_maximum(self):\n if self.initialized:\n max_size = self._compute_maximum_size()\n self.set_max_size(max_size)\n self.resize(max_size)", "def maxsize(self, maxsize):\n self.shape = (int(maxsize), ) + self.shape[1:]\n self.clear()", "def update_max_fringe_size(self, fringe_len):\n if self.max_fringe_size < fringe_len:\n self.max_fringe_size = fringe_len", "def take_max(self):\n return self.delete_first()", "def final_forest(element):\n if isinstance(element, TamariIntervalPoset):\n return element.initial_forest()\n elif element in DyckWords():\n binary_tree = element.to_binary_tree_tamari()\n elif element in BinaryTrees() or element in LabelledBinaryTrees():\n binary_tree = element\n else:\n raise ValueError(\"Do not know how to construct the initial forest of {}\".format(element))\n\n def get_relations(bt, start=1):\n r\"\"\"\n Recursive method to get the binary tree final forest relations\n with only one recursive reading of the tree.\n\n The vertices are being labelled with integers starting with\n ``start``.\n\n OUTPUT:\n\n - the indexes of the nodes on the left border of the tree\n (these become the roots of the forest)\n - the relations of the final forest (as a list of tuples)\n - the next available index for a node (size of tree +\n ``start``)\n \"\"\"\n if not bt:\n return [], [], start # leaf\n roots, relations, index = get_relations(bt[0], start=start)\n rroots, rrelations, rindex = get_relations(bt[1], start=index + 1)\n roots.append(index)\n relations.extend(rrelations)\n relations.extend([(j, index) for j in rroots])\n return roots, relations, rindex\n\n roots, relations, index = get_relations(binary_tree)\n return TamariIntervalPoset(index - 1, relations)", "def get_n_leaves(clf):\n leaves = clf.tree_.children_left == -1\n leaves = np.arange(0,clf.tree_.node_count)[leaves]\n return len(leaves)", "def del_max(self):\r\n maxVal = self.find_max()\r\n if maxVal is not None:\r\n self.items[1] = self.items[self.size]\r\n self.items[self.size] = None\r\n self.size -= 1\r\n self.perc_down(1)", "def sift_down_recursion(self, index):\n if self.size() == 0:\n return\n\n left = self.left_child(index)\n right = self.right_child(index)\n # if the element is leaf\n if left >= self.size():\n return\n\n max_child_index = left\n if right < self.size():\n if self.heap[right] > self.heap[left]:\n max_child_index = right\n\n # if already max heap, return\n if 
self.heap[index] >= self.heap[max_child_index]:\n return\n\n self.heap[index], self.heap[max_child_index] = self.heap[max_child_index], self.heap[index]\n\n index = max_child_index\n self.sift_down_recursion(index)", "def unsetMaxLevel(self):\n return _libsbml.QualitativeSpecies_unsetMaxLevel(self)", "def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree", "def test_remove_top_but_not_root(delete_tree):\n tree_size = delete_tree.size\n delete_tree.remove(\"tea\")\n assert delete_tree.size == tree_size - 1", "def merge_forest(self, forest):\n\t\tif len(forest) == 1:\n\t\t\treturn forest[0]\n\n\t\tfinal_tree = forest.pop()\n\t\ttrees_to_process = forest\n\t\twhile not len(trees_to_process) == 0:\n\t\t\ttree_in_question = trees_to_process.pop(0)\n\t\t\tif final_tree.contains(tree_in_question.value):\n\t\t\t\tnodes_to_process = [final_tree]\n\t\t\t\twhile not len(nodes_to_process) == 0:\n\t\t\t\t\tnode_in_question = nodes_to_process.pop()\n\t\t\t\t\tif node_in_question.value == tree_in_question.value:\n\t\t\t\t\t\tnode_in_question.children += [tree_in_question]\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tif not len(node_in_question.children) == 0:\n\t\t\t\t\t\t\tnodes_to_process += node_in_question.children\n\t\t\telse:\n\t\t\t\ttrees_to_process += [final_tree]\n\t\t\t\tfinal_tree = tree_in_question\n\t\treturn final_tree", "def largest_killing_spree(self, largest_killing_spree):\n\n self._largest_killing_spree = largest_killing_spree", "def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break", "def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_depth]\n if l[current_node] != -1 and r[current_node] != -1:\n get_node_depths_(l[current_node], current_depth + 1, l, r, depths)\n get_node_depths_(r[current_node], current_depth + 1, l, r, depths)\n\n depths = []\n get_node_depths_(0, 0, tree.children_left, tree.children_right, depths) \n return max(depths)" ]
[ "0.60658157", "0.5753063", "0.57320607", "0.5690253", "0.56196177", "0.557107", "0.5399001", "0.53606296", "0.5356624", "0.5334575", "0.53127694", "0.529486", "0.5244779", "0.52287835", "0.52241135", "0.5215633", "0.5206999", "0.51637185", "0.5151103", "0.5127064", "0.5113451", "0.5082578", "0.50764436", "0.50717384", "0.5068558", "0.5068275", "0.50567216", "0.50459456", "0.50371647", "0.50360316" ]
0.7533811
0
3. REPRODUCTION PHASE. The trees produce seeds based on their relative fitness, and these seeds are then spread over the problem space. Each seed, in turn, may grow into a new tree depending on external factors. The number of seeds produced by a tree varies linearly, from max_seeds for the tree with the lowest objective value to min_seeds for the one with the highest value (i.e. a minimization problem).
def reproduce(self): def compute_seeds(fitness): """ Computes the number of seeds given a fitness value. """ seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \ (self.max_seeds-self.min_seeds) + self.min_seeds return round(seeds) # evaluates max and min fitness for current year max_fitness = max(tree[0] for tree in self.population) min_fitness = min(tree[0] for tree in self.population) # computes the number of seeds produced per tree for tree in self.population: tree[1].seeds = int(compute_seeds(tree[0]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. \"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def generate(\n seeds=10,\n param_num_nodes=7,\n mode='train',\n param_dim=10,\n param_sel=100,\n param_mu=10,\n param_br=0.05,\n param_activity_wt=None,\n A=None,\n sp_to_id=None,\n min_coord=None,\n max_coord=None,\n org_pts=None,\n ):\n global dim, sel, mu, br, activity_wt, tree_lc, tree_rc, num_nodes\n\n dim=param_dim\n sel=param_sel\n mu=param_mu\n br=param_br\n activity_wt=param_activity_wt\n num_nodes = param_num_nodes\n\n sp_root = 0\n tree = None\n\n if mode == 'train':\n tree, tree_lc, tree_rc = generate_tree(sp_root, num_nodes)\n if param_activity_wt is None:\n # weights for the linear activity function\n num_wts = int(((dim * (dim + 1))/2) + 1)\n activity_wt = np.random.normal(0, 1, num_wts)\n\n if org_pts is None:\n org_pts = []\n # simulate data points\n # format: exampleID, species, values\n # region, species, coord1, coord2, ...., activity_value\n\n for i in tqdm(range(int(seeds))):\n pt_id = i\n\n # pick a random point of d-dimension\n rand_pt = np.random.uniform(min_coord, max_coord, dim)\n curr_pt = np.append([pt_id, sp_root], rand_pt)\n curr_activity = get_activity(modify_pt(rand_pt), activity_wt)\n # print('curr_pt:', curr_pt, 'curr_activity:', curr_activity); exit(0)\n org_pts.append(np.append(curr_pt, curr_activity))\n\n generated_points = []\n full_org_pts = []\n\n if mode == 'train':\n pool = Pool(16)\n sample_bag = pool.map(generate_bag, org_pts)\n 
for item in sample_bag:\n for val in item:\n val = list(val)\n full_org_pts.append(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n else:\n for val in org_pts:\n val = list(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n\n return generated_points, activity_wt, org_pts, full_org_pts, tree", "def test_rand_100_depth_remains_less_than_8():\n from bbst import Bst\n from random import shuffle\n max_depth = 0\n for x in range(10):\n rando = [x for x in range(100)]\n shuffle(rando)\n tree = Bst(rando)\n tree_depth = tree.depth()\n if tree_depth > max_depth:\n max_depth = tree_depth\n assert max_depth == 8", "def run(self):\n population_p = self.create_population()\n population_p = self.sort_population(population_p)\n best_x = population_p[0]\n for k in range(self.iteration):\n population_r = []\n # random.shuffle(population_p)\n for i in range(0, self.population_length, 2):\n mother = 0\n father = 1\n children = [self.random_chromosome(), self.random_chromosome()]\n while (mother == father) or (children[0] in population_p) or (children[1] in\n population_p):\n mother = random.randint(0, self.population_length - 1)\n father = random.randint(0, self.population_length - 1)\n children = self.cross(population_p[mother], population_p[father])\n children[0] = self.mutate(children[0])\n children[1] = self.mutate(children[1])\n\n population_r.append(children[0])\n population_r.append(children[1])\n\n population_p = self.new_population(population_p, population_r)\n if self.fitness(population_p[0]) < self.fitness(best_x):\n best_x = population_p[0]\n\n # print(population_p)\n return best_x", "def compute_seeds(fitness):\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)", "def build_random_function(min_depth, max_depth):\n\n # your code goes here", "def simulate_graph(seed, cluster_sizes, del_factor, ins_factor):\n rand.seed(seed)\n cluster_boundaries = np.cumsum(cluster_sizes)\n print(\"#seed:\", seed)\n print(\"#deletion factor:\", del_factor)\n print(\"#insertion factor:\", ins_factor)\n optimal_costs = np.array([0])\n for c in range(0, len(cluster_sizes)-1):\n n_c = cluster_sizes[c+1]\n offset_c = cluster_boundaries[c]\n edges_c = generate_edges(n_c, offset_c)\n disturb_cluster(n_c, offset_c, edges_c, del_factor, optimal_costs)\n additional_edges(cluster_boundaries, ins_factor, optimal_costs)\n print(\"#optimal costs:\", optimal_costs)", "def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def greedy_build(nodes, priors=None, cutoff=200, 
considered=set(), uniq='', targets=[]):\n\n\t# Tracks frequency of states for each character in nodes\n\tcharacter_mutation_mapping = defaultdict(int)\n\n\t# G models the network that is returned recursively\n\tG = nx.DiGraph()\n\n\troot = root_finder(nodes)\n\n\t# Base case check for recursion, returns a graph with one node corresponding to the root of the remaining nodes\n\tif len(nodes) <= cutoff or len(nodes) == 1:\n\t\troot = root_finder(nodes)\n\t\tG.add_node(root)\n\t\treturn G, [[root, nodes]]\n\n\t# Accounting for frequency of mutated states per character, in order to choose the best split\n\tfor node in nodes:\n\t\tnode_list = node.split(\"_\")[0].split('|')\n\t\tfor i in range(0, len(node_list)):\n\t\t\tchar = node_list[i]\n\t\t\tif char != '0' and char != '-':\n\t\t\t\tcharacter_mutation_mapping[(str(i), char)] += 1\n #if char != '0':\n # if char == \"-\":\n # character_mutation_mapping[(str(i), char)] -= 1\n # else:\n # character_mutation_mapping[(str(i), char)] += 1\n\n\t# Choosing the best mutation to split on (ie character and state)\n\tcharacter, state = 0, 0\n\tmax_cost = 0\n\n\tmin_prior = 1\n\tif priors:\n\t\tfor i in priors.keys():\n\t\t\tfor j in priors[i].keys():\n\t\t\t\tmin_prior = min(min_prior, priors[i][j])\n\n\tfor i,j in character_mutation_mapping:\n\t\tif not (i,j) in considered:\n\t\t\tif not priors:\n\t\t\t\tif max_cost < character_mutation_mapping[(i, j)]:\n\t\t\t\t\tmax_cost = character_mutation_mapping[(i, j)]\n\t\t\t\t\tcharacter, state = i, j\n\t\t\telse:\n\t\t\t\tif j not in priors[int(i)]:\n\t\t\t\t\tpriors[int(i)][j] = min_prior\n\t\t\t\tif max_cost < -np.log(priors[int(i)][j]) * character_mutation_mapping[(i, j)]:\n\t\t\t\t\tmax_cost = -np.log(priors[int(i)][j]) * character_mutation_mapping[(i, j)]\n\t\t\t\t\tcharacter, state = i, j\n\tcharacter = int(character)\n\n\n\t# If there is no good split left, stop the process and return a graph with the remainder of nodes\n\tif character == 0 and state == 0:\n\t\tif len(nodes) == 1:\n\t\t\tG.add_node(nodes[0])\n\t\telse:\n\t\t\tfor i in range(0, len(nodes)):\n\t\t\t\tif nodes[i] != root:\n\t\t\t\t\tG.add_edge(root, nodes[i])\n\t\treturn G, []\n\n\t# Splitting nodes based on whether they have the mutation, don't have the mutation, or are NA('-') in that character\n\t# Right split is where nodes with the mutation go, everyone else goes to left split or NA chars\n\tleft_split, right_split, NA_chars = [], [], []\n\tright_split_temp = []\n\tleft_split_temp = []\n\tfor node in nodes:\n\t\tnode_list = node.split('|')\n\t\tif node_list[character] == state:\n\t\t\tright_split.append(node)\n\t\telif node_list[character] == '-':\n\t\t\tNA_chars.append(node)\n\t\telse:\n\t\t\tleft_split.append(node)\n\n\n\t# Seperates all nodes with NA in the character chosen to be split upon\n\t# Puts in right split or left split based on which list shares more mutated characters with this string\n\tfor node in NA_chars:\n\t\tright_split_score = 0\n\t\tleft_split_score = 0\n\t\tnode_list = node.split('|')\n\t\tnum_not_missing = len([n for n in node_list if n != \"-\"])\n\t\tfor i in range(0, len(node_list)):\n\t\t\tif node_list[i] != '0' and node_list[i] != '-':\n\t\t\t\tfor node_2 in left_split:\n\t\t\t\t\tnode2_list = node_2.split('|')\n\t\t\t\t\tif node_list[i] == node2_list[i]:\n\t\t\t\t\t\tleft_split_score += 1\n\t\t\t\tfor node_2 in right_split:\n\t\t\t\t\tnode2_list = node_2.split('|')\n\t\t\t\t\tif node_list[i] == node2_list[i]:\n\t\t\t\t\t\tright_split_score += 1\n\n\t\tavg_left_split_score = left_split_score / 
float(len(left_split) * num_not_missing + 1)\n\t\tavg_right_split_score = right_split_score / float(len(right_split) * num_not_missing + 1)\n\n\t\tif avg_left_split_score < avg_right_split_score:\n\t\t\tright_split_temp.append(node)\n\t\telse:\n\t\t\tleft_split_temp.append(node)\n\n\tright_split += right_split_temp\n\tleft_split += left_split_temp\n\n\t# Add character, state that split occurred to already considered mutations\n\tconsidered.add((str(character), state))\n\tG = nx.DiGraph()\n\t#splitter = str(character) + \" \" + str(state) + \" (\" + uniq + \")\"\n\tsplitter = root\n\n\t# Recursively build left side of network (ie side that did not mutation at the character with the specific state)\n\tG.add_node(splitter)\n\tleft_subproblems = []\n\tleft_network = None\n\tif len(left_split) != 0:\n\t\tleft_root = root_finder(left_split)\n\t\t# if left_root not in left_split and left_root in targets:\n\t\t# \tleft_root = left_root + \"_unique\"\n\n\t\tleft_network, left_subproblems = greedy_build(left_split, priors, cutoff, considered.copy(), uniq + \"0\", targets=targets)\n\n\t\tleft_nodes = [node for node in left_network.nodes() if left_network.in_degree(node) == 0]\n\t\tdup_dict = {}\n\t\tfor n in left_network:\n\t\t\tif n in list(G.nodes()) and n != left_root:\n\t\t\t\tdup_dict[n] = n + \"_\" + str(hashlib.md5(left_root.encode('utf-8')).hexdigest())\n\t\tleft_network = nx.relabel_nodes(left_network, dup_dict)\n\t\tG = nx.compose(G, left_network)\n\t\tif root != left_root:\n\t\t\tG.add_edge(splitter, left_root, weight=0, label=\"None\")\n\n\t# Recursively build right side of network\n\tright_network, right_subproblems = greedy_build(right_split, priors, cutoff, considered.copy(), uniq + \"1\", targets=targets)\n\tright_nodes = [node for node in right_network.nodes() if right_network.in_degree(node) == 0]\n\tright_root = root_finder(right_split)\n\n\tdup_dict = {}\n\tfor n in right_network:\n\t\tif n in list(G.nodes()) and n != right_root:\n\t\t\tdup_dict[n] = n + \"_\" + str(hashlib.md5(right_root.encode('utf-8')).hexdigest())\n\tfor n in dup_dict:\n\t\trename_dict = {n: dup_dict[n]}\n\t\tif right_network.out_degree(n) != 0:\n\t\t\tright_network = nx.relabel_nodes(right_network, rename_dict)\n\t\telse:\n\t\t\trename_dict = {n: dup_dict[n]}\n\t\t\tG = nx.relabel_nodes(G, rename_dict)\n\n\tG = nx.compose(G, right_network)\n\t# if right_root not in right_split and right_root in targets:\n\t# \tright_root = right_root + \"_unique\"\n\t#for node in right_nodes:\n\tif root != right_root:\n\t\tif not priors:\n\t\t\tG.add_edge(splitter, right_root, weight=1, label = str(character) + \": 0 -> \" + str(state))\n\t\telse:\n\t\t\tG.add_edge(splitter, right_root, weight=-np.log(priors[int(character)][state]), label=str(character) + \": 0 -> \" + str(state))\n\n\n\treturn G, left_subproblems + right_subproblems", "def expand_tree(self, N=1):\n # type: (int) -> None\n assert self._initialized, 'Search not initialized.'\n for _ in range(N): \n x_rand = self.sample_free()\n x_nearest = self.nearest(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n if self.coll_free(x_nearest, x_new):\n self.index+=1\n X_near = [x for x in self.near(x_new) if self.coll_free(x, x_new)]\n cost_min = self.costs[self.research_index(self.nodes,x_nearest)][1] + self.dist(x_nearest, x_new)\n x_min = x_nearest\n for x in X_near:\n cost = self.costs[self.research_index(self.nodes,x)][1] + self.dist(x, x_new)\n if cost < cost_min:\n cost_min = cost\n x_min = x\n \n self.nodes.append(x_new)\n j=self.research_index(self.nodes,x_min)\n 
self.parents[self.index,j]=1\n self.costs[self.index] = (x_new,self.costs[j][1] + self.dist(x_min, x_new))\n for x in X_near:\n k=self.research_index(self.nodes,x)\n if self.costs[self.index][1] + self.dist(x_new, x) < self.costs[k][1]:\n self.parents[self.index]=np.zeros(self.N)\n self.parents[self.index,k] = 1\n self.costs[k] = (self.costs[k][0],self.costs[self.index][1] + self.dist(x_new, x))", "def forestPandas(data, resCol, maxDepth=None, percentage=70, numfeats = 15, fsize=5, selected=None):\n indices = data.index.tolist()\n trainingSets = {}\n percent = float(percentage)/100\n split = int(percent * len(indices) + 0.5)\n cols = data.columns.tolist() \n for i in range(fsize + 1):\n if selected == None:\n np.random.shuffle(cols)\n selected = cols[:15]\n selected.append(\"spam\")\n np.random.shuffle(indices)\n trainingSets[i] = {}\n trainingSets[i][\"data\"]= data[selected].loc[indices[:split + 1]]\n trainingSets[i][\"tree\"]= buildTreePandas(trainingSets[i][\"data\"], resCol, maxDepth=maxDepth) \n return trainingSets", "def initialisation(Rsize, config, n_global_in, n_global_out, ke):\n # Creating population of Rsize*Rsize new random individuals\n # population = [[Individual(config, n_global_in, n_global_out)]*Rsize for _ in range(Rsize)]\n reef = [Individual(config, n_global_in, n_global_out) for _ in range(Rsize * Rsize)]\n print \"Reef created with \" + str(len(reef)) + \" solutions\"\n print \"Original size: \" + str(len(reef))\n\n # Eval population\n\n reef, count_evaluations = eval_population(reef, ke)\n # for ind in reef:\n # print str(ind.fitness)\n\n # Calculating fitness mean and std deviation\n fitness = fitness_mean_std(reef)\n\n fitness_mean_validation = fitness[\"validation\"][\"mean\"]\n fitness_std_validation = fitness[\"validation\"][\"std\"]\n fitness_max_validation = fitness[\"validation\"][\"max\"]\n fitness_min_validation = fitness[\"validation\"][\"min\"]\n\n # Deleting corals according to formula\n # It is not the same that the depredation one\n # new_population = [[ind if initial_deletion_check(ind.fitness, fitness_mean, fitness_std) else None for ind in line ] for line in population]\n new_reef = [\n ind if initial_deletion_check(ind.fitness[\"accuracy_validation\"], fitness_mean_validation, fitness_std_validation) else None for\n ind in reef]\n\n print \"Population reduced to: \" + str(len(filter(lambda w: w is not None, new_reef))) + \" solutions\"\n\n # for ind in filter(lambda w: w is not None, new_reef):\n # print str(ind.fitness)\n\n return new_reef", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def _mutate(self, tree, spread, dtype):\n\n # defines wrapper functions\n def uniform(lower, upper):\n \"\"\"\n Draws a random float number from a uniform distribution\n given by U[lower, upper].\n \"\"\"\n\n return lower + random.random() * (upper - lower)\n\n def normal(mean, std):\n \"\"\"\n Draws a random float number from a normal distribution\n with mean 'mu' and standard deviation 'sigma': N[mu, sigma].\n \"\"\"\n\n return random.gauss(mean, std)\n\n # creates a seedling based on the DNA of its mother tree\n new_tree = copy.deepcopy(tree)\n\n # trade-off 
between exploitation and exploration\n if (random.random() > self.epsilon):\n\n # mutates initial solution vector - i.e. local seeding\n for i in range(self.dim):\n if (random.random() < self.mut_proba):\n if (dtype == \"normal\"):\n new_tree.vector[i] += normal(0, spread)\n\n elif (dtype == \"uniform\"):\n new_tree.vector[i] += uniform(-1, 1)\n\n else:\n raise AttributeError(\"'dtype' must either be 'normal' or 'uniform'.\")\n\n else:\n\n # explores new region of the search space - i.e. global seeding\n new_tree = Tree(self.lower, self.upper)\n\n return new_tree", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def train(eps, ntrees, min_size, max_splits, nfeats_test, resample=True):\n # TODO your code here\n trees = []\n for _ in range(ntrees):\n # repeatedly add values from the list of expression profiles without removal to a set\n # (so there could be duplicate expression profiles in the set we are creating) until the size of the set\n # is equal to the size of the original list of profiles\n if resample:\n resampled_eps = []\n for _ in range(len(eps)):\n idx = random.randint(0, len(eps) - 1)\n resampled_eps.append(eps[idx])\n trees.append(\n ExpressionDecisionTree.train(resampled_eps, len(resampled_eps), min_size, max_splits, nfeats_test))\n else:\n trees.append(\n ExpressionDecisionTree.train(eps, len(eps), min_size, max_splits, nfeats_test))\n return ExpressionRandomForest(trees)", 
"def repair(self, population_size=POPULATION_SIZE, iterations=100):\n self.validate()\n\n population = self.initial_population(population_size)\n\n last_key = ast.dump(self.target_tree)\n\n for iteration in range(iterations):\n population = self.evolve(population)\n\n best_tree = population[0]\n fitness = self.fitness(best_tree)\n\n if self.log:\n print(f\"Evolving population: \"\n f\"iteration{iteration:4}/{iterations} \"\n f\"fitness = {fitness:.5} \\r\", end=\"\")\n\n if self.log >= 2:\n best_key = ast.dump(best_tree)\n if best_key != last_key:\n print()\n print()\n self.log_tree(f\"New best code (fitness = {fitness}):\",\n best_tree)\n last_key = best_key\n\n if fitness >= 1.0:\n break\n\n if self.log:\n print()\n\n if self.log and self.log < 2:\n self.log_tree(f\"Best code (fitness = {fitness}):\", best_tree)\n\n best_tree = self.reduce(best_tree)\n fitness = self.fitness(best_tree)\n\n self.log_tree(f\"Reduced code (fitness = {fitness}):\", best_tree)\n\n return best_tree, fitness", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def build_random_trees(rows, n_features, max_depth, min_size, n_trees, random_dataset_size):\n trees = []\n for tree_number in range(n_trees):\n print(\"Building tree number:\", tree_number, \"of\", n_trees)\n # Select random dataset from original dataset\n random_dataset = select_random_rows(rows, random_dataset_size)\n\n # Select random features (columns)\n random_features = []\n for random_feature in range (n_features):\n # generate random index number to pick column\n random_column = randrange(len(rows))\n random_features.append(random_column)\n # generate the random tree with randomly picked features (columns) and a random dataset\n random_tree = build_single_random_tree(random_dataset, random_features, max_depth, min_size, 1)\n # add to list of trees\n trees.append(random_tree)\n return trees", "def run(self, verbose=False):\n\n cost = {}; cost[\"best\"] = []; cost[\"mean\"] = []\n for i in range(self.max_iters):\n\n # prints out information at current cycle\n if verbose:\n print(\"Iteration: {}\".format(i),\n \"Fitness: {}\".format(self.forest[0][0]))\n\n # reproduction phase\n self.reproduce()\n\n # seed dispersal phase\n self.seedlings = []\n for tree in self.population:\n self.disperse(tree[1])\n tree[1].year += 1\n\n # selection phase\n self.select()\n\n # decays exploration parameters\n if (self.epsilon > 0):\n self.epsilon -= self.epsilon_decay\n\n # stores statistics and updates counter of iterations\n cost[\"best\"].append(self.population[0][0])\n cost[\"mean\"].append( sum( [ tree[0] for tree in self.population ] )\\\n / len(self.population) )\n self.iteration += 1\n\n return cost", "def evolve(self):\n self.generation = 0\n start_time = time.time()\n\n # while the termination criteria is not satisfied, makes another generation\n while not self.termination_criteria.satisfied(self.generation, time.time()-start_time, self.population):\n self.generation += 1\n #print str(self.generation)\n next_generation = []\n\n if self.elitism:\n # Keeps the 10% best 
individuals\n best_individuals = heapq.nsmallest(int(0.1*self.population_size), self.population, lambda individual: individual.get_fitness())\n next_generation += copy.deepcopy(best_individuals)\n\n # select genetic operation probabilistically\n # this is a roulette wheel selection\n operations = numpy.random.choice(['reproduction', 'crossover', 'mutation'], size=self.population_size, p=[self.reproduction, self.crossover, self.mutation]).tolist()\n individuals = numpy.random.choice(self.population, p=self.normalized_fitness, size=2*self.population_size, replace=True).tolist()\n\n while len(next_generation) < self.population_size:\n operation = operations.pop()\n individual = individuals.pop()\n individual.get_fitness() # enforce fitness calculation\n\n if operation == 'reproduction':\n next_generation.append(individual)\n elif operation == 'crossover':\n individual2 = individuals.pop()\n individual2.get_fitness() # enforce fitness calculation\n individual1, individual2 = individual.crossover(individual2)\n next_generation.append(individual1)\n next_generation.append(individual2)\n elif operation == 'mutation':\n individual1 = individual.mutate()\n next_generation.append(individual1)\n\n self.population = next_generation\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n\n mean = numpy.mean(self.population_fitness)\n std = numpy.std(self.population_fitness)\n min = self.population_fitness.min()\n\n info_mean = pandas.DataFrame([[self.generation, mean, min, std]], columns=[\"generation\", \"mean\", \"min\", \"std\"])\n self.generation_info = self.generation_info.append(info_mean, ignore_index=True)", "def stochastic_universal_selection(self, fitness, num_parents):\n\n fitness_sum = numpy.sum(fitness)\n if fitness_sum == 0:\n self.logger.error(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n raise ZeroDivisionError(\"Cannot proceed because the sum of fitness values is zero. 
Cannot divide by zero.\")\n probs = fitness / fitness_sum\n probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.\n probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.\n\n curr = 0.0\n\n # Calculating the probabilities of the solutions to form a roulette wheel.\n for _ in range(probs.shape[0]):\n min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]\n probs_start[min_probs_idx] = curr\n curr = curr + probs[min_probs_idx]\n probs_end[min_probs_idx] = curr\n probs[min_probs_idx] = 99999999999\n\n pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.\n first_pointer = numpy.random.uniform(low=0.0, \n high=pointers_distance, \n size=1)[0] # Location of the first pointer.\n\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n if self.gene_type_single == True:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])\n else:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)\n\n parents_indices = []\n\n for parent_num in range(num_parents):\n rand_pointer = first_pointer + parent_num*pointers_distance\n for idx in range(probs.shape[0]):\n if (rand_pointer >= probs_start[idx] and rand_pointer < probs_end[idx]):\n parents[parent_num, :] = self.population[idx, :].copy()\n parents_indices.append(idx)\n break\n\n return parents, numpy.array(parents_indices)", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = 
\"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def __call__(self, s, n=1000):\n\n root = StateNode(None, s, self.game)\n \n if root.parent is not None:\n raise ValueError(\"Root's parent must be None.\")\n \n for _ in range(n):\n #selection\n node = _get_next_node(root, self.tree_policy)\n #simulation\n node.reward = self.default_policy(node)\n #print(node.reward)\n #back\n self.backup(node)\n \n root.reset(copy.deepcopy(self.game_bak))\n \n #for i in root.children:\n # print(root.children[i].__dict__)\n # for j in root.children[i].children:\n # print(root.children[i].children[j].__dict__)\n # print(\"=======\")\n return rand_max(root.children.values(), key=lambda x: x.q).action, rand_max(root.children.values(), key=lambda x: x.q).q", "def evolve(population, targetSum, targetProduct, retain=0.2, random_select=0.05, mutate=0.01):\n\n graded = [ ( fitness(x, targetSum,targetProduct), x ) for x in population]\n graded = [ x[1] for x in sorted(graded) ]\n retain_length = int(len(graded) * retain)\n parents = graded[:retain_length]\n\n # randomly add other individuals to promote genetic\n # diversity\n for individual in graded[retain_length:]:\n if random_select > random.random():\n parents.append(individual)\n\n # crossover parents to create offspring\n #print(\"starting on crossover\")\n desired_length = len(population) - len(parents)\n children = []\n while len(children) < desired_length:\n male = randint(0, len(parents) - 1)\n female = randint(0, len(parents) -1)\n if male != female:\n male = parents[male]\n female = parents[female]\n half = int(len(male) / 2)\n child = male[: half] + female[half:]\n children.append(child)\n\n # mutate some individuals\n #print(\"starting on mutation\")\n for individual in children:\n if mutate > random.random():\n half = int(len(individual) / 2 )\n pos_geneSum = randint(0, (half - 1))\n pos_geneProd = randint(half, (len(individual) - 1))\n tmp = individual[pos_geneSum]\n individual[pos_geneSum] = individual[pos_geneProd]\n individual[pos_geneProd] = tmp\n\n parents.extend(children)\n return parents", "def variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = 
list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]" ]
[ "0.6857431", "0.6693448", "0.620608", "0.61089987", "0.60859585", "0.60058355", "0.59534144", "0.5926585", "0.5920129", "0.59118456", "0.58440024", "0.5819384", "0.5805844", "0.5803149", "0.5793257", "0.5775363", "0.5769751", "0.5761044", "0.5759271", "0.5751068", "0.5741327", "0.5735114", "0.57342404", "0.573164", "0.5725142", "0.5724565", "0.57161117", "0.571406", "0.57060975", "0.56925696" ]
0.784303
0
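The row above pairs the reproduction-phase description with its `reproduce`/`compute_seeds` implementation. Below is a minimal standalone sketch of the linear fitness-to-seed-count mapping that the query describes; it is an illustration only, not the stored code. The function name `allocate_seeds` and the defaults `min_seeds=1`, `max_seeds=10` are assumed for the example, and it follows the query's minimization convention in which the lowest-fitness tree receives the most seeds.

```python
def allocate_seeds(fitnesses, min_seeds=1, max_seeds=10):
    # Linear interpolation between min_seeds and max_seeds, assuming a
    # minimization problem: the lowest (best) fitness gets max_seeds,
    # the highest (worst) fitness gets min_seeds.
    lo, hi = min(fitnesses), max(fitnesses)
    span = (hi - lo) or 1.0  # guard against division by zero when all fitnesses are equal
    return [round(min_seeds + (hi - f) / span * (max_seeds - min_seeds)) for f in fitnesses]

if __name__ == "__main__":
    print(allocate_seeds([3.2, 7.5, 1.1, 9.9]))  # [8, 3, 10, 1]
```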
Computes the number of seeds given a fitness value.
def compute_seeds(fitness): seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \ (self.max_seeds-self.min_seeds) + self.min_seeds return round(seeds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_fitness(self, **kwargs):\n self.__fitness = self.fitness_function.calculate(self.__genes, **kwargs)\n self.num_fitness_eval += 1\n return self.__fitness", "def fitness(self):\n # TO BE DECIDED\n return 1", "def calcFitness (self) :\n fitnessArray = [[8, 4, 2, 1],\n [16, 8, 4, 2],\n [32, 16, 8, 4],\n [64, 32, 16, 8]]\n # fitnessArray = [[160, 80, 5, 4],\n # [320, 40, 4, 3],\n # [640, 20, 3, 2],\n # [1280, 10, 2, 1]]\n fitness = 0\n for k in range(4) :\n for i in range (4) :\n fitness += self.grid[k,i] * fitnessArray[k][i]\n return (fitness / 100)", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def fitness_function(self, population: List[Network]) -> List[Union[float, int]]:\n # The seed changes\n self.last_used_seed += 1\n\n # Snakes are re-generated\n snakes = []\n for n in population:\n snakes.append(Snake(11, Experiment.ExperimentAI(n)))\n\n # Metrics are calculated\n scores, times = self.snake_game.simulate(snakes, self.last_used_seed)\n\n # The fitnesses are calculated\n fitnesses = []\n for i in range(len(scores)):\n f = scores[i]*(1.0 + 1.0/float(times[i]))\n fitnesses.append(f)\n\n return fitnesses", "def evaluate(self, state):\n\n fitness = np.sum(state)\n self.num_evals += 1\n #print(self.num_evals)\n return fitness", "def fitness(dna):\n fitness = 0\n for c in range(DNA_SIZE):\n if dna[c] == OPTIMAL[c]:\n fitness += 1\n return fitness", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def calculate_fitness_value(self):\n sequence = ''.join(self.genes)\n if sequence in seq_to_fitness:\n self.fitness_value = seq_to_fitness[sequence]\n else:\n self.fitness_value = polly_stats.get_amount_of_bad_regions(\n self.genes, self.environment)", "def fitness(ind):\n return kNN.distance(ind),", "def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )", "def evaluate(self, state):\n\n fitness = 0\n\n for i in range(1, len(state)):\n if state[i] != state[i - 1]:\n fitness += 1\n\n return fitness", "def _calculate_fitness(self):\n pass", "def calculate_fitness_test(self, **kwargs):\n if self.genes_test is None:\n raise ValueError(\"Genes test is not set!\")\n\n self.__fitness_test = self.fitness_function.calculate(self.__genes_test, **kwargs)\n self.num_fitness_eval += 1", "def fitness(self):\n pass", "def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)", "def average_fitness(individuals):\n fitness_num = 0\n for individual in individuals:\n fitness = individual.get_fitness()\n fitness_num += fitness\n return fitness_num / len(individuals)", "def calc_sum_fitness(self):\n fitness: float = 0\n for c in self.characters:\n fitness += c.fitness\n self.sum_fitness = round(fitness, 3)", "def fitness(self) -> float:\n return self._fitness", "def runcount(test_keys, sigma, sigma_max, sigma_step,\n npoints_min, npoints_max, npoints_step):\n run = 1\n for key in test_keys:\n if key:\n while sigma < sigma_max:\n npoints = npoints_min\n while npoints < 
npoints_max:\n npoints += npoints_step\n run += 1\n sigma += sigma_step\n return run", "def fitness_function(neural_net):\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness", "def compute_tot_fitness(fitness_function, pop):\n probs = np.zeros(len(pop)) # list to house probabilites\n best_member = ''\n best_fitness = -10**18\n total_fitness = 0 # The sum of of all the fitness values from the population.\n for i, chromosome in enumerate(pop):\n new_fitness = fitness_function(chromosome)\n if new_fitness > best_fitness:\n best_member = chromosome\n best_fitness = new_fitness\n total_fitness += new_fitness\n probs[i] = new_fitness\n probs = probs / total_fitness\n return total_fitness, best_fitness, best_member, probs", "def fitness(individual):\n different_pos = 0\n return different_pos", "def fitness(self, solution):\n cur_fit = 0\n for i in range(self.N):\n cur_fit += self.dist(solution[i % self.N], solution[(i + 1) % self.N])\n return cur_fit", "def calculate_cluster_fitness(self, cluster_id: ObjectId):\n\n genomes = self.genome_repository.get_genomes_in_cluster(cluster_id)\n\n cluster_fitness = 0\n\n for genome in genomes:\n cluster_fitness += genome.fitness\n if cluster_fitness == 0:\n return 0\n\n return cluster_fitness / len(list(genomes))", "def _fitness(individual, X, y):\n yhat = individual.evaluate(X)\n return ((y - yhat) ** 2).sum()", "def fitness(individual, n_clusters=3, n_seeds=5):\n\n dataframe = common.scale_dataframe(individual)\n corr = abs(individual.dataframe.corr().iloc[0, 1])\n differences = []\n for seed in range(n_seeds):\n km = KMeans(n_clusters, random_state=seed).fit(dataframe)\n differences.append(silhouette_score(dataframe, km.labels_) - corr)\n\n best = max(differences)\n return best", "def fitness(self):\n return (len(self.body)**2) * self.age", "def get_overall_fitness(self):\n total_fitness = 0\n for chromosome_list in self.chromo_list:\n if chromosome_list:\n for chromosomes in chromosome_list:\n total_fitness += chromosomes.fitness\n\n return float(total_fitness/(self.number_chromosomes*\\\n float(self.best_fitness)))", "def calc_fitness_by_gen(self):\r\n f_sum = 0\r\n # first loop gives us the sum of the fitness\r\n for c, _ in self.temp_hist_by_gen.items():\r\n f_sum += c.fitness()\r\n # now we calc the chances by fitness of each one\r\n for c, _ in self.temp_hist_by_gen.items():\r\n self.temp_hist_by_gen[c] = c.fitness() / f_sum" ]
[ "0.6422853", "0.63974106", "0.62456286", "0.6235945", "0.612314", "0.60374177", "0.6036776", "0.59866303", "0.5981496", "0.5973722", "0.5966571", "0.59424984", "0.5921845", "0.5910393", "0.5909234", "0.5907262", "0.59053224", "0.5892766", "0.58769244", "0.5876505", "0.5841656", "0.5802337", "0.57970726", "0.57953894", "0.57186586", "0.5700254", "0.56901973", "0.5672386", "0.56572104", "0.56548774" ]
0.7105341
0
Draws a random float number from a uniform distribution given by U[lower, upper].
def uniform(lower, upper): return lower + random.random() * (upper - lower)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _rand_float(self, low, high):\n\n return self.np_random.uniform(low, high)", "def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def random_float(low: float, high: float):\n seed = time.time()\n random.seed(seed)\n return random.uniform(low, high)", "def uniform(a: float, b: float) -> float:\n ...", "def random_float():\n return (random() - 0.5) * 2", "def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)", "def uniform_random_value(l_boundary: float, r_boundary: float) -> float:\n return uniform(l_boundary, r_boundary)", "def rand_uniform(a, b):\n\n\treturn a + lcg.draw_rand_number() * (b - a)", "def UniformRV(low, high):\n return RV(ss.randint(low, high))", "def test_uniform(self):\r\n\r\n s = np.random.uniform(-1.35, 0.5, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def uniform_sample(upper, num):\n sample = []\n for i in range(num):\n value = random.randint(0, upper - 1)\n sample.append(value)\n return sample", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def uniform(low, high, size, dtype=np.float32):\n rng = np.random.default_rng(0)\n out = (high - low) * rng.random(size, dtype=dtype) + low\n return out", "def random() -> float:\n ...", "def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None, dtype=None):\r\n low = tensor.as_tensor_variable(low)\r\n high = tensor.as_tensor_variable(high)\r\n if dtype is None:\r\n dtype = tensor.scal.upcast(theano.config.floatX, low.dtype, high.dtype)\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)\r\n op = RandomFunction('uniform',\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast))\r\n return op(random_state, size, low, high)", "def uniform(\n self, low: float = 0, high: float = 1, size: Optional[Iterable[int]] = None\n ):\n _seed = self._seed() if callable(self._seed) else self._seed\n return _uniform(\n low=low,\n high=high,\n size=size,\n seed=_seed,\n device=self._device,\n handle=self._handle,\n )", "def uniform_dist(low, high):\n return sp_uniform(low, high - low)", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def random_floats(low, high=None, size=None):\n if high is None:\n high = low\n low = 0\n return low + (np.random.random(size) * (high - low))", "def random():\r\n return R.NextDouble()", "def fix_rand_value(lo_bound: float, up_bound: float) -> float:\n # In this patch test function for determinism, just return lower bound\n nonlocal _i, _vals_sequence\n v_return = _vals_sequence[_i]\n _i = (_i + 1) % len(_vals_sequence)\n return v_return", "def draw_uniform(z, generator,device='cpu'):\n\n if '64' in str(torch.get_default_dtype()):\n if 'cuda' == device:\n h = torch.cuda.DoubleTensor(z, 1).uniform_(generator=generator)\n elif 'cpu' == device:\n h = torch.DoubleTensor(z, 1).uniform_(generator=generator)\n else:\n if 'cuda' == device:\n h = torch.cuda.FloatTensor(z, 1).uniform_(generator=generator)\n elif 'cpu' == device:\n h = torch.FloatTensor(z, 1).uniform_(generator=generator)\n return h", "def randomize(lower, upper):\n return lower + (random.random() * (upper - lower))", "def 
sample_uniform():\n global samples_uniform, isample_uniform\n\n # sample of U(0, 1)\n u = samples_uniform[isample_uniform]\n\n # moving to next index of samples global array\n isample_uniform += 1\n if isample_uniform >= len(samples_uniform):\n # exhausted all samples -> re-drawing samples from U(0, 1)\n samples_uniform = np.random.uniform(size=SIZE_SAMPLES_UNIFORM)\n isample_uniform = 0\n\n return u", "def rand_followers(scale_factor=100):\n return round(-scale_factor * log(rand.uniform()))", "def preturbScalar(value, vrange, distr=\"uniform\"):\n\tif distr == \"uniform\":\n\t\tscale = 1.0 - vrange + 2 * vrange * random.random() \n\telif distr == \"normal\":\n\t\tscale = 1.0 + np.random.normal(0, vrange)\n\telse:\n\t\texisWithMsg(\"unknown noise distr \" + distr)\n\treturn value * scale", "def sample(self):\n u = np.asarray(np.random.uniform())\n return self.invert(u)" ]
[ "0.7035064", "0.67842853", "0.66727394", "0.65749025", "0.650535", "0.64895654", "0.6346513", "0.63048273", "0.62389475", "0.61971956", "0.6175181", "0.6122292", "0.61146706", "0.61146706", "0.6104602", "0.6089752", "0.60430914", "0.6024726", "0.6020649", "0.597315", "0.5962495", "0.59539646", "0.5949301", "0.59025496", "0.58703953", "0.5815848", "0.5788794", "0.57874167", "0.5783046", "0.5759214" ]
0.73602164
0
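The row above maps the docstring about drawing from U[lower, upper] to a one-line rescaling of `random.random()`. A small usage sketch follows, assuming only the standard-library `random` module; the wrapper name `sample_uniform` is illustrative, and the behaviour is the same as `random.uniform(lower, upper)`.

```python
import random

def sample_uniform(lower, upper):
    # Rescale a U[0, 1) draw into [lower, upper), as in the document field above.
    return lower + random.random() * (upper - lower)

if __name__ == "__main__":
    random.seed(0)
    draws = [sample_uniform(-2.0, 3.0) for _ in range(5)]
    assert all(-2.0 <= d < 3.0 for d in draws)
    print(draws)
```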
initmethod = ['random', 'pca'] algos = ['seq','batch'] all_neigh = ['gaussian','manhatan','bubble','cut_gaussian','epanechicov' ] alfa_types = ['linear','inv','power']
def set_algorithm(self, initmethod = 'pca', algtype = 'batch', neighborhoodmethod = 'gaussian', alfatype = 'inv', alfaini = .5, alfafinal = .005): self.initmethod = initmethod self.algtype = algtype self.alfaini = alfaini self.alfafinal = alfafinal self.neigh = neighborhoodmethod
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, options, is_training=False):\n self.options = options\n self.is_training = is_training\n self.add_bi_directional_edges = None\n self.add_self_loop_edges = None\n self.use_reverse_edges = None", "def __init__(self, algorithm, iters, **params):\n self.algorithm=algorithm\n self.iters=iters\n if self.iters <= 0:\n raise ValueError(\"the number of iterations must be greater than zero\")\n if self.algorithm=='dtree':\n self.depth = params.pop('depth')\n if self.algorithm=='ann':\n self.gamma = params.pop('gamma')\n\n self.estimators_ = []\n self.estimator_weights_ = np.zeros(self.iters, dtype=np.float) \n self.clf=object", "def main():\n parser = argparse.ArgumentParser(description='Implementation of the Naive Bayes and Perceptron classifiers')\n parser.add_argument('--statsmode', help='whether to gather stats or not', choices=['y','Y','N','n'], default='n')\n parser.add_argument('--classifier', help='classifier to use', choices=['BAYES', 'PERCEPTRON'], required=True)\n parser.add_argument('--mode', help='image class to test', choices=['VALIDATION', 'TEST'], default='TEST')\n parser.add_argument('--type', help='image type to train', choices=['DIGIT', 'FACE', 'MNIST'], required=True)\n parser.add_argument('--range', metavar=('START', 'END_EXCLUSIVE'), nargs=2, type=int, help='Range of data to test', default=[0, 100])\n parser.add_argument('--trainpercent', metavar='PERCENT', type=int, help='the percent of training data to use (int out of 100)', default=100, dest='percentage')\n parser.add_argument('--smoothing', type=int, help='Laplace smoothing constant (Naive Bayes)', default=2)\n parser.add_argument('--iterations', type=int, help='Number of times to iterate over training data (Perceptron)', default=5)\n parser.add_argument('--debug', help='Outputs more detailed information to stdout', action='store_true')\n parser.add_argument('--statloops', type=int, help='Number of times the classifier iterates over test data (Statistics only)', default=5)\n args = parser.parse_args()\n # image_type = ImageType.DIGIT if args.type == 'DIGIT' else ImageType.FACE\n image_type = None\n if args.type == 'DIGIT':\n image_type = ImageType.DIGIT\n elif args.type == 'FACE':\n image_type = ImageType.FACE\n else:\n image_type = ImageType.MNIST\n mode = Mode.TEST if args.mode == 'TEST' else Mode.VALIDATION\n if args.statsmode == 'y' or args.statsmode == 'Y':\n run_percentages_classifier(args.classifier, image_type, args)\n else:\n run = run_classifier_bayes if args.classifier == 'BAYES' else run_classifier_perceptron\n run(mode, image_type, args)", "def __init__(self, total_args):\n\t\tself.alpha = 0.0\n\t\tself.salida = 0.0\n\t\tself.bias = pseudoaleatorio(-1.0, 1.0)\n\t\tself.pesos = []\n\t\tfor i in range(total_args):\n\t\t\tself.pesos.append(pseudoaleatorio(-1.0, 1.0))", "def __init__(self, type, slen=4, alen=1, lexsize=256):\n # vowels {i,u,e,o} in articulatory features (hi, bk, rd) \\in {-1,0,1}\n self.vowels = N.array(((1.0, 0.0, 0.0),\n (1.0, 1.0, 0.0),\n (0.0, 0.0, 0.0),\n (0.0, 1.0, 0.0)))\n self.vf = {(1.0, 0.0, 0.0): \"i\",\n (1.0, 1.0, 0.0): \"u\",\n (0.0, 0.0, 0.0): \"e\",\n (0.0, 1.0, 0.0): \"o\"}\n self.consonants = list(\"bcdfghjklmnpqrstvwxyz\")\n # acoustic:articulatory mapping fxn for vowel prototypes\n # acoustic reps are F1,F2' pairs, articulatory reps are feature-based\n self.vowel_map = {}\n self.vowel_spread = 0\n self.memory = N.empty((lexsize, slen, 2))\n # each agent has its own articulatory variability\n #TODO: maybe this should be inferred by the learners\n # on the 
basis of their data?\n self.alpha = N.random.normal(15, 2)\n self.beta = N.random.normal(2, 0.25)\n if self.beta < 1.0:\n self.beta = 1.1\n\n if type == \"learner\":\n self.stems = N.empty((lexsize, 4, 3), dtype=float)\n #self.affixes = N.empty((1,4))\n elif type == \"speaker\":\n tmp = [[x, y, 0.0] for x in [0.0, 1.0] for y in [0.0, 1.0]]\n self.stems = N.array([[a, b, c, d] for a in tmp for b in tmp\n for c in tmp for d in tmp])\n else:\n sys.exit(\"Undefined agent type. Aborting.\")\n # vectorized versions of some fxns\n self.vec_perceive = vectorize(self.perceive)\n self.vec_articulate = vectorize(self.articulate)\n self.vec_acoustify = vectorize(self.acoustify)", "def caixa_preta(\n algorithm=1,\n generations=50,\n population=[],\n is_single=False,\n is_uniform=True,\n is_elitist=True,\n is_ultra_elitist=False,\n elitism=0.85,\n xfactor=0.3,\n win_factor=1\n ):\n\n data_fit = []\n\n if algorithm == 1:\n for gen in range(generations):\n # calcula o fitness da população\n fit = fitness(population)\n # faz a seleção por torneio\n f_selec_torn, f_perd_torn = torneio(population, fit, win_factor)\n # faz o cruzamento\n child_sliced = cruzamento(f_selec_torn, is_single=is_single, is_uniform=is_uniform)\n # faz a mutação do filho\n child_mutaded = mutacao(child_sliced, xfactor=xfactor)\n # seleciona a nova população\n population = substituicao(population, father=f_selec_torn, children=child_mutaded, elitism=elitism, is_ultra_elitist=is_ultra_elitist, is_elitist=is_elitist)\n\n if type(population[0][0]) != int:\n import pdb;pdb.set_trace()\n\n fit = fitness(population)\n\n data_fit.append(best_fitness(fit))\n\n else:\n for gen in range(generations):\n # calcula o fitness da população\n fit = fitness(population)\n # calcula a média do fitness do conjunto\n fit_avg = fitness_medio(fit)\n # calcula a probabilidade de seleção\n prob_selec = selecao_prob(population, fit, fit_avg)\n # faz a seleção por rodeio\n f_selec_rol = roleta(population, prob_selec)\n # faz o cruzamento\n child_sliced = cruzamento(f_selec_rol, is_single=is_single, is_uniform=is_uniform)\n # faz a mutação do filho\n child_mutaded = mutacao(child_sliced, xfactor)\n # seleciona a nova população\n population = substituicao(population, f_selec_rol, child_mutaded, elitism, is_elitist)\n fit = fitness(population)\n\n data_fit.append(best_fitness(fit))\n\n return data_fit", "def __init__(self, neighbourhood, algorithm, iterations, set_up):\n self.input = neighbourhood\n self.algorithm = algorithm\n self.set_up = set_up\n self.iterations = int(iterations)\n self.configs = self.get_configs()\n self.houses = self.load_houses()\n self.big_iterations = -1\n self.small_iterations = 0\n self.caps = []\n self.batteries = {}\n self.lowest = 99999\n self.index = 0\n self.run_algorithm()", "def probinit(self, aaa, n_obj):\n # Set algorithm...\n if aaa == 'nsga':\n algo = nsga_II(m=0.05)\n else:\n algo = jde(memory=True)\n #algo = mde_pbx()\n #algo = de_1220()\n\n # ...and initialize problem with instance atributes\n prob = mga_1dsm(seq = self.FBseq,\n multi_objective = n_obj,\n dsm_dv_barrier = self.MAX_DV)\n\n prob.set_vinf((self.C3)**0.5)\n prob.set_tof(self.TOF[0], self.TOF[1])\n prob.set_entry_barrier(self.entry_barrier)\n prob.set_launch_window(self.EPOCHSTART, self.EPOCHEND)\n return prob, algo", "def default_pars(type='simple_plus'):\n norients = 16\n orients = [o * np.pi / norients for o in range(norients)]\n divfreqs = [2, 3, 4, 6, 11, 18]\n freqs = [1. 
/ n for n in divfreqs]\n phases = [0]\n\n # this is something new.\n # there are 6 steps. and I can turn on/off these steps. potentially.\n steps = {'preproc_resize',\n 'preproc_lowpass',\n 'normin',\n 'filter', 'activ',\n 'normout', 'dimr'}\n\n # dict with all representation parameters\n representation = {\n\n # - preprocessing\n # prepare images before processing\n 'preproc': {\n # resize input images by keeping aspect ratio and fix the biggest edge\n 'max_edge': 150,\n # kernel size of the box low pass filter\n 'lsum_ksize': 3,\n },\n\n # - input local normalization\n # local zero-mean, unit-magnitude\n 'normin': {\n # kernel shape of the local normalization\n 'kshape': (3, 3),\n # magnitude threshold\n # if the vector's length is below, it doesn't get resized\n 'threshold': 1.0,\n },\n\n # - linear filtering\n 'filter': {\n # kernel shape of the gabors\n 'kshape': (43, 43),\n # list of orientations\n 'orients': orients,\n # list of frequencies\n 'freqs': freqs,\n # list of phases\n 'phases': phases,\n # threshold (variance explained) for the separable convolution\n # should be set to 1 or bigger when debugging.\n 'sep_threshold': .9,\n 'max_component': 100000,\n # just big enough (using inf would be more correct technically, though that will be a problem for JSON)\n 'fix_bug': False, # whether fixing separated convolution bug.\n 'mode': 'same', # this is only available for non legacy. can be also ``'valid'``.\n },\n\n # - simple non-linear activation\n 'activ': {\n # minimum output (clamp)\n 'minout': 0,\n # maximum output (clamp)\n 'maxout': 1,\n 'type': 'clamp', # can also be `square`, `exp`, `recsquare`, `rec`\n },\n\n # - output local normalization\n 'normout': {\n # kernel shape of the local normalization\n 'kshape': (3, 3),\n # magnitude threshold\n # if the vector's length is below, it doesn't get resized\n 'threshold': 1.0,\n },\n\n # - dimension reduction\n 'dimr': {\n # kernel size of the local sum (2d slice)\n 'lsum_ksize': 17,\n # fixed output shape (only the first 2 dimensions, y and x)\n 'outshape': (30, 30),\n },\n }\n\n if type == 'simple_plusplus_2nd_scale':\n representation['preproc']['max_edge'] = 75\n\n if type == 'simple_plus':\n featsel = {\n # Include representation output ? True or False\n 'output': True,\n\n # Include grayscale values ? None or (height, width)\n 'input_gray': (100, 100),\n # Include color histograms ? None or nbins per color\n 'input_colorhists': None,\n # Include input norm histograms ? None or (division, nfeatures)\n 'normin_hists': None,\n # Include filter output histograms ? None or (division, nfeatures)\n 'filter_hists': None,\n # Include activation output histograms ? None or (division, nfeatures)\n 'activ_hists': (2, 10000),\n # Include output norm histograms ? None or (division, nfeatures)\n 'normout_hists': (1, 10000),\n # Include representation output histograms ? None or (division, nfeatures)\n 'dimr_hists': (1, 10000),\n }\n elif type == 'simple':\n featsel = {\n # Include representation output ? True or False\n 'output': True,\n\n # Include grayscale values ? None or (height, width)\n 'input_gray': None,\n # Include color histograms ? None or nbins per color\n 'input_colorhists': None,\n # Include input norm histograms ? None or (division, nfeatures)\n 'normin_hists': None,\n # Include filter output histograms ? None or (division, nfeatures)\n 'filter_hists': None,\n # Include activation output histograms ? None or (division, nfeatures)\n 'activ_hists': None,\n # Include output norm histograms ? 
None or (division, nfeatures)\n 'normout_hists': None,\n # Include representation output histograms ? None or (division, nfeatures)\n 'dimr_hists': None,\n }\n elif type == 'simple_plusplus_2nd_scale':\n featsel = {\n # Include representation output ? True or False\n 'output': True,\n\n # Include grayscale values ? None or (height, width)\n 'input_gray': (37, 37),\n # Include color histograms ? None or nbins per color\n 'input_colorhists': None,\n # Include input norm histograms ? None or (division, nfeatures)\n 'normin_hists': None,\n # Include filter output histograms ? None or (division, nfeatures)\n 'filter_hists': None,\n # Include activation output histograms ? None or (division, nfeatures)\n 'activ_hists': (2, 10000),\n # Include output norm histograms ? None or (division, nfeatures)\n 'normout_hists': (1, 10000),\n # Include representation output histograms ? None or (division, nfeatures)\n 'dimr_hists': (1, 10000),\n }\n else:\n raise NotImplementedError('not supported pars type!')\n\n return deepcopy({'steps': steps,\n 'representation': representation,\n 'featsel': featsel})", "def __init__(self, features, labels, adj,\n ising=True, symmetrize=True,\n max_neigh_sample=None, positive=True):\n self.features = features\n self.labels = labels\n self.adj = adj\n self.ising = ising\n self.symmetrize = symmetrize\n self.max_neigh_sample = max_neigh_sample\n self.positive = positive", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def init_algorithm(config, id_algo, id_discdds, discdds):\n # instance the algorithm\n set_current_config(config)\n algo = config.algos.instance(id_algo) \n # initialize the algorithm with the dynamics\n # TODO: add computation time\n #t0 = time.clock()\n algo.set_name_for_log(id_algo)\n algo.init(id_discdds, discdds) \n #init_time = time.clock() - t0\n return algo", "def __init__(self):\n self.rho=[]\n self.te=[]\n self.ti=[]\n self.ne=[]\n self.ni=[]\n self.ni1=[]\n self.ni2=[]\n self.ni3=[]\n self.vtor=[]\n self.zeff=[]\n\n self.nion=1\n self.Z=[]\n self.A=[]\n self.coll_mode=[]", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = 
lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def general_gantest(proba, nbr_qubits):\n for m in [4096, 2048]:\n for l in [1, 2, 3]:\n print(\"Easy mode results for m={} and l={}:\".format(m, l))\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=0, easy=True)\n print(\"\\n\")\n print(\"Distribution learning results for m={} and l={}:\".format(m, l))\n for d in [256, 512]:\n print(\"For \", d, \": \")\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=d, easy=False)\n print(\"Singleton learning results for m={} and l={}:\".format(m, l))\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=0, easy=False)", "def __init__(self, folder):\n print \"folder passed is \", folder\n self.folder = folder\n self.geometry = gf.geometry(self.folder)\n self.elements = gf.dictionary_set()\n self.area = np.zeros(shape = (8))\n self.Vol = (self.geometry.properties['span_number']*(self.geometry.properties['span_width']*\n self.geometry.properties['span_height'] + self.geometry.properties['cover_height']\n *self.geometry.properties['span_width']/2))\n self.F = np.zeros(shape = (8, 8))\n of.view_factor(self.geometry, self.F, self.area, self.Vol)\n tran = [self.geometry.properties['tra_cover_out'],0.0,0.0,\n self.geometry.properties['tra_sidewall_out'],\n self.geometry.properties['tra_cover_in'],\n self.geometry.properties['tra_sidewall_in'],0.0,0.0]\n emi = [self.geometry.properties['emi_cover_out'],1.0,1.0,\n self.geometry.properties['emi_sidewall_out'],\n self.geometry.properties['emi_cover_in'],\n self.geometry.properties['emi_sidewall_in'],1.0,1.0] \n self.tr, self.em, self.re = of.optictal_prop(tran,emi)\n if ((self.tr + self.em).any() > 1.0):\n print \"error in optical properties\"\n self.T = np.zeros(shape = (2,10))\n self.RH = np.zeros(shape = (2,10))\n # 8 inside,9 outside \n self.qcond = np.zeros(shape = (2,8))\n self.qconv = np.zeros(shape = (2,8))\n self.qrad = np.zeros(shape = (2,8))\n self.j = np.zeros(shape = (2,8))\n self.g = 
np.zeros(shape = (2,8))\n self.alpha = np.zeros(shape = (2,8))\n deltaT = 300\n RH_in = 0.6\n fg.set_initial_conditions(self.geometry.properties['t_air_inside'],\n 278,\n RH_in,self.T,self.RH , self.geometry.properties['t_air'],self.g,\n self.geometry.properties['sky_temp'])\n self.T, self.j, self.g, self.alpha, self.qrad, self.qconv = fg.solver_T(self.T,self.qrad,self.qconv,self.alpha,self.j,self.g,self.em,self.tr,\n self.geometry.properties['wind_speed'],\n self.F,self.geometry.properties['heat_flux'],1,1.0,self.area,\n self.geometry.properties['rho'],self.geometry.properties['cp'],\n self.Vol,self.geometry.properties['degree_window'],deltaT)", "def __init__(self,numagents,bits,in_nodes,print_flag=False):\n self.numagents = numagents\n self.agent = []\n self.in_bits = in_nodes\n\n \"\"\" initialize empty dict for trajectories\"\"\"\n self.trajectories={}\n self.accepted_changes = 0\n self.learning_rate = numpy.zeros((3,1),dtype=int)\n\n \"\"\" initialize network topology \"\"\"\n self.adjacency = numpy.zeros((numagents,numagents),dtype=int)\n\n\n # Erdös Renyi network; symmetric G(n,p) with p=0.5;\n # if p > ln(n)/n graph is almost surely connected\n \n p0=0.75\n for ia1 in range(self.numagents):\n for ia2 in range(ia1+1,self.numagents):\n if ia1 < 3 and ia2 < 3: self.adjacency[ia1,ia2] = 1 #fully connected in_nodes\n else:\n pER = numpy.random.random()\n if pER <= p0: self.adjacency[ia1,ia2] = 1\n self.adjacency[ia2,ia1] = self.adjacency[ia1,ia2] #symmetrize\n\n\n\n \"\"\"#complete graph (dense)\n for ia1 in range(self.numagents):\n for ia2 in range(ia1+1,self.numagents):\n self.adjacency[ia1,ia2] = 1\n self.adjacency[ia2,ia1] = 1\n \"\"\"\n\n\n \"\"\"\n #4-lattice\n for ia in range(self.numagents):\n if ia > 1 and ia < self.numagents-2:\n ia1 = ia + 1\n ia2 = ia + 2\n ia3 = ia - 1\n ia4 = ia - 2\n\n elif ia == 0:\n ia1 = ia + 1\n ia2 = ia + 2\n ia3 = self.numagents-1\n ia4 = self.numagents-2\n\n elif ia == 1:\n ia1 = ia + 1\n ia2 = ia + 2\n ia3 = ia - 1\n ia4 = self.numagents-1\n\n elif ia == self.numagents-2:\n ia1 = ia + 1\n ia2 = 0\n ia3 = ia - 1\n ia4 = ia - 2\n elif ia == self.numagents-1:\n ia1 = 0\n ia2 = 1\n ia3 = ia- 1\n ia4 = ia -2\n\n self.adjacency[ia,ia1] = 1\n self.adjacency[ia1,ia] = 1\n self.adjacency[ia,ia2] = 1\n self.adjacency[ia2,ia] = 1\n self.adjacency[ia,ia3] = 1\n self.adjacency[ia3,ia] = 1\n self.adjacency[ia, ia4] = 1\n self.adjacency[ia4, ia] = 1\n \"\"\"\n\n \n #SF-network, BA(m=1)\n \"\"\"\n m=1\n main_graph = networkx.barabasi_albert_graph(self.numagents, m)\n self.adjacency = networkx.to_numpy_array(main_graph)\n \"\"\"\n\n # density is number of edges/number of possible edges\n self.graph_density = numpy.sum(self.adjacency)/(self.numagents*(self.numagents-1) )\n\n\n \"\"\" initialize agents: \"\"\"\n iagents = range(self.in_bits)\n majority = True\n for ia in range(self.numagents):\n nbh = 3 #ALT: take mean neighborhood from graph; nbh = global_functions.dict_from_adj(self.adjacency)\n mu1 = numpy.random.randint(1, nbh + 1) # int(thresh)\n mu2 = numpy.random.randint(mu1, nbh + 1) #\n if ia in iagents: i = True\n else: i = False\n self.agent.append(agent(ia,ia,bits,nbh,i,mu1,mu2,majority=majority)) #initialize with majority rule\n #print(ia,self.agent[ia].rule)\n\n if print_flag:\n adj_dict = global_functions.dict_from_adj(self.adjacency)\n numagents = 0\n for ia in range(len(self.adjacency)):\n if numpy.sum(self.adjacency[ia]) != 0 or numpy.sum(self.adjacency[:, ia]) != 0: numagents += 1\n print(\"Initialize network with\", self.numagents, \"agents on a 
graph with\"\n , len(adj_dict), \"edges and density of\", numpy.around(self.graph_density,2))\n print(\"Input agents:\", iagents)\n print(\"Average degree:\", numpy.around(numpy.sum(self.adjacency)/self.numagents,2))\n\n if self.numagents <= 12:\n for l in self.adjacency: print(l)\n else:\n for key in adj_dict: print(key,\"->\",adj_dict[key])\n\n print(\"Perceptual rule of type:\")\n if majority: print(\"majority rule: x -> 1: if sum(input) >= out_degree/2\")\n else:\n print(\"x -> 1: if mu1 <= sum(input) <= mu2\")\n print(\"mu1 =\",[self.agent[ia].mu1 for ia in range(self.numagents)],\n \"mu2 =\",[self.agent[ia].mu2 for ia in range(self.numagents)])\n #for key in itertools.product({0, 1}, repeat = self.out_bits):\n # print(key, self.agent[0].update(key))\n print(\"Network successfully initialized\")", "def __init__(self, epsilon=0.05,gamma=0.6, alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n FQLearningAgent.__init__(self, **args)", "def __init__(\n self,\n net,\n eps,\n sua=False,\n pi=False,\n update_freq=1,\n alpha=1.0,\n constraint_norm=False,\n data_size=50000,\n use_batch_norm=False,\n ):\n self.net = net\n self.state = net.state_dict()\n self.mean_state = copy.deepcopy(self.state)\n self.data_size = data_size\n self.use_batch_norm = use_batch_norm\n\n self.eps = eps\n self.sua = sua\n self.pi = pi\n self.update_freq = update_freq\n self.alpha = alpha\n self.constraint_norm = constraint_norm\n self.params = []\n self._iteration_counter = 0\n for mod in net.modules():\n mod_class = mod.__class__.__name__\n if mod_class in [\"Linear\", \"Conv2d\"]:\n mod.register_forward_pre_hook(self._save_input)\n mod.register_backward_hook(self._save_grad_output)\n params = [mod.weight]\n if mod.bias is not None:\n params.append(mod.bias)\n d = {\"params\": params, \"mod\": mod, \"layer_type\": mod_class}\n self.params.append(d)\n\n elif \"BatchNorm\" in mod_class and use_batch_norm:\n mod.register_forward_pre_hook(self._save_input)\n mod.register_backward_hook(self._save_grad_output)\n\n params = [mod.weight, mod.bias]\n\n d = {\"params\": params, \"mod\": mod, \"layer_type\": mod_class}\n self.params.append(d)\n\n super(KFACLaplace, self).__init__(self.params, {})\n # super(KFACLaplace, self).__init__()", "def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of 
f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' \n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension", "def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n #self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)", "def init_process(mech):\n gases[mech] = ct.Solution(mech)\n gases[mech].transport_model = 'Multi'", "def on_game_start(self, config):\n gamelib.debug_write('Configuring your custom algo strategy...')\n self.config = config\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER\n FILTER = config[\"unitInformation\"][0][\"shorthand\"]\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\n PING = config[\"unitInformation\"][3][\"shorthand\"]\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]\n self.structureInPlace = False\n self.destructorsLeft = 0\n self.destructorsMiddle = 0\n self.juicyTargets = 0\n self.juicyCorner = False\n self.floodGatesOpen = True\n self.defenseRating = 0\n self.defenseCost 
= 0\n self.attackedFromLeft = 0\n\n self.mainStructure = [[ 25, 13],[ 24, 12],[ 23, 11],[ 22, 10],[ 21, 9],[ 20, 8],[ 19, 7],[ 18, 6],[ 17, 5],[ 16, 4],[ 15, 3],[ 14, 2],[ 13, 1]]\n\n\n self.filter0 =[[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],\\\n [ 9, 13],[ 10, 13],[ 17, 13],[ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13]] \n self.filter1 = [[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],[ 9, 13],[ 10, 13],[ 17, 13],\\\n [ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13],[ 2, 12],[ 25, 12],[ 3, 11],[ 24, 11],[ 4, 10]]\n self.filter2 = [[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],[ 9, 13],[ 10, 13],[ 17, 13],\\\n [ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13],[ 2, 12],[ 25, 12],[ 3, 11],[ 24, 11],[ 4, 10]]\n self.filter3 = [[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13]]\n\n self.destructor0 = [[ 13, 13]]\n self.destructor1 = [[ 13, 13],[ 14, 13]]\n self.destructor2 = [[ 13, 13],[ 14, 13]]\n self.destructor3 = [[ 13, 13],[ 14, 13]]\n\n self.initExclusionList = [[0,0]]\n self.exclusionList = [[0,0]]", "def __init__(self, G, population, condition_axelrod, condition_centola):\n super(ExpandableAlgorithm, self).__init__(G, population)\n self._overlap_function = overlap_similarity\n self._post_args = None\n self.condition_axelrod = condition_axelrod\n self.condition_centola = condition_centola", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['D_adv','D_cls', 'G_A','G_B', 'cycle_A','G_adv','reg','idt']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n visual_names_A = ['real_A','A','mask_A', 'fake_B','B','mask_B', 'rec_A']\n #visual_names_B = ['real_B', 'fake_A', 'rec_B']\n # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B)\n # if self.isTrain and self.opt.lambda_identity > 0.0:\n # visual_names_A.append('idt_B')\n # #visual_names_B.append('idt_A')\n\n # combine visualizations for A and B\n self.visual_names = visual_names_A #+ visual_names_B\n # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.\n \n\n # define networks (both Generators and discriminators)\n # The naming is different from those used in the paper.\n # Code (vs. 
paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n self.netG_A=[]\n self.netG_B=[]\n self.netG_Amask=[]\n self.netG_Bmask=[]\n if self.isTrain:\n self.model_names += ['G_A', 'G_Amask', 'G_B', 'G_Bmask', 'D', 'Dadv']\n else: # during test time, only load Gs\n self.model_names = ['G_A', 'G_Amask', 'G_B', 'G_Bmask']\n for i in range(opt.num_class):\n tG_A, tG_Amask = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n \n self.netG_A.append(tG_A)\n self.netG_Amask.append(tG_Amask)\n tG_B, tG_Bmask = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG_B.append(tG_B)\n self.netG_Bmask.append(tG_Bmask)\n\n self.netD= networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,opt.num_class)\n self.netDadv = networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids, 1)\n \n\n if self.isTrain:\n if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels\n assert(opt.input_nc == opt.output_nc)\n # create image buffer to store previously generated images\n # self.fake_A_pool = ImagePool(opt.pool_size)\n # create image buffer to store previously generated images\n # self.fake_B_pool = ImagePool(opt.pool_size)\n # define loss functions\n # define GAN loss.\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionGAN_D = networks.GANLoss('multi-label').to(self.device)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizers_G=[]\n for i in range(opt.num_class):\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A[i].parameters(\n ), self.netG_B[i].parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) \n self.optimizers_G.append(self.optimizer_G)\n \n self.optimizer_D = torch.optim.Adam(self.netD.parameters(\n ), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers=self.optimizers_G+[self.optimizer_D]", "def construct_parameters(self, method= \"random\", W = np.zeros(1), b = np.zeros(1), initialization=True):\n #W = np.asarray(W, dtype=object)\n #b = np.asarray(b, dtype=object)\n for i in reversed(range(1,len(self.architecture))):\n \n if initialization==True:\n if self.activations[i-1] in {'relu' , 'leakyrelu' , 'ealu'}:\n variance = np.sqrt(2/(self.architecture[i-1])) #He initialization\n elif self.activations[i-1] == 'tanh':\n variance = np.sqrt(6/(self.architecture[i-1] + self.architecture[i])) #Xavier initialization\n elif self.activations[i-1] in ('swish' , 'sigmoid'):\n variance = np.sqrt(1/(self.architecture[i-1]))\n else:\n variance = 1\n \n elif initialization == False:\n variance = 1\n \n if method == 'random':\n self.weights_and_biases[f'W{i}'] = np.random.rand(self.architecture[i-1], self.architecture[i])*variance #randomised initialisation \n self.weights_and_biases[f'b{i}'] = np.zeros(self.architecture[i])*variance\n \n elif method == 'manual': #manual initialisation using given weights and biases\n self.weights_and_biases[f'W{i}'] = W[i-1]\n self.weights_and_biases[f'b{i}'] = b[i-1] \n return self.weights_and_biases", "def build_algorithm(self, algorithm_type):\n distance_matrix = self.matrix_handler.distance_matrix\n algorithm_execution_parameters = 
{}\n if algorithm_type == \"spectral\":\n # We need to set number of clusters for performance and we get sigma if defined\n algorithm_execution_parameters[\"max_clusters\"] = self.evaluation_parameters[\"maximum_clusters\"]\n if \"sigma\" in self.clustering_parameters[\"algorithms\"][\"spectral\"]:\n algorithm_execution_parameters[\"sigma_sq\"] = self.clustering_parameters[\"algorithms\"][\"spectral\"][\"sigma\"]\n # else it calculates its own sigma\n\n if algorithm_type in [\"spectral\",\"dbscan\",\"gromos\",\"kmedoids\",\"random\",\"hierarchical\"] :\n return ClusteringExplorer.get_clustering_algorithm_class()[algorithm_type](distance_matrix, **algorithm_execution_parameters)\n else:\n print \"[ERROR][ClusteringExplorer::build_algorithms] Not known algorithm type ( %s )\"%(algorithm_type)\n self.notify(\"SHUTDOWN\", \"Not known algorithm type ( %s )\"%(algorithm_type))\n exit()", "def gen_ap_def():\n\n while True:\n\n ap_params = [None, None]\n\n ap_params[0] = np.random.choice(OFF_OPTS, p=OFF_PROBS)\n ap_params[1] = np.random.choice(EXP_OPTS, p=EXP_PROBS)\n\n yield ap_params" ]
[ "0.5801649", "0.57721746", "0.57621926", "0.5752019", "0.570017", "0.5656524", "0.5650794", "0.558717", "0.55626065", "0.5558151", "0.55303484", "0.55301005", "0.5512373", "0.55113596", "0.5471883", "0.5464631", "0.54539907", "0.54456383", "0.5424106", "0.54218847", "0.54159504", "0.5405871", "0.5403188", "0.5399336", "0.53667796", "0.5360164", "0.5359057", "0.53580064", "0.5353712", "0.5346713" ]
0.7114801
0
som and bmu_ind depending on the lattice "hexa" or "rect" we have different grid distance functions. bmu_ind is a number between 0 and number of nodes-1. depending on the map size bmu_coord will be calculated and then distance matrix in the map will be returned
def grid_dist(self,bmu_ind):
    try:
        lattice = getattr(self, 'lattice')
    except:
        lattice = 'hexa'
        print 'lattice not found! Lattice as hexa was set'

    if lattice == 'rect':
        return rect_dist(self,bmu_ind)
    elif lattice == 'hexa':
        try:
            msize = getattr(self, 'mapsize')
            rows = msize[0]
            cols = msize[1]
        except:
            rows = 0.
            cols = 0.
            pass

        #needs to be implemented
        print 'to be implemented' , rows , cols
        return np.zeros((rows,cols))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_nbh_distance_weight_matrix(\n self, neighborhood_func: float, bmu_pos: Tuple[int, int]\n ) -> np.ndarray:\n dist_mat = np.linalg.norm(self.node_list_ - bmu_pos, axis=1)\n\n pseudogaussian = np.exp(\n -np.divide(\n np.power(dist_mat, 2), (2 * np.power(neighborhood_func, 2))\n )\n )\n\n if self.nbh_dist_weight_mode == \"pseudo-gaussian\":\n return pseudogaussian.reshape((self.n_rows, self.n_columns, 1))\n\n if self.nbh_dist_weight_mode == \"mexican-hat\":\n mexicanhat = np.multiply(\n pseudogaussian,\n np.subtract(\n 1,\n np.divide(\n np.power(dist_mat, 2), np.power(neighborhood_func, 2)\n ),\n ),\n )\n return mexicanhat.reshape((self.n_rows, self.n_columns, 1))\n\n raise ValueError(\n \"Invalid nbh_dist_weight_mode: \" + str(self.nbh_dist_weight_mode)\n )", "def _get_nbh_distance_weight_block(\n self, nbh_func: float, bmus: List[Tuple[int, int]]\n ) -> np.ndarray:\n dist_weight_block = np.zeros((len(bmus), self.n_rows, self.n_columns))\n\n for i, bmu_pos in enumerate(bmus):\n dist_weight_block[i] = self._get_nbh_distance_weight_matrix(\n nbh_func, bmu_pos\n ).reshape((self.n_rows, self.n_columns))\n\n return dist_weight_block", "def find_bmu(t, net, m):\n #inicializa o index\n bmu_index = np.array([0,0])\n #inicia distancia minima para um numero bem grande\n min_dist = np.iinfo(np.int).max\n #anda pela matriz de pesos e procura menor distancia do vetor t\n for x in range(net.shape[0]):\n for y in range(net.shape[1]):\n #pesos atuais que estou considerando\n w = net[x, y, :].reshape(m,1) #transforma matriz em vetor 3D\n #calcula distancia euclidiana ao quadrado (evita tirar raiz)\n sq_dist = np.sum((w - t) ** 2) #soma as diferencas ao quadrado de cada valor do vetor\n if sq_dist < min_dist: #se distancia eh menor salva valor e index\n min_dist = sq_dist\n bmu_index = np.array([x,y])\n\n #depois de percorrer a matriz tenho a menor distancia e o index do vetor BMU\n #pega vetor dentro do net\n bmu = net[bmu_index[0], bmu_index[1], :].reshape(m,1)\n #retorna o bmu e o indice\n return (bmu, bmu_index)", "def map_vects(self, input_vects):\n \n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n \n to_return = []\n \n distances = []\n \n \n contador_adyacentes = 0\n \n matriz = np.array(list(self._neuron_locations(self._m, self._n)))\n \n m = self._m\n \n n = self._n\n \n matrices = []\n \n matrices = np.stack((matriz,\n matriz + np.array([m,n]), \n matriz - np.array([m,n]), \n matriz + np.array([m,0]),\n matriz - np.array([m,0]),\n matriz + np.array([0,n]),\n matriz - np.array([0,n]),\n matriz + np.array([m,-n]),\n matriz + np.array([-m,n])\n ))\n \n distancias_matriz = []\n\n for i in range(n*m):\n distancias_matriz.append([])\n for j in range(m*n):\n distancias_matriz[i].append(np.min(np.sum(np.power(np.subtract(matriz[i], matrices[:,j]),2), axis = 1)))\n \n distancias_matriz = np.array(distancias_matriz)\n \n \n for vect in input_vects:\n\n # min_index is the index of the BMU\n \n lista_indices = [i for i in range(len(self._weightages))]\n \n min_index = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x]))\n\n # min_index_2 is the index of the 2nd BMU\n \n lista_indices.pop(min_index) # El indice es el mismo que el valor\n \n min_index_2 = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x])) \n \n r2 = np.sqrt(2)\n\n if np.sqrt(distancias_matriz[min_index][min_index_2]) > r2: \n# print('loc 1')\n# print(locaciones[min_index])\n# print('loc 2')\n# print(locaciones[min_index_2])\n contador_adyacentes += 1\n\n\n 
distance = np.linalg.norm(vect - self._weightages[min_index])\n \n distances.append(distance)\n \n to_return.append(self._locations[min_index]) \n \n # Quantization Error qe (the mean of all distances to the BMU)!\n self.distances = distances \n \n # Topographic error te\n self.proporcion = contador_adyacentes / len(input_vects)\n \n self.prom_dist = np.mean(self.distances)\n \n return to_return", "def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V", "def BM2BSM(xy, NL, KL, BM0):\n # Check if 3D or 2D\n # np.sqrt( (xy[NL[i,0],0]-xy[BL[:,1],0])**2+(xy[BL[:,0],1]-xy[BL[:,1],1])**2) ]\n '''this isn't finished....'''", "def 
_calc_u_matrix_distances(self) -> None:\n for u_node in itertools.product(\n range(self.n_rows * 2 - 1), range(self.n_columns * 2 - 1)\n ):\n # neighbor vector\n nb = (0, 0)\n\n if not (u_node[0] % 2) and (u_node[1] % 2):\n # mean horizontally\n nb = (0, 1)\n\n elif (u_node[0] % 2) and not (u_node[1] % 2):\n # mean vertically\n nb = (1, 0)\n\n self.u_matrix[u_node] = np.linalg.norm(\n self.unsuper_som_[u_node[0] // 2][u_node[1] // 2]\n - self.unsuper_som_[u_node[0] // 2 + nb[0]][\n u_node[1] // 2 + nb[1]\n ],\n axis=0,\n )", "def distance_map(self, scaling='sum'):\n\n if scaling not in ['sum', 'mean']:\n raise ValueError(f'scaling should be either \"sum\" or \"mean\" ('\n f'\"{scaling}\" not valid)')\n\n um = nan * zeros((self._weights.shape[0],\n self._weights.shape[1],\n 8)) # 2 spots more for hexagonal topology\n\n ii = [[0, -1, -1, -1, 0, 1, 1, 1]]*2\n jj = [[-1, -1, 0, 1, 1, 1, 0, -1]]*2\n\n if self.topology == 'hexagonal':\n ii = [[1, 1, 1, 0, -1, 0], [0, 1, 0, -1, -1, -1]]\n jj = [[1, 0, -1, -1, 0, 1], [1, 0, -1, -1, 0, 1]]\n\n for x in range(self._weights.shape[0]):\n for y in range(self._weights.shape[1]):\n w_2 = self._weights[x, y]\n e = y % 2 == 0 # only used on hexagonal topology\n for k, (i, j) in enumerate(zip(ii[e], jj[e])):\n if (x+i >= 0 and x+i < self._weights.shape[0] and\n y+j >= 0 and y+j < self._weights.shape[1]):\n w_1 = self._weights[x+i, y+j]\n um[x, y, k] = fast_norm(w_2-w_1)\n\n if scaling == 'mean':\n um = nanmean(um, axis=2)\n if scaling == 'sum':\n um = nansum(um, axis=2)\n\n return um/um.max()", "def computeB(linsys_setup):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz = True\n \n def computeCMBY(d0):\n \"\"\"\n For CMB, y = S^1/2 A N^-1 d, where S is CMB signal covariance matrix (Cl's)\n \"\"\"\n # N.B. 
Reshaping operations required to go between 2D pixel arrays and \n # 1D vector (for linear system)\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny,nx))\n d1 *= ninvs[freq]\n a_l = fft.fft(d1,axes=[-2,-1])\n a_l *= beams[freq]*precond_2d\n d1 = numpy.real(fft.ifft(a_l,axes=[-2,-1],normalize=True))\n d1 = numpy.reshape(d1,(nx*ny))\n d2 += d1\n return d2\n \n def computeClusterY(d0):\n \"\"\"\n For cluster, y = F^T A^T N^-1 d, where F is TSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[0][ic][freq] * g_nu[freq])\n return d2\n \n def computeClusterKSZY(d0):\n \"\"\"\n For cluster, y = K^T A^T N^-1 d, where K is KSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[1][ic][freq])\n return d2\n \n def computeMonopoleY(d0):\n \"\"\"\n Overall monopole amplitude.\n \"\"\"\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2 += numpy.sum(d1 * ninvs[freq])\n return(d2)\n \n \n # CMB realisation; convolve white noise map with beam and multiply by \n # signal covmat S^1/2 in harmonic space\n b0 = numpy.random.randn(ny,nx)\n a_l = numpy.fft.fft2(b0, b0.shape)\n a_l *= precond_2d * power_2d**(-0.5)\n b0 = numpy.fft.irfft2(a_l, b0.shape)\n \n # Calculate per-band noise realisation.\n # Multiply by pixel-space N^1/2, convolve with beam, and sum over \n # cluster pixels to get RHS\n b1 = 0; b4 = 0\n b2 = numpy.zeros(nCluster)\n if ksz: b3 = numpy.zeros(nCluster)\n \n for freq in range(nFreq):\n _b = numpy.random.randn(ny,nx) * ninvs[freq]**0.5\n a_l = numpy.fft.fft2(_b) * beams[freq] * precond_2d\n b1 += numpy.fft.irfft2(a_l, _b.shape)\n b4 += numpy.sum(_b)\n for ic in range(nCluster):\n b2[ic] += numpy.sum( _b * g_nu[freq] * clumaps[0][ic][freq] )\n if ksz: b3[ic] += numpy.sum( _b * clumaps[1][ic][freq] )\n\n b0 = numpy.reshape(b0,(nx*ny))\n b1 = numpy.reshape(b1,(nx*ny))\n \n\n # Compute CMB and cluster data parts of b\n b_CMB = computeCMBY(datamaps) + b0 + b1\n b_mono = computeMonopoleY(datamaps) + b4\n b_tsz = computeClusterY(datamaps) + b2\n if ksz: b_ksz = computeClusterKSZY(datamaps) + b3\n \n # Return total b vector (Ncmbpix + 1 + (1|2)*Ncluster elements in vector)\n b = numpy.append(b_CMB, b_mono)\n b = numpy.append(b, b_tsz)\n if ksz: b = numpy.append(b, b_ksz)\n return b", "def SALT2_MMDist(numSN,\n cm=-0.0474801042369, cs1=0.0965032273527, cs2=0.042844366359,\n x1m=0.872727291354, x1s1=0.358731835038, x1s2=1.42806797468,\n mm=10.701690617, ms1=0.334359086569, ms2=1.0750402101,\n mBm=-19.0199168813, mc=-0.0838387899933, mt=10.,\n cc=3.20907949118, cx1=-0.137042055737):\n color = double_gauss(cm, cs1, cs2, size=numSN)\n x1 = double_gauss(x1m, x1s1, x1s2, size=numSN)\n mass = double_gauss(mm, ms1, ms2, size=numSN)\n\n mB = mBm + mc * (mass > 10.) 
+ cc * color + cx1 * x1\n\n return mB, x1, color, mass", "def make_mol_kernel(drugs):\n\n dict_drug = drugs.dict_drug\n dict_ind2mol = drugs.dict_ind2mol\n\n # get the ECFP fingerprints\n nb_mol = drugs.nb\n X_fingerprint = np.zeros((nb_mol, 1024), dtype=np.int32)\n list_fingerprint = []\n # for i in list(dict_ind2mol.keys()):\n for i in range(nb_mol):\n dbid = dict_ind2mol[i]\n m = Chem.MolFromSmiles(dict_drug[dbid])\n list_fingerprint.append(AllChem.GetMorganFingerprint(m, 2))\n arr = np.zeros((1,))\n DataStructs.ConvertToNumpyArray(\n AllChem.GetMorganFingerprintAsBitVect(m, \n 2, \n nBits=1024), \n arr)\n X_fingerprint[i, :] = arr\n\n # get the Tanimoto Similarity Matrix\n K = np.zeros((len(list_fingerprint), len(list_fingerprint)))\n for i in range(len(list_fingerprint)):\n for j in range(i, len(list_fingerprint)):\n K[i, j] = DataStructs.TanimotoSimilarity(list_fingerprint[i], \n list_fingerprint[j])\n K[j, i] = K[i, j]\n\n return X_fingerprint, K", "def get_center_of_mass_allies(self,obs):", "def get_bmu(\n self, datapoint: np.ndarray, som_array: np.ndarray\n ) -> Tuple[int, int]:\n a = self._get_node_distance_matrix(\n datapoint.astype(np.float64), som_array\n )\n\n return np.argwhere(a == np.min(a))[0]", "def mi_from_dm(distance_matrix, ns, nh, spike_train_list=None):\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n \n if spike_train_list is not None:\n\n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n\n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 0\n \n if i not in members_of_glob:\n for j in nearest_neighbours[i]:\n if j not in members_of_glob:\n if spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1 # count neigbours out of glob\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*f_i # if one neighbour is in glob, all following neighb are as well\n break\n counts.append(c_i)\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += 1 + (nh - 1)*f_i #If in glob, take fraction of remaining neighbours except you\n counts.append(c_i)\n \n counts = np.array(counts)\n \n else:\n \n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 1\n for j in nearest_neighbours[i]:\n if (i != j and abs(i - j)%ns==0 ):\n c_i += 1 \n counts.append(c_i)\n counts = np.array(counts) \n \n I = sum(np.log2(counts*ns/float(nh))) / float(nr)\n\n return I", "def get_center_of_mass_enemies(self,obs):", "def mi_from_dm_alt(distance_matrix, ns, nh, spike_train_list=None):\n \n #print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n #print \"finished sorting\"\n #return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n \n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really 
happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I", "def get_gaussian_maps_2d(mu, sigma, shape_hw, mode='rot'):\n with tf.name_scope(None, 'gauss_map', [mu]):\n\n y = tf.cast(tf.linspace(-1.0, 1.0, shape_hw[0]), tf.float64)\n x = tf.cast(tf.linspace(-1.0, 1.0, shape_hw[1]), tf.float64)\n\n [x,y] = tf.meshgrid(x,y)\n xy = tf.stack([x, y], axis=-1)\n xy = tf.stack([xy] * nb_landmarks, axis=0)\n xy = tf.reshape(xy, [1, nb_landmarks, shape_hw[0], shape_hw[1], 2])\n mu = tf.reshape(mu, [-1, nb_landmarks, 1, 1, 2])\n invsigma = tf.linalg.inv(sigma)\n invsigma = tf.reshape(invsigma, [-1, nb_landmarks, 1, 2, 2])\n pp = tf.tile(invsigma, [1, 1, shape_hw[1], 1, 1])\n X = xy-mu\n dist = tf.matmul(X,pp)\n dist = tf.reduce_sum((dist*X), axis=-1)\n\n g_yx = tf.exp(-dist)\n\n g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])\n\n return g_yx", "def calc_synLocations(post_branches, n_syns, dist):\n\n\t\t\t\tassert dist in ['uniform', 'random', 'one'], 'Which synapse distribution for %s population? (uniform/random/one) '%self.population_name\n\t\t\t\t\n\t\t\t\tn_branches = len(post_branches)\n\t\t\t\tbranch_locs = {}\n\t\t\t\t\n\t\t\t\tif dist == 'uniform':\n\t\t\t\t\traise Exception('uniform', '{} dist is under construction!'.format(dist))\n\t\t\t\t\t# density = n_syns / L\n\t\t\t\t\t# locs = sorted(np.arange(0, L, 1/density))\n\t\t\t\t\t# locs = [i/L for i in locs]\n\n\t\t\t\t\t# assert len(locs)==n_syns, ['Sanity check warning: unexpected locs length!', pdb.set_trace()]\n\n\t\t\t\telif dist == 'random':\n\t\t\t\t\t\n\t\t\t\t\tfor i in range(n_syns):\n\n\t\t\t\t\t\t# Randomly choose branch\n\t\t\t\t\t\trand_branch_idx = np.random.randint(n_branches)\n\t\t\t\t\t\trand_branch \t = post_branches[rand_branch_idx]\n\t\t\t\t\t\trand_branch_name = rand_branch.name().split('].')[-1]\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Randomly choose location\n\t\t\t\t\t\trand_loc = np.random.rand()\n\n\t\t\t\t\t\tif rand_branch_name in branch_locs.keys():\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['locs'].append(rand_loc)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name] \t\t\t\t= {}\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['locs'] \t\t= [rand_loc]\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['branch_obj'] = rand_branch\t\t\t\t\t\t\t\t\n\n\t\t\t\t\tfor key in branch_locs:\n\t\t\t\t\t\tbranch_locs[key]['locs'] = sorted(branch_locs[key]['locs'])\n\t\t\t\t\n\t\t\t\telif dist == 'one':\n\t\t\t\t\tsingle_branch_idx \t= np.random.randint(n_branches)\n\t\t\t\t\tsingle_branch \t \t= post_branches[single_branch_idx]\n\t\t\t\t\tsingle_branch_name \t= single_branch.name().split('].')[-1]\n\t\t\t\t\t\n\t\t\t\t\tbranch_locs[single_branch_name] = {'branch_obj': single_branch, 'locs': [0.5]*n_syns}\n\n\t\t\t\treturn branch_locs", "def return_BMU_coord(self, sess, 
input_array):\n output = sess.run([self.distance_matrix,self.distance_argmin], feed_dict={self.input_placeholder: input_array})\n index = output[1] #flatten index\n row = index/self.tot_cols\n col = index - (row*self.tot_cols)\n return index, (row,col)", "def find_halos(pos, ngrid, log, level=3000):\n print('Binning particles', file=log)\n cells = get_cells(pos, ngrid, log)\n count = bincount(cells, minlength=ngrid**3)\n count.shape = (ngrid,ngrid,ngrid)\n print('Count in', count.min(), count.max(), file=log)\n idx = flatnonzero(count>level)\n print('Number of cells above', level, 'is', len(idx), file=log)\n \n \n labels, num_features = ndimage.label(count>level)\n print('Number fo features', num_features, file=log)\n print('Labels in', labels.min(), labels.max(), file=log)\n locations = ndimage.find_objects(labels)\n\n dense_regions = []\n\n for i in range(num_features):\n loc = locations[i]\n hw = max(l.stop - l.start for l in loc) * 0.5 /ngrid\n hw_padded = hw + 0.0/ngrid\n\n ctr =[(0.5/ngrid)*(l.stop + l.start) for l in loc]\n count_i = count[loc][labels[loc]==(i+1)].sum()\n print('Count', count_i, file=log)\n dense_regions.append((count_i, ctr, hw_padded))\n\n # sort by number of particles in the region\n dense_regions = sorted(dense_regions, key = lambda num_ctr_hw :num_ctr_hw[0], reverse=True)\n\n return dense_regions", "def mi_from_dm_alt_hq(distance_matrix, ns, nh, spike_train_list=None):\n \n print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n #nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n nearest_neighbours = np.array([np.array(hq.nsmallest(nh, r)) for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n print \"finished sorting\"\n return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I", "def get_gaussian_maps(mu, sigmax, sigmay, covs, shape_hw, mode='rot'):\n with tf.name_scope(None, 'gauss_map', [mu]):\n # mu_y, mu_x = mu[:, :, 0:1], mu[:, :, 1:2]\n\n y = tf.to_float(tf.linspace(-1.0, 1.0, shape_hw[0]))\n\n x = tf.to_float(tf.linspace(-1.0, 1.0, shape_hw[1]))\n [x,y] = 
tf.meshgrid(x,y)\n xy = tf.stack([x, y], axis=-1)\n xy = tf.stack([xy] * nb_landmarks, axis=0)\n xy = xy[None, : ,:, :, :]\n if mode in ['rot', 'flat']:\n mu = mu[:,:,None, None,:]\n\n invsigma = tf.stack([sigmay**2, -covs, -covs, sigmax**2], axis=-1)\n invsigma = tf.reshape(invsigma, [-1, nb_landmarks, 2,2])\n denominator = (sigmax*sigmay)**2 - covs**2\n denominator = tf.expand_dims(tf.expand_dims(denominator, -1), -1)\n invsigma = invsigma/(denominator+1e-7)\n invsigma = tf.cast(invsigma, tf.float32)\n pp = tf.tile(invsigma[:, :, None, :, :], [1, 1, shape_hw[1], 1, 1])\n X = xy-mu\n dist = tf.matmul(X,pp)\n dist = tf.reduce_sum((dist*X), axis=-1)\n\n\n if mode == 'rot':\n g_yx = tf.exp(-dist)\n else:\n g_yx = tf.exp(-tf.pow(dist + 1e-5, 0.25))\n\n else:\n raise ValueError('Unknown mode: ' + str(mode))\n\n g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])\n return g_yx", "def get_mi(x, y, k=1, normalize=None, norm=np.inf, estimator='ksg'):\n\n if normalize:\n x = normalize(x)\n y = normalize(y)\n\n # construct state array for the joint process:\n xy = np.c_[x,y]\n\n if estimator == 'naive':\n # compute individual entropies\n hx = get_h(x, k=k, norm=norm)\n hy = get_h(y, k=k, norm=norm)\n hxy = get_h(xy, k=k, norm=norm)\n\n # compute mi\n mi = hx + hy - hxy\n\n elif estimator == 'ksg':\n\n # store data pts in kd-trees for efficient nearest neighbour computations\n # TODO: choose a better leaf size\n x_tree = cKDTree(x)\n y_tree = cKDTree(y)\n xy_tree = cKDTree(xy)\n\n # kth nearest neighbour distances for every state\n # query with k=k+1 to return the nearest neighbour, not counting the data point itself\n # dist, idx = xy_tree.query(xy, k=k+1, p=norm)\n dist, idx = xy_tree.query(xy, k=k+1, p=np.inf)\n epsilon = dist[:, -1]\n\n # for each point, count the number of neighbours\n # whose distance in the x-subspace is strictly < epsilon\n # repeat for the y subspace\n n = len(x)\n nx = np.empty(n, dtype=np.int)\n ny = np.empty(n, dtype=np.int)\n for ii in range(n):\n # nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n # ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n\n mi = digamma(k) - np.mean(digamma(nx+1) + digamma(ny+1)) + digamma(n) # version (1)\n # mi = digamma(k) -1./k -np.mean(digamma(nx) + digamma(ny)) + digamma(n) # version (2)\n\n elif estimator == 'lnc':\n # TODO: (only if you can find some decent explanation on how to set alpha!)\n raise NotImplementedError(\"Estimator is one of 'naive', 'ksg'; currently: {}\".format(estimator))\n\n else:\n raise NotImplementedError(\"Estimator is one of 'naive', 'ksg'; currently: {}\".format(estimator))\n\n return mi", "def generate_map(seed):\n def set_biomes(field, points):\n for row in range(len(field)):\n # For every cell, we find the closest point\n for cell in range(len(field[row])):\n # Store the currently closest point:\n shortest_dist = -1\n # Stores the biome of the current point:\n current_biome = '_'\n\n # Iterate over the points to find the closest one\n for point in points:\n # Calculate the euclidean distance\n xdiff = point[0] - row\n ydiff = point[1] - cell\n distance = xdiff * xdiff + ydiff * ydiff # Square root not needed since we're only comparing\n\n # If this is currently the shortest distance, set it\n if distance < shortest_dist or shortest_dist == -1:\n shortest_dist = distance\n # Set 
the biome that will be chosen if a shorter distance isn't found\n current_biome = point[2]\n\n # Select a random field in the biome, taking rarity into account\n\n # Get names/data of all fields in the chosen biome\n biome_fields = biomes[current_biome]['fields'].items()\n # Extract field names and their rarities (weights)\n field_data = [(name, data['rarity']) for name, data in biome_fields]\n # Choose a random field using the weights\n field_index = weighted_choice([field_weight[1] for field_weight in field_data])\n # Set the cell's field\n field[row][cell] = field_data[field_index][0]\n\n return field\n\n def poisson_disc_samples(width, height, r, k=5):\n \"\"\"\n \"Two-dimensional Poisson Disc Sampling using Robert Bridson's algorithm.\"\n Modified version of https://github.com/emulbreh/bridson.\n \"\"\"\n tau = 2 * pi\n cellsize = r / sqrt(2)\n\n grid_width = int(ceil(width / cellsize))\n grid_height = int(ceil(height / cellsize))\n grid = [None] * (grid_width * grid_height)\n\n def distance(a, b):\n dx = a[0] - b[0]\n dy = a[1] - b[1]\n return sqrt(dx * dx + dy * dy)\n\n def grid_coords(p2):\n return [int(floor(p2[0] / cellsize)), int(floor(p2[1] / cellsize))]\n\n def fits(p2, gx, gy):\n yrange = list(range(max(gy - 2, 0), min(gy + 3, grid_height)))\n\n for x in range(max(gx - 2, 0), min(gx + 3, grid_width)):\n for y in yrange:\n g = grid[x + y * grid_width]\n if g is None:\n continue\n if distance(p2, g) <= r:\n return False\n return True\n\n p = [width * rnd.random(), height * rnd.random()]\n queue = [p]\n grid_x, grid_y = grid_coords(p)\n grid[grid_x + grid_y * grid_width] = p\n\n while queue:\n qi = int(rnd.random() * len(queue))\n qx, qy = queue[qi]\n queue[qi] = queue[-1]\n queue.pop()\n\n for _ in range(k):\n alpha = tau * rnd.random()\n d = r * sqrt(3 * rnd.random() + 1)\n px = qx + d * cos(alpha)\n py = qy + d * sin(alpha)\n\n if not (0 <= px < width and 0 <= py < height):\n continue\n p = [px, py]\n grid_x, grid_y = grid_coords(p)\n\n if not fits(p, grid_x, grid_y):\n continue\n queue.append(p)\n grid[grid_x + grid_y * grid_width] = p\n return [p for p in grid if p is not None]\n\n # Define map dimensions and settings\n # Size should be at least 35x35\n\n height = 50\n width = 50\n\n # Create a new instance of Random() using a given seed\n\n rnd = random.Random(seed)\n\n # Generate a random starting location somewhere in the middle of the map\n\n x = rnd.randint(width - 10, width + 10)\n y = rnd.randint(height - 10, height + 10)\n\n # Create a 2-dimensional list for the game map\n\n field = [['_'] * width for _ in range(height)]\n\n # Create random points that will be the starting positions of biomes\n\n points = poisson_disc_samples(width, height, 3, 5)\n rnd.shuffle(points)\n\n for i in range(len(points)):\n biome = rnd.choice(list(biomes.keys())) # Set a random biome\n\n points[i][0] = int(round(points[i][0])) - 1 # x\n points[i][1] = int(round(points[i][1])) - 1 # y\n points[i].append(biome)\n\n field[points[i][1]][points[i][0]] = 'X' # not needed?\n\n # Set the biomes\n\n field = set_biomes(field, points)\n\n return ({\n 'field': field,\n 'x': x,\n 'y': y\n })", "def getLocalMap(dist_compl):\n sdc=dist_compl*RES\n #clms are real ;)\n #rws are imaginary :D #rows\n map_padd = 1*RES #add a meter\n rws_ofs = abs(sdc.imag.min())+map_padd #offsetX\n rws = abs(sdc.imag.max())+(rws_ofs)\n clms_ofs = abs(sdc.real.min())+map_padd\n clms = abs(sdc.real.max())+(clms_ofs)\n M = 
np.zeros((np.round(rws+map_padd).astype(int),np.round(clms+map_padd).astype(int))).astype(dtype=MAP_D_TYPE)#empty local map\n Mg = M.copy()\n points = sdc + np.array([clms_ofs+1j*rws_ofs]) #scale\n #M[points.imag.astype(int),points.real.astype(int)]=10 \n for p in points:\n r=np.round(p.imag).astype(int)\n c=np.round(p.real).astype(int)\n try:\n #draw line in matrix\n lc = [np.round(rws_ofs).astype(int),np.round(clms_ofs).astype(int),r,c]\n rr, cc, val = line_aa(*lc) #not really demaning --> 1%\n M[rr, cc] = np.logical_or(M[rr,cc]>0, val>0) \n #add gaussian\n Mg[r-GPoints//2:r+GPoints//2,c-GPoints//2:c+GPoints//2]+=Gau\n except:\n print('Error: out of array when calculating the local map',r,c)\n Mg[Mg>100]=100 #cap the gaussian matrix\n car_pos_in_loc_mat = np.array([np.round(clms_ofs).astype(int), np.round(rws_ofs).astype(int)])\n #Mg[car_pos_in_loc_mat[1],car_pos_in_loc_mat[0]]=300 #add car pos\n return M*(-100)+Mg, car_pos_in_loc_mat", "def som_step(centers,data,neighbor,eta,sigma):\n size_k = int(np.sqrt(len(centers)))\n \n #find the best matching unit via the minimal distance to the datapoint\n b = np.argmin(np.sum((centers - np.resize(data, (size_k**2, data.size)))**2,1))\n\n # find coordinates of the winner\n a,b = np.nonzero(neighbor == b)\n \n # update all units\n for j in range(size_k**2):\n # find coordinates of this unit\n a1,b1 = np.nonzero(neighbor==j)\n # calculate the distance and discounting factor\n disc=gauss(np.sqrt((a-a1)**2+(b-b1)**2),[0, sigma])\n # update weights \n centers[j,:] += disc * eta * (data - centers[j,:])", "def make_indp_tiles(tf_map, nc_sum, mu_sq):\n tiles = tf_map.copy()\n # Here's the deal: we're going to keep only the valid output and\n # it's *always* going to exist in the lowest available indices\n stride = nc_sum + 1\n for i in xrange(tiles.shape[0]/stride):\n numpy.absolute(tiles[stride*i:stride*(i+1)].sum(axis=0), tiles[stride*(i+1)-1])\n\n # Do the proper normalization\n return tiles[nc_sum::nc_sum+1].real**2 / mu_sq[nc_sum::nc_sum+1].reshape(-1, 1)", "def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat", "def _initialize_mapbias(self):\n self.mapbias = sharedX(\n numpy.zeros(self.nmap),\n name='mb',\n borrow=True\n )", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])" ]
[ "0.57011366", "0.5595913", "0.5572306", "0.5561625", "0.5546656", "0.5420969", "0.5410991", "0.53931206", "0.5357705", "0.5275946", "0.5256405", "0.5252467", "0.52403647", "0.52389497", "0.52355295", "0.5224548", "0.5204496", "0.5180344", "0.51797044", "0.51730376", "0.51412076", "0.5068669", "0.5053913", "0.4998826", "0.49872217", "0.49779648", "0.497275", "0.49656913", "0.49619395", "0.49524575" ]
0.752086
0
helper function to get the next occurring monday as a date object
def _get_next_monday(self):
    today = datetime.date.today()
    weekday_int = today.weekday()
    if weekday_int == 0:
        return today
    next_mon = today + timedelta(7 - weekday_int)
    return next_mon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_monday(date):\n return date + datetime.timedelta(days=-date.weekday(), weeks=1)", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1", "def next_day_of_week(current, day_of_week):\n\n while current.weekday() != day_of_week:\n current += timedelta(1)\n return current", "def get_next_weekday(date, weekday):\n return date + dt.timedelta(days=(weekday - date.weekday() + 7) % 7)", "def get_next_weekend():\n d = datetime.date.today()\n # day 5 for saturday\n t = datetime.timedelta((7 + 5 - d.weekday()) % 7)\n return (d + t).strftime('%d-%m-%Y')", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def get_next_day(self):\n pass", "def _to_next_ceiling_busi_day(date):\n try:\n date = parse(date)\n except TypeError:\n date = date\n\n date = date + relativedelta(months=+1)\n date = DateUtils._to_ceiling_busi_day(date)\n\n return date", "def next_weekday(date, weekday):\n delta = weekday - date.weekday()\n if delta < 0:\n delta += 7\n return date + timedelta(days=int(delta))", "def get_mothers_day_date(year):\n day = date(year=year, month=5, day=1)\n while 1:\n if day.weekday() == 6:\n day += timedelta(days=7)\n break\n day += timedelta(days=1)\n return day", "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + datetime.timedelta(daysahead)", "def next_sunday(day):\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))", "def get_next_closest_day(weekday):\n names = {\n 'monday': 0,\n 'tuesday': 1,\n 'wednesday': 2,\n 'thursday': 3,\n 'friday': 4,\n 'saturday': 5,\n 'sunday': 6\n }\n\n today = get_current_india_time().date()\n day_shift = (names[weekday] - today.weekday()) % 7\n next_day = datetime.datetime.combine(\n today + datetime.timedelta(days=day_shift), datetime.time.min)\n\n if next_day.weekday() == today.weekday():\n next_day = next_day + datetime.timedelta(days=7)\n return next_day", "def meetup_day(year, month, dow, wom):\n first_dow = monthrange(year, month)[0]\n days_in_month = monthrange(year, month)[1]\n possible_dates = []\n print str(year) + str(month) + dow + wom\n\n \"\"\"Build dictionary of possible dates based on dow\"\"\"\n for day in range(1, days_in_month+1):\n if datetime.date(year, month, day).strftime(\"%A\") == dow:\n print day\n possible_dates.extend([day])\n\n \"\"\"Perform logic on wom constraint\"\"\"\n if wom == \"teenth\":\n for day in possible_dates:\n if day > 12 and day < 20:\n return datetime.date(year, month, day)\n elif wom == \"last\":\n return datetime.date(year, month, possible_dates[-1])\n else:\n return datetime.date(year, month, possible_dates[ int(wom[:1]) - 1 ])", "def next_seven_day(self):\n today = datetime.date.today()\n 
week_next = today + datetime.timedelta(days=7)\n return week_next.strftime('%Y-%m-%d')", "def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))", "def get_next_midnight():\n return pytz.utc.localize(datetime.datetime.today()).replace(\n hour=0, minute=0, second=0, microsecond=0\n ) + datetime.timedelta(days=1)", "def get_next_midnight():\n return pytz.utc.localize(datetime.datetime.today()).replace(\n hour=0, minute=0, second=0, microsecond=0\n ) + datetime.timedelta(days=1)", "def getPinnedDayOfNextMonth(year, month, day):\n\tyear = year + (month / 12) # purposeful integer division\n\tmonth = (month % 12) + 1\n\tday = pinDayToMonth(year, month, day)\n\treturn datetime.date(year, month, day)", "def next_week_start(iso_date: Optional[str] = None) -> date:\n if iso_date:\n current_date = date.fromisoformat(iso_date)\n else:\n current_date = date.today()\n\n days_until_monday = 7 - current_date.weekday()\n\n candidate_start = current_date + timedelta(days=days_until_monday)\n while candidate_start in holidays.US():\n candidate_start += timedelta(days=1)\n\n return candidate_start", "def next_weekday(weekday, d=datetime.datetime.now()):\n if weekday.lower() not in day_values:\n return None\n days_ahead = day_values[weekday.lower()] - d.weekday()\n if days_ahead <= 0: # Target day already happened this week\n days_ahead += 7\n return d + datetime.timedelta(days_ahead)", "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "def next_date(date_time_input, interval):\n\n if interval=='day':\n return date_time_input+timedelta(days=1)\n\n elif interval=='week':\n return date_time_input+timedelta(days=7)\n\n elif interval=='month':\n \n a = date_time_input+timedelta(days=31)\n next_month = a.replace(day=1)\n return next_month", "def next_month(dateobj):\n year_delta, old_month = divmod(dateobj.month, 12)\n return datetime.date(dateobj.year + year_delta, old_month + 1, 1)", "def next_day(year, month, day):\n thisday = dt.datetime(year, month, day)\n nextday = thisday + dt.timedelta(days=1)\n y = nextday.year\n m = nextday.month\n d = nextday.day\n return y, m, d", "def _next_week(self) -> datetime.datetime:\n now = datetime.datetime.now()\n for i in range(7):\n yield now + datetime.timedelta(i)", "def tomorrow(self):\n if self.isLeapYear():\n fdays = 29\n else:\n fdays = 28\n\n DIM = [0, 31, fdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n currentDay = self.day\n maxDay = DIM[self.month]\n\n if currentDay == maxDay and self.month == 12:\n self.year += 1\n self.month = 1\n self.day = 1\n elif currentDay == maxDay:\n self.month += 1\n self.day = 1\n else:\n self.day += 1", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)" ]
[ "0.80878097", "0.69910073", "0.67567307", "0.67550325", "0.6671787", "0.66657865", "0.65446717", "0.65446717", "0.65441513", "0.6513178", "0.64433354", "0.6440269", "0.6345591", "0.63378316", "0.62782484", "0.6256524", "0.62425745", "0.6242084", "0.61851376", "0.61851376", "0.6117924", "0.61053866", "0.60922694", "0.609124", "0.6040508", "0.6029982", "0.6029481", "0.5989535", "0.59858453", "0.59600985" ]
0.8260075
0
Helper function adding some known todo list items for the test user
def _add_todo_items(self):

    todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)
    todo_list.save()

    items = [
        'feed the cats',
        'drive to work',
        'read a book',
        'eat some food',
    ]
    todo_items = []
    for item in items:
        new_item = ToDoItem(
            title=item,
            to_do_list=todo_list,
            priority=1
        )
        new_item.save()
        todo_items.append(new_item)
    return todo_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_can_add_todo_list():\n scheduler = Scheduler()\n new_id = uuid.uuid4()\n\n scheduler.add_todo_list(new_id, \"my todo list\")\n\n Is(scheduler.get_amount_of_todo_lists()).not_none.integer.has_same_truth_of(1)", "def test_given_a_user_when_I_add_a_todo_Then_I_can_access_it_from_user_todo_collection(self):\n from .models import Tag\n from .models import TodoUser\n from .models import TodoItem\n\n user = TodoUser(\n email=u'[email protected]',\n first_name=u'Arthur',\n last_name=u'Pendragon',\n )\n self.session.add(user)\n\n tags = [u'quest', u'ni', u'knight']\n\n todo = TodoItem(user.email,\n u'Find a shrubbery', \n [u'quest', u'ni', u'knight'] \n ) \n self.session.add(todo)\n \n user_todo = user.todo_list.one()\n self.assertTrue(todo is user_todo)", "def add_item(todo_list):\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return", "def add_items(todofile, items):\n if(items is not None and len(items) > 0):\n for item in items:\n todofile.write_todo(parse_item(item))", "def write_todo(self, todo):\n if todo != None:\n print 'added \"%s\"' % todo.text\n self.new_items.append(todo)", "def add_task(action, user):\n \n item = Item()\n item.description = action['what'].get('description', '')\n item.id = action['what']['id']\n item.position = action['what']['position']\n \n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n l.items.append(item)\n l.save()\n \n return l", "def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')", "def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check", "def add_item_to_list(self, todolist):\n\t\tnote = self.get_all_text_view_text(self.textview_add)\n\t\ttodolist.add_item(note)\n\t\tself.textview_add.get_buffer().set_text('')", "def add_list(user_id):\n\n list_title = request.form[\"list_title\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.filter_by(list_title=list_title).first()\n\n if to_do_list:\n flash(\"List name already exists. 
Please select a new name.\")\n return redirect(\"/dashboard\")\n\n new_list = ToDoList(list_title=list_title, user_id=user_id)\n \n db.session.add(new_list)\n db.session.commit()\n \n return redirect(\"/dashboard\")", "def add_todo():\n task = flask.request.form[\"task\"]\n todos.append(ToDo(task))\n return \"success\"", "def add_items(list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def todo_added(name, description):", "def test_list_user(self):\n pass", "def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))", "def add_list(self):\n the_list = models.List(user_id=1,\n list_name=self.test_list,\n description=self.test_list_desc)\n the_list.add()", "def test_list_notes(self):\n pass", "def list_2_for_tests(db_setup, user_for_test):\n heading = \"test_2_heading\"\n display_order = 0\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (user_for_test[\"id\"], heading, display_order,)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(L) \n FROM( SELECT id, heading, display_order FROM lists WHERE heading = %s LIMIT 1)\n L\n \"\"\", (heading,)\n )\n\n test_list_json = db_setup.cur.fetchone()[0]\n return test_list_json", "def list_items(todofile, opt, args):\n def filt(item):\n \"\"\"Filter function based on options.\"\"\"\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result\n\n for item in filter(filt, todofile.fetch_items()):\n list_str = ['']\n if (item.done):\n list_str.append('X')\n elif (item.time is not None and item.time < datetime.datetime.now()):\n list_str.append('!')\n else:\n list_str.append('*')\n if(opt.list_id):\n list_str.append('{0:<3d}'.format(item.itemid))\n if(opt.list_date and item.time is not None):\n list_str.append(item.time.strftime('%c') + ' --')\n list_str.append(item.text)\n print ' '.join(list_str)", "def do_list_items(self, arg):\n try:\n cprint (\"These are your items: \\n\", 'blue')\n my_items = arg[\"<all_items>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n my_items_str = \" \".join(my_items)\n print(my_items_str)\n elif choice == \"id\":\n my_items_str = int(\" \".join(my_items))\n print (my_items_str)\n app.ToDoApp.to_view_items(my_items_str)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def test_creating_todo(todoApp, input):\n # Create new todo\n new_todo_input = todoApp.find_new_todo_input()\n print new_todo_input\n new_todo_input.send_keys(input, Keys.ENTER)\n\n # ASSERTION\n # Check whether the new todo exist in the todo list or not.\n todo = todoApp.find_todo(input)\n \n # Check the new todo status, it should active.\n assert todoApp.is_active_todo(todo)\n \n # Check the active todo count\n assert todoApp.count_active_todos() == '1 
item left'", "def test_todo(self):\n self.assertEqual(self.my_todo.state, \"T\")\n self.assertEqual(self.my_todo.due_date, date_today)\n self.assertEqual(self.my_todo.text, \"Call Mom\")\n self.assertEqual(str(self.my_todo), \"Call Mom\")", "def test_adding_many_todos(self):\n event = Event.objects.filter(slug__endswith=\"-upcoming\") \\\n .order_by(\"-pk\")[0]\n event.end = event.start + datetime.timedelta(days=2)\n event.save()\n\n # check if the event has 0 todos\n assert event.todoitem_set.all().count() == 0\n\n # add standard todos\n ident = event.get_ident()\n url, form = self._get_initial_form('todos_add', ident)\n\n # fix: turn Nones into empty strings\n for key, value in form.items():\n if value is None:\n form[key] = ''\n\n rv = self.client.post(reverse('todos_add', args=[ident]), form)\n\n # let's check if the form passes\n assert rv.status_code == 302\n\n # finally let's check there are some new todos\n assert event.todoitem_set.all().count() == 9", "def create_dummy_content(user_id):\n task = TodoItem(\n user=user_id,\n task=u'Find a shrubbery',\n tags=[u'quest', u'ni', u'knight'],\n due_date=datetime.utcnow() + timedelta(days=60),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Search for the holy grail',\n tags=[u'quest'],\n due_date=datetime.utcnow() - timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Recruit Knights of the Round Table',\n tags=[u'quest', u'knight', u'discuss'],\n due_date=datetime.utcnow() + timedelta(minutes=45),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Build a Trojan Rabbit',\n tags=[u'quest', u'rabbit'],\n due_date=datetime.utcnow() + timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Talk to Tim the Enchanter',\n tags=[u'quest', u'discuss'],\n due_date=datetime.utcnow() + timedelta(days=90),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Defeat the Rabbit of Caerbannog',\n tags=[u'quest', u'rabbit'],\n due_date=None,\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Cross the Bridge of Death',\n tags=[u'quest'],\n due_date=None,\n )\n DBSession.add(task)", "def do_item_add(self, arg):\n try:\n add_item = arg[\"<item_name>\"]\n add_item_str = \" \".join(add_item)\n app.ToDoApp.to_add_item(add_item_str, add_item = True)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def list_1_for_tests(db_setup, user_for_test):\n heading = \"test_1_heading\"\n display_order = 0\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (user_for_test[\"id\"], heading, display_order,)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(L) \n FROM( SELECT id, heading, display_order FROM lists WHERE heading = %s LIMIT 1)\n L\n \"\"\", (heading,)\n )\n\n test_list_json = db_setup.cur.fetchone()[0]\n return test_list_json", "def do_list(self, arg):\n try:\n cprint (\"Here are your todo lists: \\n\", 'blue')\n app.ToDoApp.to_view_todo()\n\n except ValueError as e:\n cprint(e, 'red')", "def test_post_foods_list(self):\n pass", "def post(self, dnzo_user):\n from tasks_data.task_lists import add_task_list, get_task_list\n \n task_list_name = self.request.get('task_list_name', None)\n if not task_list_name:\n self.bad_request(\"Must provide task_list_name to create a new list\")\n return\n \n new_list = add_task_list(dnzo_user, 
task_list_name)\n if not new_list:\n self.bad_request(\"Could not add the new task list!\")\n return\n \n self.json_response(task_list=new_list.to_dict())", "def add_item(self):\n\n self.todo_scroll_cell.add_item(f'{self.new_todo_textbox.get()}')" ]
[ "0.67881167", "0.6779041", "0.67479783", "0.6636232", "0.65715206", "0.6420959", "0.63876003", "0.63576937", "0.6330586", "0.62917775", "0.62856203", "0.6273896", "0.62697417", "0.6185657", "0.6137805", "0.6080443", "0.60529596", "0.60524225", "0.601782", "0.6005775", "0.5996415", "0.59884447", "0.5958338", "0.59508663", "0.5936028", "0.592948", "0.5891282", "0.5853943", "0.58322465", "0.580869" ]
0.74570924
0
Helper function adding some known todo list items for the test user for the previous day
def _backfill_todo_items_for_previous_day(self):

    previous_day_date = self.day.date - timedelta(days=1)
    day, created = Day.get_or_create(date=previous_day_date)
    todo_list = ToDoList(day=day, user=self.user.user.rolllistuser)
    todo_list.save()

    items = [
        'cut the grass',
        'water the plants',
        'take out the trash',
    ]
    todo_items = []
    for item in items:
        new_item = ToDoItem(
            title=item,
            to_do_list=todo_list,
            priority=1
        )
        new_item.save()
        todo_items.append(new_item)
    return todo_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_todo_items(self):\n\n todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)\n todo_list.save()\n\n items = [\n 'feed the cats',\n 'drive to work',\n 'read a book',\n 'eat some food',\n ]\n todo_items = []\n for item in items:\n new_item = ToDoItem(\n title=item,\n to_do_list=todo_list,\n priority=1\n )\n new_item.save()\n todo_items.append(new_item)\n return todo_items", "def test_can_add_todo_list():\n scheduler = Scheduler()\n new_id = uuid.uuid4()\n\n scheduler.add_todo_list(new_id, \"my todo list\")\n\n Is(scheduler.get_amount_of_todo_lists()).not_none.integer.has_same_truth_of(1)", "def todo_added(name, description):", "def add_item(todo_list):\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return", "async def test_todo_feed_response_is_ordered_correctly(\n self,\n *,\n app: FastAPI,\n authorized_client: AsyncClient,\n test_list_of_new_and_updated_todos: List[TodoInDB],\n ) -> None:\n res = await authorized_client.get(app.url_path_for(\"feed:get-todo-feed-for-user\"))\n assert res.status_code == status.HTTP_200_OK\n todo_feed = res.json()\n # the first 13 should be updated and the rest should not be updated\n for feed_item in todo_feed[:13]:\n assert feed_item[\"event_type\"] == \"is_update\"\n for feed_item in todo_feed[13:]:\n assert feed_item[\"event_type\"] == \"is_create\"", "def add_task(action, user):\n \n item = Item()\n item.description = action['what'].get('description', '')\n item.id = action['what']['id']\n item.position = action['what']['position']\n \n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n l.items.append(item)\n l.save()\n \n return l", "def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check", "def write_todo(self, todo):\n if todo != None:\n print 'added \"%s\"' % todo.text\n self.new_items.append(todo)", "def add_item_to_list(self, todolist):\n\t\tnote = self.get_all_text_view_text(self.textview_add)\n\t\ttodolist.add_item(note)\n\t\tself.textview_add.get_buffer().set_text('')", "def add_list(user_id):\n\n list_title = request.form[\"list_title\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.filter_by(list_title=list_title).first()\n\n if to_do_list:\n flash(\"List name already exists. 
Please select a new name.\")\n return redirect(\"/dashboard\")\n\n new_list = ToDoList(list_title=list_title, user_id=user_id)\n \n db.session.add(new_list)\n db.session.commit()\n \n return redirect(\"/dashboard\")", "def add_items(todofile, items):\n if(items is not None and len(items) > 0):\n for item in items:\n todofile.write_todo(parse_item(item))", "def test_future_question_past_question(self):\n create_todo(todo_text=\"Future todo\", days=30)\n create_todo(todo_text=\"Past todo\", days=-30)\n response = self.client.get(reverse('todolist:index'))\n self.assertQuerysetEqual(\n response.context['todo_items_list'],\n ['<TodoItem: >']\n )", "def todo(self):\n # sort events with eventid using datetime string\n pass", "def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')", "def test_adding_many_todos(self):\n event = Event.objects.filter(slug__endswith=\"-upcoming\") \\\n .order_by(\"-pk\")[0]\n event.end = event.start + datetime.timedelta(days=2)\n event.save()\n\n # check if the event has 0 todos\n assert event.todoitem_set.all().count() == 0\n\n # add standard todos\n ident = event.get_ident()\n url, form = self._get_initial_form('todos_add', ident)\n\n # fix: turn Nones into empty strings\n for key, value in form.items():\n if value is None:\n form[key] = ''\n\n rv = self.client.post(reverse('todos_add', args=[ident]), form)\n\n # let's check if the form passes\n assert rv.status_code == 302\n\n # finally let's check there are some new todos\n assert event.todoitem_set.all().count() == 9", "def add_todo():\n task = flask.request.form[\"task\"]\n todos.append(ToDo(task))\n return \"success\"", "def list_2_for_tests(db_setup, user_for_test):\n heading = \"test_2_heading\"\n display_order = 0\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (user_for_test[\"id\"], heading, display_order,)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(L) \n FROM( SELECT id, heading, display_order FROM lists WHERE heading = %s LIMIT 1)\n L\n \"\"\", (heading,)\n )\n\n test_list_json = db_setup.cur.fetchone()[0]\n return test_list_json", "def add_items(list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def test_given_a_user_when_I_add_a_todo_Then_I_can_access_it_from_user_todo_collection(self):\n from .models import Tag\n from .models import TodoUser\n from .models import TodoItem\n\n user = TodoUser(\n email=u'[email protected]',\n first_name=u'Arthur',\n last_name=u'Pendragon',\n )\n self.session.add(user)\n\n tags = [u'quest', u'ni', u'knight']\n\n todo = TodoItem(user.email,\n u'Find a shrubbery', \n [u'quest', u'ni', u'knight'] \n ) \n self.session.add(todo)\n \n user_todo = user.todo_list.one()\n self.assertTrue(todo is user_todo)", "def todos_add(request, event_ident):\n try:\n event = Event.get_by_ident(event_ident)\n except Event.DoesNotExist:\n raise Http404('Event 
matching query does not exist.')\n\n dt = datetime.datetime\n timedelta = datetime.timedelta\n\n initial = []\n base = dt.now()\n if event.start and event.end:\n extra = 9\n else:\n extra = 10\n initial = [\n {\n 'title': 'Set date with host',\n 'due': dt.now() + timedelta(days=30),\n 'event': event,\n },\n ]\n\n TodoFormSet = modelformset_factory(TodoItem, form=SimpleTodoForm,\n extra=extra)\n\n formset = TodoFormSet(queryset=TodoItem.objects.none(), initial=initial + [\n {\n 'title': 'Set up a workshop website',\n 'due': base + timedelta(days=7),\n 'event': event,\n },\n {\n 'title': 'Find instructor #1',\n 'due': base + timedelta(days=14),\n 'event': event,\n },\n {\n 'title': 'Find instructor #2',\n 'due': base + timedelta(days=14),\n 'event': event,\n },\n {\n 'title': 'Follow up that instructors have booked travel',\n 'due': base + timedelta(days=21),\n 'event': event,\n },\n {\n 'title': 'Set up pre-workshop survey',\n 'due': event.start - timedelta(days=7) if event.start else '',\n 'event': event,\n },\n {\n 'title': 'Make sure instructors are set with materials',\n 'due': event.start - timedelta(days=1) if event.start else '',\n 'event': event,\n },\n {\n 'title': 'Submit invoice',\n 'due': event.end + timedelta(days=2) if event.end else '',\n 'event': event,\n },\n {\n 'title': 'Make sure instructors are reimbursed',\n 'due': event.end + timedelta(days=7) if event.end else '',\n 'event': event,\n },\n {\n 'title': 'Get attendee list',\n 'due': event.end + timedelta(days=7) if event.end else '',\n 'event': event,\n },\n ])\n\n if request.method == 'POST':\n formset = TodoFormSet(request.POST)\n if formset.is_valid():\n formset.save()\n messages.success(request, 'Successfully added a bunch of TODOs.',\n extra_tags='todos')\n return redirect(reverse(event_details, args=(event.get_ident(), )))\n else:\n messages.error(request, 'Fix errors below.')\n\n context = {\n 'title': 'Add standard TODOs to the event',\n 'formset': formset,\n 'helper': bootstrap_helper_inline_formsets,\n 'event': event,\n }\n return render(request, 'workshops/todos_add.html', context)", "def create_dummy_content(user_id):\n task = TodoItem(\n user=user_id,\n task=u'Find a shrubbery',\n tags=[u'quest', u'ni', u'knight'],\n due_date=datetime.utcnow() + timedelta(days=60),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Search for the holy grail',\n tags=[u'quest'],\n due_date=datetime.utcnow() - timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Recruit Knights of the Round Table',\n tags=[u'quest', u'knight', u'discuss'],\n due_date=datetime.utcnow() + timedelta(minutes=45),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Build a Trojan Rabbit',\n tags=[u'quest', u'rabbit'],\n due_date=datetime.utcnow() + timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Talk to Tim the Enchanter',\n tags=[u'quest', u'discuss'],\n due_date=datetime.utcnow() + timedelta(days=90),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Defeat the Rabbit of Caerbannog',\n tags=[u'quest', u'rabbit'],\n due_date=None,\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Cross the Bridge of Death',\n tags=[u'quest'],\n due_date=None,\n )\n DBSession.add(task)", "def list_items(todofile, opt, args):\n def filt(item):\n \"\"\"Filter function based on options.\"\"\"\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) 
or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result\n\n for item in filter(filt, todofile.fetch_items()):\n list_str = ['']\n if (item.done):\n list_str.append('X')\n elif (item.time is not None and item.time < datetime.datetime.now()):\n list_str.append('!')\n else:\n list_str.append('*')\n if(opt.list_id):\n list_str.append('{0:<3d}'.format(item.itemid))\n if(opt.list_date and item.time is not None):\n list_str.append(item.time.strftime('%c') + ' --')\n list_str.append(item.text)\n print ' '.join(list_str)", "def list_1_for_tests(db_setup, user_for_test):\n heading = \"test_1_heading\"\n display_order = 0\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (user_for_test[\"id\"], heading, display_order,)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(L) \n FROM( SELECT id, heading, display_order FROM lists WHERE heading = %s LIMIT 1)\n L\n \"\"\", (heading,)\n )\n\n test_list_json = db_setup.cur.fetchone()[0]\n return test_list_json", "def create_todo(todo_text, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return TodoItem.objects.create(todo_text=todo_text, pub_date=time)", "def test_creating_todo(todoApp, input):\n # Create new todo\n new_todo_input = todoApp.find_new_todo_input()\n print new_todo_input\n new_todo_input.send_keys(input, Keys.ENTER)\n\n # ASSERTION\n # Check whether the new todo exist in the todo list or not.\n todo = todoApp.find_todo(input)\n \n # Check the new todo status, it should active.\n assert todoApp.is_active_todo(todo)\n \n # Check the active todo count\n assert todoApp.count_active_todos() == '1 item left'", "def get_own_todos(current_user: models.User = Depends(get_current_user),\n \tdb: Session = Depends(get_db)):\n todos = blogcrud.get_user_todos(db, current_user.id)\n return todos", "def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "async def test_todo_feed_has_created_and_updated_items_for_modified_cleaning_jobs(\n self,\n *,\n app: FastAPI,\n authorized_client: AsyncClient,\n test_list_of_new_and_updated_todos: List[TodoInDB],\n ) -> None:\n res_page_1 = await authorized_client.get(\n app.url_path_for(\"feed:get-todo-feed-for-user\"),\n params={\"page_chunk_size\": 30},\n )\n assert res_page_1.status_code == status.HTTP_200_OK\n ids_page_1 = [feed_item[\"id\"] for feed_item in res_page_1.json()]\n todo_feeds = [TodoFeedItem(**feed_item) for feed_item in res_page_1.json()]\n for todo_feed in todo_feeds:\n assert todo_feed.as_task is True\n\n new_starting_date = res_page_1.json()[-1][\"updated_at\"]\n\n res_page_2 = await authorized_client.get(\n app.url_path_for(\"feed:get-todo-feed-for-user\"),\n params={\"starting_date\": new_starting_date, \"page_chunk_size\": 33},\n )\n assert res_page_2.status_code == status.HTTP_200_OK\n ids_page_2 = [feed_item[\"id\"] for feed_item in 
res_page_2.json()]\n todo_feeds_2 = [TodoFeedItem(**feed_item) for feed_item in res_page_2.json()]\n for todo_feed in todo_feeds_2:\n assert todo_feed.as_task is True\n\n # should have duplicate IDs for 13 update events an `is_create` event and an `is_update` event\n id_counts = Counter(ids_page_1 + ids_page_2)\n assert len([id for id, cnt in id_counts.items() if cnt > 1]) == 13", "def test_calendar_query_todo_alarm(self):\n raise SkipTest(\"test unimplemented\")" ]
[ "0.7035588", "0.63662964", "0.62633353", "0.5987224", "0.59500104", "0.5935984", "0.59221303", "0.5910842", "0.5875174", "0.5867453", "0.5842675", "0.5828082", "0.5801755", "0.5759313", "0.5746675", "0.5741539", "0.5722921", "0.5661359", "0.56570214", "0.5634428", "0.5626967", "0.5617959", "0.5577509", "0.556225", "0.55153346", "0.5497402", "0.5496168", "0.54508454", "0.5431274", "0.5420483" ]
0.73899007
0
Helper function adding some known schedule items for the test user
def _add_schedule_items(self):
    schedules = [
        {
            'start_time': '9:30 AM',
            'end_time': '10:00 AM',
            'title': 'Daily Scrum',
            'location': 'Hogwarts',
            'day': self.day,
            'user': self.user.user.rolllistuser,
        },
        {
            'start_time': '10:30 AM',
            'end_time': '11:00 AM',
            'title': 'Engineering Interview',
            'location': 'Narnia',
            'day': self.day,
            'user': self.user.user.rolllistuser,
        },
        {
            'start_time': '12:00 PM',
            'end_time': '12:30 PM',
            'title': 'Lunch',
            'location': 'Kitchen',
            'day': self.day,
            'user': self.user.user.rolllistuser,
        },
        {
            'start_time': '2:00 PM',
            'end_time': '2:30 PM',
            'title': 'Workout',
            'location': 'Gym',
            'day': self.day,
            'user': self.user.user.rolllistuser,
        },
    ]
    recurring_item_data = {
        'start_time': '3:00 PM',
        'end_time': '3:30 PM',
        'title': 'Recurring thing',
        'location': 'asdf',
        'day': self.day,
        'user': self.user.user.rolllistuser,
    }
    schedule_items = []
    schedule_dict = {i['start_time']: i for i in schedules}
    for schedule in schedules:
        save_data = schedule
        save_data['start_time'] = get_relevant_time_id(schedule['start_time'])
        save_data['end_time'] = get_relevant_time_id(schedule['end_time'])
        new_schedule_item = ScheduleItem(**save_data)
        new_schedule_item.save()
        schedule_items.append(new_schedule_item)

    save_data = recurring_item_data
    save_data['start_time'] = get_relevant_time_id(recurring_item_data['start_time'])
    save_data['end_time'] = get_relevant_time_id(recurring_item_data['end_time'])
    new_schedule_item = ScheduleItem(**save_data)
    new_schedule_item.save()
    new_schedule_item.make_recurring([0])
    schedule_items.append(new_schedule_item)
    return schedule_items, schedule_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_recurring_schedule(self):\n pass", "def test_list_schedules(self):\n pass", "def _create_schedules(self):\n\n ''''''", "def add_schedule(doc_user, date, schedule, logger):\n #my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n if my_calendar == None:\n logger.info('{}: calendar start'.format(doc_user[\"user_id\"]))\n my_calendar = {\"User\": doc_user[\"_id\"],\n \"schedules\": []}\n col_calendar.insert_one(my_calendar)\n\n if not schedule:\n return False\n\n if len(schedule) > 5:\n logger.info('{}: day schedules are already full'.format(\n doc_user[\"user_id\"]))\n return False\n\n ret = 0\n for s in schedule:\n my_calendar[\"schedules\"] += [{\"date\": date,\n \"events_list\": [s]}]\n logger.info('{}: {} added into schedule'.format(\n date, s))\n ret += 1\n\n if ret >= 1:\n col_calendar.find_one_and_replace({\"User\": doc_user[\"_id\"]}, my_calendar)\n\n return True", "def add_schedule(self):\r\n\r\n # Take the schedule entires from TOML file\r\n entries = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Parse schedule payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele = 'link')\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/schedule.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_schl, self.schedules)\r\n if 'link' in payload.keys() and payload['link'] != [{}]:\r\n b2 = self.link(self.schedules[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b1 and b2\r\n else:\r\n return False", "def _add_games_to_schedule(self, schedule, game_type, year):\n for item in schedule:\n game = Game(item, game_type, year)\n self._games.append(game)", "def test_add_schedule(self):\n body = Schedule()\n response = self.client.open('/v1/schedule',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def add(self, story, items_to_schedule):\n url = self._build_url(story)\n arguments = self._argument_converter(\n data={\n 'items': items_to_schedule\n }\n )\n\n result = self._client.post(url, **arguments)\n return result", "def create( self ):\r\n for rsrc in self.ee.getRsrcs( ):\r\n self.schedule[rsrc.getid( )] = [ ]", "def test_remove_recurring_schedule(self):\n pass", "def add_scheduled_spirit(self, schedule_info):\n\n raise NotImplementedError", "def create_schedule_team(self, schedule):\r\n stub_user = self.find(\"users\", \"Stub User\", attribute=\"name\")\r\n schedule_json = {\r\n \"name\": schedule['name'],\r\n \"type\": \"schedule\",\r\n \"time_zone\": \"Pacific/Auckland\",\r\n \"schedule_layers\": [\r\n {\r\n \"start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_virtual_start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_turn_length_seconds\": 86400,\r\n \"users\": [\r\n {\r\n \"user\": {\r\n \"type\": \"user\",\r\n \"id\": stub_user[\"id\"]\r\n }\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n try:\r\n self.rpost(\"users\", json=schedule_json)\r\n except PDClientError as e:\r\n raise e", "def _use_default_schedule(self):\n def gen_day():\n dl = []\n ll = [-1, '', -1, '', '']\n for i in range(8):\n dl.append(ll[:])\n rl = []\n for i in range(4):\n rl.append(dl[:])\n return rl\n\n self.schedule = {\n 'current_week': [1, date.today().isocalendar()[1]],\n 'lessons_time': [\n ['8:00', '9:35'],\n ['9:45', 
'11:20'],\n ['11:40', '13:15'],\n ['13:25', '15:00'],\n ['15:20', '16:55'],\n ['17:05', '18:40'],\n ['18:45', '20:20'],\n ['20:25', '22:00']\n ],\n 'schedule': {\n 'Monday': gen_day(),\n 'Tuesday': gen_day(),\n 'Wednesday': gen_day(),\n 'Thursday': gen_day(),\n 'Friday': gen_day(),\n 'Saturday': gen_day()\n },\n 'subgroup': 0\n }", "def add_to_calender(service, username): \n colors = service.colors().get().execute()\n d_and_t = df.get_add_to_calender_input(argv[1], argv[2])\n now = datetime.datetime.now()\n if d_and_t == None:\n return\n event_request_body = {\n 'start': {\n 'dateTime': df.convert_to_RFC_datetime(d_and_t[0], d_and_t[1], d_and_t[2], d_and_t[3][0]-2, d_and_t[3][1]),\n 'timeZone': 'Africa/Johannesburg'\n },\n 'end': {\n 'dateTime': df.convert_to_RFC_datetime(d_and_t[0], d_and_t[1], d_and_t[2], d_and_t[4][0]-2, d_and_t[4][1]),\n 'timeZone': 'Africa/Johannesburg'\n },\n 'summary': f\"{username} - Code Clinic\",\n 'description': 'empty',\n 'status': 'confirmed',\n 'transparency': 'opaque',\n 'visibility': 'public',\n 'location': 'WTC',\n 'guestsCanModify': True,\n 'attendees': [\n { \n 'displayName': username,\n 'organizer': True,\n 'email': f'{username}@student.wethinkcode.co.za',\n 'optional': True,\n 'responseStatus': 'accepted'\n }\n ]\n }\n start = event_request_body['start']['dateTime']\n end = event_request_body['end']['dateTime']\n\n overlaps = check_if_slots_overlap(start, end, service, username)\n if overlaps == False:\n response = service.events().insert(calendarId=get_events.calendar_id, sendUpdates='all', body=event_request_body).execute()\n print(\"\\nYour slot has been created...\")\n else:\n print(\"\\nYou already have an event scheduled for this time. Please choose another time...\")\n events, count = get_events.get_events_for_next_7_days_to_delete(username, service)\n if count == 0:\n print(\"\\nYou currently don't have any slots created.\")\n return", "def append_schedule(self, host, schedule):\n if isinstance(host, str):\n host = resolve_host_name(host)\n\n host_schedule = self.host_schedule(host['name'])\n\n for name, run_policy, command in schedule:\n e = Event(name, run_policy, command)\n host_schedule.append(e)", "def register_schedule(self, term, schedule, allow_waitlisting=True, at=None):\n items = self.schedules(term, include_units=True)[schedule]\n self.register_courses(term, schedule, items, allow_waitlisting, at)", "def test_add(self):\n sched = Schedule()\n sched.append(Play(Waveform(np.ones(5)), DriveChannel(0)), inplace=True)\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"u1\", 1, sched)\n inst_map.add(\"u1\", 0, sched)\n\n self.assertIn(\"u1\", inst_map.instructions)\n self.assertEqual(inst_map.qubits_with_instruction(\"u1\"), [0, 1])\n self.assertTrue(\"u1\" in inst_map.qubit_instructions(0))\n\n with self.assertRaises(PulseError):\n inst_map.add(\"u1\", (), sched)\n with self.assertRaises(PulseError):\n inst_map.add(\"u1\", 1, \"not a schedule\")", "def pre_schedule(self):\n return []", "def post_schedule(self):\n return []", "def add_schedule(self, schedule: Schedule, span: FiniteSpan) -> None:\n for schedule_item in schedule._schedule_items:\n for event in schedule_item.events(span):\n self.add_event(event)", "def test_pre_fill_and_assign(self):\n users = []\n for i in range(1, 50):\n users.append(User.objects.create_user(username=\"u{0}\".format(i)))\n pre_fill.main([\"--managers\", \"--workshift\"])\n utils.make_workshift_pool_hours(semester=self.semester)\n # Assign manager shifts beforehand\n for user, manager in zip(users, 
Manager.objects.all()):\n manager.incumbent = UserProfile.objects.get(user=user)\n manager.save()\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)", "def test_meeting_registrants(self):\n pass", "def invite_site_users(users):\n #group(run_cron.s(item) for item in sites).delay()\n pass", "def get_time_slots_map_for_user(start_time, end_time, user):\n\n time_slots_map = TimeSlotsMap(start_time=start_time, end_time=end_time)\n\n user_query = Query()\n schedules = db.search(user_query.user == user)\n\n schedules.sort(key=lambda x:x['created_time'])\n\n for sc in schedules:\n day_sc_map = TimeHourSlotsInOneDay(initial_value=str(sc[\"available_hours_in_a_day\"]))\n\n sc_start_time = parse_datetime_str(sc.get(\"start_time\"))\n sc_end_time = parse_datetime_str(sc.get(\"end_time\"))\n\n time_slots_map.overlay_slots_map(time_slots_map=day_sc_map,start_time=sc_start_time,\n end_time=sc_end_time, repeat=True)\n\n return time_slots_map", "def set_schedule(self, new_schedule):\n #first, set all the others to inactive\n\n new_schedule.deprecated=False\n if new_schedule.started == None or new_schedule.started <= datetime.utcnow():\n new_schedule.started=datetime.utcnow()\n for sched in self.weekly_schedule:\n if not sched.deprecated:\n #sched.deprecated=True\n sched.ended=datetime.utcnow()\n sched.save()\n elif new_schedule.started > datetime.utcnow():\n #if it's in the future, then don't deprecate the future schedule, just procede along and let the system set the dates correctly\n pass\n self.weekly_schedule.append(new_schedule)\n self.save()", "def update_schedule_users(self, schedule, team_members):\r\n schedule_json = {\r\n \"name\": schedule[\"summary\"],\r\n \"type\": \"schedule\",\r\n \"time_zone\": schedule[\"time_zone\"],\r\n \"description\": schedule[\"description\"],\r\n \"schedule_layers\": [\r\n {\r\n \"start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_virtual_start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_turn_length_seconds\": 86400,\r\n \"users\": []\r\n }\r\n ]\r\n }\r\n\r\n for member in team_members:\r\n pagerduty_user = self.find(\"users\", member[\"name\"], attribute=\"name\")\r\n if pagerduty_user is not None:\r\n schedule_json[\"schedule_layers\"][0][\"users\"].append({\r\n \"user\": {\r\n \"type\": \"user\",\r\n \"id\": pagerduty_user[\"id\"]\r\n }\r\n })\r\n\r\n try:\r\n self.rput(\"schedules/\" + schedule[\"id\"], json=schedule_json)\r\n except PDClientError as e:\r\n raise e", "def add_schedule(self, schedule_dict):\n sub_task = SchedulePolicies.schedule_json(self.policy_type, schedule_dict)\n sub_task[\"subTaskOperation\"] = 2\n self._subtasks.append(sub_task)\n self._modify_schedule_policy_properties()", "def add_jira_entries(config, date, dry_run, economic):\n if date is not None:\n jira = Jira(config.items('Jira'))\n for task in jira.get_tasks():\n if task:\n economic.add_time_entry(task, dry_run)", "def register(self, task, schedule, minutes: int = None):\n self.task_list.append(ScheduledTask(task, schedule, minutes))", "def append_schedule(*args, **kwargs):\n return get_schedule().append_schedule(*args, **kwargs)" ]
[ "0.68778497", "0.6814362", "0.67497754", "0.6255919", "0.5921792", "0.58371866", "0.58195525", "0.5796732", "0.57783014", "0.5733727", "0.5705526", "0.5705507", "0.5691883", "0.5643566", "0.5565707", "0.5564822", "0.5547369", "0.5525819", "0.552174", "0.5512991", "0.5498908", "0.5484778", "0.5469756", "0.5459951", "0.545529", "0.54383945", "0.5435166", "0.5414053", "0.54132766", "0.5375316" ]
0.7738
0
Creates a list of victory conditions based on the size of the board
def create_victory_conditions(size): #Written by Cody West. Not used in current program, could be used to make boards of different sizes
    victory_conditions = []
    for i in range(size):
        horizontal_victory = []
        for n in range(size):
            horizontal_victory.append(size*i+n)
        victory_conditions.append(horizontal_victory)
    for i in range(size):
        vertical_victory = []
        for n in range(size):
            vertical_victory.append(size*n+i)
        victory_conditions.append(vertical_victory)
    diagonal_victory_1 = []
    for i in range(size):
        diagonal_victory_1.append(size*i+i)
    victory_conditions.append(diagonal_victory_1)
    diagonal_victory_2 = []
    for i in range(size):
        diagonal_victory_2.append((i+1)*size-(i+1))
    victory_conditions.append(diagonal_victory_2)
    return(victory_conditions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_board(size) -> list:\n return list(itertools.product([i for i in range(size)], repeat=2))", "def create_board(self, size, cars):\n board = [[None for i in range(size)] for j in range(size)]\n\n for car in cars.values():\n for i in range(car.length):\n if car.orientation == 'H':\n board[car.row][car.col + i] = car\n if car.orientation == 'V':\n board[car.row + i][car.col] = car\n\n return board", "def board(constraints):\n rows = len(constraints[0])\n columns = len(constraints[1])\n board = []\n for i in range(rows):\n board.append([Empty for k in range(columns)])\n return board", "def __checkvictory__(self,playerchar):\n\t\tvictory = False\n\t\tboardx = deepcopy(self.board)\n\t\trow = 5\n\t\tcolumn = 6\n\t\tstarburst_bag = []\n\t\tcats_game = True\n\t\tfor a in range(row+1):\n\t\t\tfor b in range(column+1):\n\t\t\t\tstarburst = []\n\t\t\t\tstarburst.append((a,b))\n\t\t\t\t\n\t\t\t\tif self.__checkplace__(a,b) is True:\n\t\t\t\t\tcats_game = False\n\t\t\t\t\tcontinue\n\t\t\t\telif self.__checkplace__(a,b) == playerchar:\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a-starburst[1] < 0:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a-starburst[1],b) == playerchar:\n\t\t\t\t\t\t\tstarburst[1] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a-starburst[2] < 0:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b+starburst[2] > 6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a-starburst[2],b+starburst[2])\\\n\t\t\t\t\t\t == playerchar:\n\t\t\t\t\t\t\tstarburst[2] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif b+starburst[3] > 6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a,b+starburst[3]) == playerchar:\n\t\t\t\t\t\t\tstarburst[3] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a+starburst[4] > 5:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b+starburst[4] > 6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a+starburst[4],b+starburst[4])\\\n\t\t\t\t\t\t== playerchar:\n\t\t\t\t\t\t\tstarburst[4] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\tstarburst_bag.append(starburst)\n\t\t\n\t\tfor starburst in starburst_bag:\n\t\t\t\n\t\t\ta = starburst[0][0]\n\t\t\tb = starburst[0][1]\n\t\t\t\n\t\t\tif starburst[1] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[1]):\n\t\t\t\t\tboardx[a-i][b] = boardx[a-i][b].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[2] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[2]):\n\t\t\t\t\tboardx[a-i][b+i] = boardx[a-i][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[3] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[3]):\n\t\t\t\t\tboardx[a][b+i] = boardx[a][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[4] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[4]):\n\t\t\t\t\tboardx[a+i][b+i] = boardx[a+i][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\t\n\t\tif cats_game:\n\t\t\treturn None\n\t\tif victory:\n\t\t\treturn boardx\n\t\telse:\n\t\t\treturn False", "def __init__(self, size):\n\t\tself.size = size\n\t\tself.board = []\n\t\tnew = []\n\t\tfor i in range(0, size, 
1):\n\t\t\tfor j in range(0, size, 1):\n\t\t\t\tnew.append(0)\n\t\t\tself.board.append(new)\n\t\t\tnew = []", "def make_board():\n return [[0 for i in range(8)] for i in range(8)]", "def _create_board(self):\n board = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(\n {\n \"c\": j + 1, # c column number base 1\n \"r\": i + 1, # r row number base 1\n \"v\": False, # v visible\n \"f\": 0, # f flag\n \"n\": 0, # n neighbors value\n \"b\": False, # has a bomb , The bombs are created on start\n }\n )\n board.append(row)\n self.board = board", "def generateQueenAttacks(boardsize, pos):\n assert isinstance(pos, Position) and validatePosition(boardsize, pos)\n attackList = []\n startPos = Position(pos.x, pos.y)\n \n def addAttackList(pos):\n for attacked in attackList:\n if pos.compare(attacked):\n return\n attackList.append(Position(pos.x, pos.y))\n\n #positive x\n while pos.x < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #positive y\n while pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #negative x\n while pos.x >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #negative y\n while pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal -x +y left bottom\n while pos.x >= 0 and pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal -x -y left top\n while pos.x >= 0 and pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal +x +y right bottom\n while pos.x < boardsize and pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal +x -y right top\n while pos.x < boardsize and pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n\n return attackList", "def check_victory(board):\n\n for idx in range(3):\n if board[idx][0] != ' ' and board[idx][0] == board[idx][1] == board[idx][2]:\n # This checks if all items in each horizontal row is complete.\n print('Victory to ' + board[idx][0])\n return True\n elif board[0][idx] != ' ' and board[0][idx] == board[1][idx] == board[2][idx]:\n # This checks if all the items in each vertical column is complete.\n print('Victory to ' + board[0][idx])\n return True\n\n if board[0][0] != ' ' and board[0][0] == board[1][1] == board[2][2]:\n # This checks if the left to right diagonal is complete.\n print('Victory to ' + board[0][0])\n return True\n elif board[2][0] != ' ' and board[2][0] == board[1][1] == board[0][2]:\n # This checks if the right to left diagonal is complete.\n print('Victory to ' + board[2][0])\n return True\n\n return False", "def create_board(self, size):\n self.board = [\n [FieldState.EMPTY for _ in range(size)]\n for _ in range(size)\n ]", "def buildBoard(self, n):\n\n boardDict = []\n diagCount = 0\n\n for i in range(n):\n self.rows[i] = [True, \"\", 0] #homogenous, X/O, count of X's/O's\n self.cols[i] = [True, \"\", 0]\n for j in range(n):\n\n# Is there a faster way to make this array than nested for loops?\n boardDict.append((i,j))\n return boardDict", "def 
makeBoard(n):\n valid_positions = []\n for i in range(0, n):\n for j in range(0,n):\n valid_positions.append(Position(i,j))\n return valid_positions", "def create_board(board_size):\n board = []\n for i in range(board_size):\n row = []\n for j in range(board_size):\n row.append('-')\n board.append(row)\n return board", "def initializer():\n size: int = int(input(\"Enter a number for the board size: \"))\n board: List[List[str]] = [[random.choice([\"X\", \"O\", \" \"]) for x in range(size)] for y in\n range(size)]\n return size, board", "def initialise(length, runs):\n # The first run of fix_row() or fix_col() will find this anyway. But this is faster\n arr = [EITHER] * length\n free_whites = length - sum(runs) - (len(runs) - 1) # remaining whites to place\n j = 0 # current position\n for x in runs:\n if x > free_whites: # backfill s \n for c in range(j + free_whites, j + x): \n arr[c] = BLACK \n if (free_whites == 0) and (j + x < length):\n arr[j + x] = WHITE # can place a white too\n j += x + 1\n return arr", "def get_complete_3D_action_list():\n # Action is a tuple tile_type,nbr_to_move, row_to_move_to\n # 5 * 5 * 6 = 150 possibilities\n actions = list()\n for tt in range(0,5):\n for i in range(1,6): # the final value represents 5 or more\n for row in range(0,6):\n actions.append((tt,i,row))\n return actions", "def __init__(self, board_dim= DEFAULT_DIM):\r\n self.width = board_dim\r\n self.height = board_dim\r\n\r\n self.grid = np.array([[' '] * self.width for i in range(self.height)])\r\n self.num_checkers = 0 # keeps track of how many checkers have been added\r\n\r\n self.available_moves = [(row, col) for row in range(self.height) for col in range(self.width)]\r\n\r\n # Specify the winning condition based on the board's dimension\r\n if (self.width < 5):\r\n self.win_condition = self.width\r\n else:\r\n self.win_condition = 5", "def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}", "def gameOfLife(self, board: List[List[int]]) -> None:\n r, c = len(board), len(board[0])\n # 下面两行做zero padding\n board_exp = np.array([[0 for _ in range(c + 2)] for _ in range(r + 2)])\n board_exp[1:1 + r, 1:1 + c] = np.array(board)\n print(board_exp)\n # 设置卷积核\n kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n # 开始卷积\n for i in range(1, r + 1):\n for j in range(1, c + 1):\n # 统计细胞周围8个位置的状态\n temp_sum = np.sum(kernel * board_exp[i - 1:i + 2, j - 1:j + 2])\n # 按照题目规则进行判断\n if board_exp[i, j] == 1:\n if temp_sum < 2 or temp_sum > 3:\n board[i - 1][j - 1] = 0\n else:\n if temp_sum == 3:\n board[i - 1][j - 1] = 1", "def AI(current_board, AI_symbol, opponent_symbol, difficulty): #Written by Cody West\n victory_conditions = [[0,4,8],[2,4,6],[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8]] #Establishes victory conditions to be checked\n if difficulty >= 2: #If difficulty is at least 2\n ## Cody -- you could just write:\n ## for slots in victory_conditions\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions ## Oops\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list ## Oops \n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n ## This you can do even more efficiently using a beautiful syntax called\n ## \"list comprehension\" which 
entered python some years ago -- watch\n ## me do it in one line:\n ## check = [current_board[s] for s in slots]\n if check.count(AI_symbol)==2 and check.count(\" \")==1: #If there are any rows where the AI has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n ## Oops -- you repeat the code again here for no reason\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(opponent_symbol)==2 and check.count(\" \")==1: #If there are any rows where the opponent has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n if difficulty >= 3: #If difficulty is at least 3\n ## It looks like you're doing an identical loop here -- I\n ## wonder why you don't move the if statement inside the loop\n ## -- I believe that would significantly shorten your code\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(AI_symbol)==1 and check.count(\" \")==2: #If there are any rows where the AI has one symbol and there's two empty spots\n if check[0] == \" \": #If the first slot from check is empty\n return(slots[0]) #Return the first slot\n else: \n return(slots[2]) #Return the third slot\n if difficulty == 4: #If difficulty is 4\n if current_board[4] == \" \": #If the center is empty\n return(4) #Take the center\n elif current_board[0] or current_board[2] or current_board[6] or current_board[8] == \" \": #Else, if a corner is open\n corners = 2*random.randint(0,4) #Selects a random corner (or center, which will reject)\n while current_board[corners] != \" \": #Until the corner selected is empty\n corners = 2*random.randint(0,4) #Select a new corner or center\n return(corners) #Return empty corner\n else:\n sides = 2*random.randint(0,3)+1 #Selects a side\n while current_board[sides] != \" \": #Until the side is empty\n sides = 2*random.randint(0,3)+1 #Selects a new side\n return(sides) #Returns empty side\n if difficulty < 4: #If difficulty is less than 4\n ran = random.randint(0,8) #Picks random spot on board\n while current_board[ran] != \" \": #Until the spot is empty\n ran = random.randint(0,8) #Picks a new spot\n return(ran) #Returns empty spot", "def inner_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = 1\n else:\n a[row][col] = 0\n\n return a", "def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, 
make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves", "def create_pristine_board(size=100):\n board = defaultdict(dict)\n\n for i in xrange(1, size + 1):\n board[i] = {j: (j - i) for j in xrange(min(i + 1, size + 1), min(i + 7, size + 1))}\n\n return board", "def create_chessboard(size=8):\n r1 = (WHITE + BLACK) * int((size / 2)) + \"\\n\"\n r2 = (BLACK + WHITE) * int((size / 2)) + \"\\n\"\n print((r1 + r2) * int((size / 2)))", "def make_deck():\n deck = []\n for i in range(13):\n for j in range(13):\n if j >= i:\n deck.append([i, j])\n else:\n pass\n return deck", "def generate_board(rows, cols):\n aux = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n if np.random.random() < 0.5:\n aux[i][j] = 1\n return aux", "def possibilities(board):\n return board[np.where(board == 0)]", "def make_board(row_size: int, column_size: int) -> list:\n board = []\n for r in range(row_size): # Creates a list for each row.\n row = []\n for c in range(column_size): # Populates the list with a pair of coords for each row.\n row.append((c, r))\n board.append(row)\n return board", "def actions(self, state):\n \n #les actions sont définies comme étant les nombres possibles dans \n #la case i,j\n theActions = []\n for i in range(size):\n for j in range(size):\n line = i\n col = j\n if(state[i][j] == 0):\n possibleNumbers = [1,2,3,4,5,6,7,8,9]\n config = state\n for a in range(size):\n x = config[line][a]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n for b in range(size):\n x = config[b][col]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n #identifie quelle boite on veut vérifier\n hBox = col - col % 3\n vBox = line - line % 3\n \n for c in range(3):\n for d in range(3):\n x = config[c+vBox][d+hBox]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n for k in possibleNumbers:\n theActions.append((i,j,k))\n return theActions", "def drawBoard():\t\n\t#draw 64 Rectangles from (MARGINH,MARGINV) with CASESIZE sizes\n\tfor i in range(BOARDSIZE):\n\t\tfor j in range(BOARDSIZE):\n\t\t\tpygame.draw.rect(DISPLAYSURF, BLACK, [MARGINH + (i)*CASESIZE, MARGINV + (j)*CASESIZE, CASESIZE, CASESIZE], 1)" ]
[ "0.66391665", "0.62301445", "0.6140845", "0.60436356", "0.6038024", "0.5961014", "0.5920955", "0.58922297", "0.5841574", "0.581635", "0.57947874", "0.5757776", "0.57379335", "0.57237613", "0.570596", "0.56783783", "0.5668432", "0.56344336", "0.5629953", "0.56038624", "0.5601722", "0.5581705", "0.5578274", "0.556819", "0.5564179", "0.5550873", "0.55455333", "0.5540024", "0.5540009", "0.55337083" ]
0.8714315
0
Calculates rscu values for each codon
def calculate_rscu(handle: str, genetic_code_num: int, min_len_threshold: int = 200, gene_analysis: bool = False, save_file: bool = False, file_name: str = 'RSCU_report', folder_path: str = 'Report') -> \ dict[str, float | dict[str, float]]: records = parse(handle, 'fasta') references = filter_reference(records, min_len_threshold) if gene_analysis: rscu_dict = dict() for i, seq in enumerate(references): rscu_dict.update({f'gene_{i + 1}': RSCU([seq], genetic_code_num)}) if save_file: name = file_name + '.xlsx' make_dir(folder_path) file_path = join(folder_path, name) if is_file_writeable(file_path): df = pd.DataFrame.from_records( [ (gene, codon, rscu_val) for gene, rscu_vals in rscu_dict.items() for codon, rscu_val in rscu_vals.items() ], columns=['Gene', 'Codon', 'RSCU_vals'] ) df.to_excel(file_path, float_format='%.4f', columns=df.columns) print(f'The RSCU score file can be found at: {abspath(file_path)}') else: reference = filter_reference(records, min_len_threshold) rscu_dict = RSCU(reference, genetic_code_num) if save_file: name = file_name + '.xlsx' make_dir(folder_path) file_path = join(folder_path, name) if is_file_writeable(file_path): df = pd.DataFrame.from_records( [ (codon, rscu_val) for codon, rscu_val in rscu_dict.items() ], columns=['Codon', 'RSCU_vals'] ) df.to_excel(file_path, float_format='%.4f', columns=df.columns) print(f'The RSCU score file can be found at: {abspath(file_path)}') return rscu_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SumaryCompras(vj):\n\n vj.CompasCUC = vj.MontoPrecios = vj.GanancPrecios = 0.0\n\n for row in vj.tbCompras.rows.values():\n prec = vj.MD.Convert( row.precio, row.moneda, MD.Cuc ) # Siempre lleva el precio a CUC\n\n vj.MontoPrecios += ( prec * row.count )\n vj.CompasCUC += row.valCUC\n\n UpdateRecupIdx(vj)\n vj.GanancPrecios = vj.MontoPrecios - vj.MontoInvers", "def _get_cu(self):\n c_undrained=0\n #group_index = self._data['GI']\n if self.is_clayey():\n c_undrained = self.qu(self._data[SoilProperty.N60])/2\n #c_undrained=_clamp(c_undrained, 10, 103)\n # Plasix calculation needs very small c_undrained\n #if c_undrained<0.21:\n # c_undrained = 0.21\n #use 0.2 as per plasix recommendation\n return c_undrained#the cu is always 103 check with small value of n_60, some mistake maybe", "def calc_perc_reducts():\n #Load RCP2.6 datq\n cubes = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')\n #Get the surface and high level SO2 emissions\n surf_cube = cubes[3][:,0]\n high_cube = cubes[1][:,0]\n cubes = iris.cube.CubeList([surf_cube,high_cube])\n\n for i in range(0,len(cubes)):\n #Add the year and month to the cube and extract for 2009 onwards\n iris.coord_categorisation.add_year(cubes[i],'time',name='year')\n iris.coord_categorisation.add_month(cubes[i],'time',name='month')\n cubes[i] = cubes[i].extract(iris.Constraint(year = lambda y: y >=2009))\n\n #Make the year-on-year reduction rates\n yoy_rates = []\n for cube in cubes:\n #Calculate the global mean timeseries\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n area_weights = iris.analysis.cartography.area_weights(cube)\n cube_mean = cube.collapsed(['latitude','longitude'],iris.analysis.MEAN,weights=area_weights)\n\n cube_rates = np.ones((cube_mean.shape))\n #Loop over the months and calculate the changes from the previous year\n #Calculate the year on year proportional changes in the global mean\n for i in range(12,cube_mean.shape[0]):\n cube_rates[i] = cube_mean[i].data / cube_mean[(i-12)].data\n\n yoy_rates.append(cube_rates)\n\n return yoy_rates", "def calculate_cci(hunterlab):\n return 1000 * (hunterlab[1]) / (hunterlab[0] * hunterlab[2])", "def computeRmse(model, data, n , sc):\n truth = data.map( lambda x: ((x[0], x[1]), x[2]) )\n truth.cache()\n ##print 'test zhou 0.....', truth.count() , '............', truth.take(10)\n\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictions.cache()\n # here let's rescale predicted ratings to 0-10 scale\n maxPrediction = predictions.map(lambda x: x[2]).max()\n minPrediction = predictions.map(lambda x: x[2]).min()\n maxRate = RatingScale\n minRate = RatingScaleMin\n ##print 'test zhou 1......', predictions.count(), '............', predictions.take(10)\n\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate )).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n\n\n #predictedRating = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate ) )\n predictedRating = predictions.map(lambda x: ((x[0], x[1]), x[2] ) )\n predictedRating.cache()\n ##predictedRating.checkpoint()\n ##print 'test zhou 2.......', predictedRating.count(), '............', predictedRating.take(10)\n\n\n \n\n\n predictionsAndRatings = predictedRating.join(truth).values()\n #predictionsAndRatings = sc.union(predictedRating, truth)\n predictionsAndRatings.cache()\n #print 'test zhou 3........', 
predictionsAndRatings.count(), '............', predictionsAndRatings.take(10)\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n \n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))\n #return 1.0", "def compute_county_cirle(county_population):\n return SCATTER_SCALE * county_population", "def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r", "def calcCaliCorrandR(constants, corr, data, outName):\n print(constants)\n perr=np.sqrt(np.diag(corr))\n print(perr)\n corrmat=np.zeros([len(constants),len(constants)])\n for i in range(len(corr)):\n for j in range(len(corr)):\n \n ele=corr[i,j]\n diele=ele/(perr[i]*perr[j])\n corrmat[i,j]=round(diele,3)\n print(corrmat)\n #calculate the r^2 value\n ss_res = 0\n ss_total = 0\n residuals = np.zeros([len(data[:,0]), 1])\n for i in range(len(data[:,0])):\n residuals[i] = (LangmuirCurve(data[i,0],constants[0],constants[1],constants[2],constants[3]) - data[i,1])\n ss_res += np.square(residuals[i])\n ss_total += np.square((data[i,1] - np.average(data[:,1])))\n print(ss_res)\n print(ss_total)\n r_sq = 1 - (ss_res/ss_total)\n print(r_sq)\n #write out the fit results\n f = open(outName + \"_cali_constants.txt\", 'w')\n f.write(\"B\\ta\\tN\\tK\\n\")\n for i in range(len(constants)):\n f.write('%.9f' %constants[i] + \"\\t\")\n f.write(\"\\n\\n\")\n for i in range(len(corr)):\n f.write('%.9f' %perr[i] + \"\\t\")\n f.write(\"\\n\\n\")\n f.write(\"Correlation matrix :\\n\\n\")\n for i in range(len(corr)):\n for j in range(len(corr)):\n f.write('%.9f' %corrmat[i,j]+'\\t')\n f.write(\"\\n\\n\")\n f.write(\"R^2 value : \\t\" + '%.9f' %r_sq)\n f.close()", "def sum_crimes(cs:CrimeStatistics)-> int:\n # return 0 # stub\n #template from atomic\n crimes_total = (cs.violent_crimes+cs.property_crimes+cs.arson)\n return crimes_total", "def get_crime_rate(crime):#=d1Data.get_US_crime()\n crimeRates_list = []\n for i in range(0,len(crime)):\n crimeRates = list(crime[i])\n crimeRates[2:] = list(round(100000*crimeRates[j]/crimeRates[1],1) for j in range(2,len(crime[0])))\n crimeRates_list.append(crimeRates)\n return(crimeRates_list)", "def compute_correlation_separability_score(self) -> float:\n sep_scores = pd.DataFrame.from_dict(self.separability_scores).to_numpy()\n sep_scores = minmax_scale(sep_scores)\n corrs = {}\n for tumor_pair in range(sep_scores.shape[1]):\n corr_sep_score = np.corrcoef(PATHO_PRIOR[:, tumor_pair], sep_scores[:, tumor_pair])\n corrs[tumor_pair] = corr_sep_score[1, 0]\n corrs['agg_with_risk'] = sum(\n np.array([val for _, val in corrs.items()]) *\n RISK\n ) \n corrs['agg'] = sum([val for key, val in corrs.items() if type(key)==int]) \n return corrs", "def runcircos(self):\n pd.read_csv(self.cns, sep=\"\\t\")[\n [\"chromosome\", \"start\", \"end\", \"tcn\"]\n ].rename({\"chromosome\": \"chrm\", \"tcn\": \"cns\"}, axis=1).to_csv(\n self.segs, index=None\n )\n\n passed_svs = [\n sv\n for sv in self.svs.values()\n ]\n circos_sv_file = os.path.join(\n self.out_dir, \"circos_svs.tsv\"\n )\n circos_df = pd.DataFrame(\n [\n (\"chr\" + sv.chr1, sv.pos1, sv.pos1, \"chr\" + sv.chr2, sv.pos2, sv.pos2)\n for sv in passed_svs\n ],\n columns=[\n \"Chromosome\",\n \"chromStart\",\n \"chromEnd\",\n \"Chromosome.1\",\n \"chromStart.1\",\n \"chromEnd.1\",\n ],\n )\n circos_df.to_csv(circos_sv_file, index=None)", "def codonComposition(self):#works\n return {codon: 
self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}", "def get_rcs():\n kk = np.loadtxt(source+\"/kids_data/rcslens2.csv\", delimiter=\",\",\n skiprows=1, max_rows=sample)\n global maxra\n maxra = max(kk[:sample, 0])\n global minra\n minra = min(kk[:sample, 0])\n global maxdec\n maxdec = max(kk[:sample, 1])\n global mindec\n mindec = min(kk[:sample, 1])\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([kk[:sample, 0], kk[:sample, 1]])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n # gamma_shear = -k[:,2]*np.cos\n return ctree, kk[:sample, 2], kk[:sample,\n 3], kk[:sample, 4], kk[:sample, 5]", "def getRNCS(ChargeSA):\n charge=[]\n for i in ChargeSA:\n charge.append(float(i[1]))\n\n temp=[]\n for i in ChargeSA:\n temp.append(i[2])\n\n try:\n RNCG = min(charge)/sum([i for i in charge if i < 0.0])\n return temp[charge.index(min(charge))]/RNCG\n except:\n return 0.0", "def __cnc(cls, sens_mv, we_c):\n if we_c is None:\n return None\n\n cnc = we_c / (sens_mv / 1000.0)\n\n # print(\"A4Datum__cnc: we_c:%s cnc:%f\" % (we_c, cnc), file=sys.stderr)\n\n return cnc", "def calc_cop():\n df = pp.load_csv_file('COP_in.csv', 'metrics_data') \n df = pp.clean_dataframe(df, 5)\n\n df_cop = df['LP01LM01_QQ'] / df['SJ01_SM01']\n df_cop = df_cop.replace(to_replace=np.nan, value = 0, inplace=False)\n \n return df_cop", "def fRCrim(Swe,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rh,Cwv,Ckv,Alpha,Tout):\n#\n# 1. Compute and normalise volumetric components:\n#\t-----------------------------------------------\n\tVw=PHIe*Swe\n\tVh=PHIe*(1-Swe)\n\tVwe=(Vw-Cwv)/(1-Cwv)\n\tVwe=ImposeLimits(Vwe,0,1)\n\tVke=(Vk-Ckv)/(1-Ckv)\n\tVke=ImposeLimits(Vke,0,1)\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vke)+abs(Vwe)+abs(Vh)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVh=abs(Vh)/Sum\n#\n#\t2. Determine conductivity of components:\n#\t----------------------------------------\n\tSigc1=1/Rc1\n\tSigc2=1/Rc2\n\tSigc3=1/Rc3\n\tSigk=1/Rk\n\tSigw=1/Rw\n\tSigh=1/Rh\n#\n#\t3. Compute Conductivity:\n#\t========================\n\tTrm1=Vc1*(Sigc1**(1/Alpha))\n\tTrm2=Vc2*(Sigc2**(1/Alpha))\n\tTrm3=Vc3*(Sigc3**(1/Alpha))\n\tTrm4=(Vk**2.2)*(Sigk**(1/Alpha)) # Factor of 2.2 included to get data to fit to Yang et al\n\tTrm5=Vw*(Sigw**(1/Alpha))\n\tTrm6=Vh*(Sigh**(1/Alpha))\n\tCrf=(Trm1+Trm2+Trm3+Trm4+Trm5+Trm6)**Alpha\n#\n#\n# 4. 
Output result:\n#\t-----------------\n\tif(Tout==0):\n\t\tFr=Crf\n\telse:\n\t\tFr=1/Crf\n\treturn Fr", "def _calculate_r0(net):\n\n r0 = 0\n for reaction in net.reactions:\n t = reaction.rate(net.species)\n r0 += t\n\n return r0", "def qcd_cc( s, m, r, u ):\n\n l2_min = r\n l3_min = u\n l1_min = (m+1)/2\n l1_max = l2_max = l3_max = (3*s+m+2*r+2*u)/2\n\n S = 0\n for l1 in range(l1_min, l2_max+1):\n for l2 in range(l2_min, l2_max+1):\n for l3 in range(l3_min, l3_max+1):\n n1 = 2*l1 + l2 + l3 - 2*s - m - r - u\n n2_t2 = -2*(l1+l2+l3) + 3*s + m + 2*r + 2*u\n n3 = l2-r\n n4 = l3-u\n if n2_t2%2 != 0:\n continue\n n2 = n2_t2/2\n if n1 < 0 or n2 < 0 or n3 < 0 or n4 < 0:\n continue\n\n denom = factorial(n1)*factorial(n2)*factorial(n3)*factorial(n4)*factorial(3)**n1*factorial(4)**n2*factorial(m)*factorial(r)**2*factorial(u)**2\n\n nom = double_factorial(2*l1-1)*factorial(l2)*factorial(l3)\n S+= Fraction(nom, denom)\n\n return S", "def nCWRk(n, r):\n val = 1\n for i in range(1, r+1):\n val *= n + r - i\n val //= i\n return val", "def cppi(risky_r, safe_r=None, m=3, start=initial, floor=0.8, riskfree_rate=risk_free_rate, drawdown=None):\n # set up the CPPI parameters\n dates = risky_r.index\n n_steps = len(dates)\n account_value = start\n floor_value = start*floor\n peak = account_value\n if isinstance(risky_r, pd.Series): \n risky_r = pd.DataFrame(risky_r, columns=[\"R\"])\n\n if safe_r is None:\n safe_r = pd.DataFrame().reindex_like(risky_r)\n safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number\n # set up some DataFrames for saving intermediate values\n account_history = pd.DataFrame().reindex_like(risky_r)\n risky_w_history = pd.DataFrame().reindex_like(risky_r)\n cushion_history = pd.DataFrame().reindex_like(risky_r)\n floorval_history = pd.DataFrame().reindex_like(risky_r)\n peak_history = pd.DataFrame().reindex_like(risky_r)\n\n for step in range(n_steps):\n if drawdown is not None:\n peak = np.maximum(peak, account_value)\n floor_value = peak*(1-drawdown)\n cushion = (account_value - floor_value)/account_value\n risky_w = m*cushion\n risky_w = np.minimum(risky_w, 1)\n risky_w = np.maximum(risky_w, 0)\n safe_w = 1-risky_w\n risky_alloc = account_value*risky_w\n safe_alloc = account_value*safe_w\n # recompute the new account value at the end of this step\n account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])\n # save the histories for analysis and plotting\n cushion_history.iloc[step] = cushion\n risky_w_history.iloc[step] = risky_w\n account_history.iloc[step] = account_value\n floorval_history.iloc[step] = floor_value\n peak_history.iloc[step] = peak\n risky_wealth = start*(1+risky_r).cumprod()\n backtest_result = {\n \"Wealth\": account_history,\n \"Risky Wealth\": risky_wealth, \n \"Risk Budget\": cushion_history,\n \"Risky Allocation\": risky_w_history,\n \"m\": m,\n \"start\": start,\n \"floor\": floor,\n \"risky_r\":risky_r,\n \"safe_r\": safe_r,\n \"drawdown\": drawdown,\n \"peak\": peak_history,\n \"floor\": floorval_history\n }\n return backtest_result", "def roc(ground_truth, pred_result):\n assert len(ground_truth)==len(pred_result)\n tp, fp, tn, fn = 1e-8, 1e-8, 1e-8, 1e-8\n for i in range(len(ground_truth)):\n if ground_truth[i][0] == 0 and pred_result[i][0] == 0:\n tp += 1\n elif ground_truth[i][0] == 0 and pred_result[i][0] == 1:\n fn += 1\n elif ground_truth[i][0] == 1 and pred_result[i][0] == 0:\n fp += 1\n elif ground_truth[i][0] == 1 and pred_result[i][0] == 1:\n tn += 1\n roc_tpr, roc_fpr = tp/(tp+fn), fp/(fp+tn)\n 
return (roc_fpr, roc_tpr)", "def get_covrad(elem):\n return covrad[get_num(elem)]", "def apply_per_reducts_cmip6():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Get the year-on-year proportional reductions in RCP2.6\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply year on year proportional reductions (globally uniform) from RCP2.6 in 2015 onwards\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_rcp262015.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ 
\"SO2DMS_rcp262015.nc\")\n\n return", "def uCSIsYiRadicals(code):\n ret = libxml2mod.xmlUCSIsYiRadicals(code)\n return ret", "def calc_ic(data):\n return scs.spearmanr(data[:, 0], data[:, 1]).correlation", "def calc_rocchio(original, relevant_vectors, nonrelevant_vectors):\n print('orig' + str(len(original)))\n if len(relevant_vectors) > 0: print('rv 1st len' + str(len(relevant_vectors[0])))\n if len(nonrelevant_vectors) > 0: print('nr 1st len' + str(len(nonrelevant_vectors[0])))\n rv_count = len(relevant_vectors)\n nr_count = len(nonrelevant_vectors)\n rv_sum = np.add.reduce(relevant_vectors)\n print('rv_sum' + str(rv_sum) + 'rv_count' + str(rv_count))\n nr_sum = np.add.reduce(nonrelevant_vectors)\n print('nr_sum' + str(nr_sum) + 'nr_count' + str(nr_count))\n updated_relevance = cg.ROCCHIO_ALPHA * original \\\n + cg.ROCCHIO_BETA * (1/rv_count if rv_count else 1) * rv_sum \\\n - cg.ROCCHIO_GAMMA * (1/nr_count if nr_count else 1) * nr_sum\n #only keep terms above minimum threshold (also serves to exclude negative values)\n print('before')\n print(updated_relevance[:40])\n updated_relevance = [0 if wgt < cg.ROCCHIO_MIN else wgt for wgt in updated_relevance]\n print('after')\n print(updated_relevance[:40])\n return updated_relevance", "def _c_numeric(self, rij):\n radial_fun = np.zeros((self.lmax+1, self.nmax))\n radial_fun[0,1] = 1.0\n\n #Get local references to these variables so that we don't need `self`\n #all over in the overbasis calculation below.\n alpha = self.alpha\n rb = self.rb \n for n in range(1, self.nmax+1):\n argbess = 2*alpha*rb[n-1]*rij\n ep = np.exp(-alpha*(rij + rb[n-1])**2)\n em = np.exp(-alpha*(rij - rb[n-1])**2)\n #In the loops below, msb prefix refers to modified spherical bessel.\n for l in range(self.lmax+1):\n if l == 0:\n if argbess == 0.0:\n msb_fi_ki_l = np.exp(-alpha*(rb[n-1]**2 + rij**2))\n else:\n #msb_fi_ki_lm = cosh(arg_bess)/arg_bess\n #msb_fi_ki_l = sinh(arg_bess)/arg_bess\n msb_fi_ki_lm = 0.5 * (em + ep) / argbess\n msb_fi_ki_l = 0.5 * (em - ep) / argbess\n else:\n if argbess == 0.0:\n msb_fi_ki_l = 0.0\n else:\n msb_fi_ki_lmm = msb_fi_ki_lm\n msb_fi_ki_lm = msb_fi_ki_l\n msb_fi_ki_l = msb_fi_ki_lmm-(2*l-1)*msb_fi_ki_lm/argbess\n\n radial_fun[l,n-1] = msb_fi_ki_l #* rb[n-1]\n fc = fcut(rij, self.rcut, self.trans_width)\n return np.dot(radial_fun, self.transformbasis)*fc", "def rsi(date):\n\n # print(float(r_json['Technical Analysis: RSI'][date]['RSI']))\n return float(r_json['Technical Analysis: RSI'][date]['RSI'])" ]
[ "0.56749076", "0.5645466", "0.5644362", "0.5638017", "0.56055605", "0.55989563", "0.5504229", "0.54844487", "0.54743993", "0.54574186", "0.54527915", "0.5400491", "0.53917223", "0.53822356", "0.53790843", "0.5347198", "0.5308095", "0.530323", "0.5293776", "0.5282645", "0.5274876", "0.5268127", "0.5245522", "0.5244178", "0.52299285", "0.52232295", "0.5221863", "0.5220932", "0.52107495", "0.51973003" ]
0.58999944
0
return a set of nodes who are within max_dist of self
def neighbors(self, max_dist=3): # TODO: this may have problems because the set doesn't # compare object id but uses user defined comparison methods # TODO: outgoing edges are no longer saved found = set() found.add(self) queue = [(self, 0)] while queue: node, d = queue.pop(0) if d < max_dist: for edge in node.outgoing: if edge.head not in found: found.add(edge.head) queue.append((edge.head, d+1)) for edge in node.incoming: for tailnode in edge.tail: if tailnode not in found: found.add(tailnode) queue.append((tailnode, d+1)) return found
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eligible_nodes(self):\n return [v for v in self.G if self.eligible_node(v)]", "def getMaximumDistances(self):\n pass", "def find(self, value, max_distance):\n\t\t# type: (Any, int) -> List[Tuple[int, Any]]\n\n\t\tnode = self.root\n\t\tret = [] # type: List[Tuple[int, Any]]\n\n\t\tif node is None:\n\t\t\treturn ret\n\n\t\tcandidates = [node] # is a deque better here?\n\n\t\twhile candidates:\n\t\t\tcandidate = candidates.pop()\n\t\t\tdistance = self.distance_func(value, candidate.value)\n\n\t\t\tif distance <= max_distance:\n\t\t\t\tret.append((distance, candidate.value))\n\n\t\t\t# instead of looking for candidates by searching,\n\t\t\t# one could also directly access the necessary keys in the dict\n\t\t\tfor d, bknode in candidate.leaves.items():\n\t\t\t\tlower = distance - max_distance\n\t\t\t\tupper = distance + max_distance\n\t\t\t\tif lower <= d <= upper:\n\t\t\t\t\tcandidates.append(bknode)\n\n\t\treturn ret", "def getNeighbors(self):\n targets = set()\n for arc in self._arcsFrom:\n targets.add(arc.getFinish())\n return [ node for node in sorted(targets) ]", "def greedy_max_cut(graph):\n cut = Cut(set(), set())\n for vertex in graph.nodes:\n l_neighbors = sum((adj in cut.left) for adj in graph.neighbors(vertex))\n r_neighbors = sum((adj in cut.right) for adj in graph.neighbors(vertex))\n if l_neighbors < r_neighbors:\n cut.left.add(vertex)\n else:\n cut.right.add(vertex)\n return cut", "def findClosestNodes(self, target: hash.hash.Hash):\n # TODO: make more efficient\n # See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table\n \n nodes = []\n \n for bucket in self.buckets:\n nodes = nodes + bucket.nodes\n\n nodes.sort(key=lambda x: nodes.distanceToHash(targetHash))\n\n return nodes[:config.K]", "def find_max(self):\n return max(self.nodes, key=int)", "def get_min_max_electrode_distances(self):\n distances = pdist(self.get_electrode_positions())\n return distances.min(), distances.max()", "def get_neighbours(self):\n return []", "def eligible_edges(self):\n return self.edges", "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def max_cliques(self):\n possible = frozenset(self.vertices())\n acc = frozenset()\n excluded = frozenset()\n 
cliques = []\n degeneracy_ordered_vertices = self.degeneracy_ordering()\n for v in degeneracy_ordered_vertices:\n neighbors_of_v = self.neighbors(v)\n self._bron_kerbosch(\n acc.union({v}),\n possible.intersection(neighbors_of_v),\n excluded.intersection(neighbors_of_v),\n cliques)\n possible = possible.difference({v})\n excluded = excluded.union({v})\n return cliques", "def find_targetnodes(self):\n\n self.connect_backwards()\n\n targetnodes = []\n for n in self.find_datanodes():\n if len(n.receives_from) > 0:\n targetnodes.append(n)\n return targetnodes", "def get_interest_nodes(self):\n # go through each node in the network to find the min and max degrees\n max_value = 0\n min_value = len(self.nodes)\n for name in self.nodes:\n\n # check for new max\n if self.nodes[name].get_degree() >= max_value:\n\n max_value = self.nodes[name].get_degree()\n\n self.max_node = name\n\n # check for new min\n elif self.nodes[name].get_degree() <= min_value:\n\n min_value = self.nodes[name].get_degree()\n\n self.min_node = name\n\n return self.max_node, self.min_node", "def _get_traversable_nodes(self):\n nodes = self.local_environment.get_node_neighborhood(self.location)\n potential_nodes = [node for node in nodes if self.local_environment.get_node_deadline(node) >= 0]\n edges_to_potential_nodes = self.local_environment.graph.edges(self.location)\n\n for single_edge in edges_to_potential_nodes:\n\n # if edge is blocked\n if self.local_environment.graph[single_edge[0]][single_edge[1]][\"blocked\"]:\n potential_nodes.remove(single_edge[1])\n return potential_nodes", "def search(self):\n open_set = set()\n closed_set = set()\n open_set.add(self.start_node)\n\n # loop through all nodes until open set is empty to build neighbor map\n while open_set:\n current_node = open_set.pop()\n closed_set.add(current_node)\n for removed_cells, score, next_status in current_node.find_next_moves():\n open_status_set = [i.status for i in open_set]\n closed_status_set = [i.status for i in closed_set]\n if next_status in open_status_set:\n index = open_status_set.index(next_status)\n node = list(open_set)[index]\n elif next_status in closed_status_set:\n index = closed_status_set.index(next_status)\n node = list(closed_set)[index]\n else:\n node = PopstarsNode(next_status)\n open_set.add(node)\n node.parents.append(current_node)\n current_node.children[node].append(\n (score, removed_cells, True))\n current_node.update_parents()\n max_score = []\n for i in self.start_node.children:\n max_score += self.start_node.children[i]\n return max(max_score)[0]", "def __call__(self, graph: Data, n_min: int, nodes_to_keep: List[int] = None, exhaustive: bool = False):\n nodes_to_keep = nodes_to_keep if nodes_to_keep is not None else []\n mcts = self._get_mcts(graph, n_min, nodes_to_keep, exhaustive)\n\n for iteration in range(self.m):\n mcts.search_one_iteration()\n\n explanation = mcts.best_leaf_node()\n\n return explanation.node_set, mcts", "def maximumDistance(self):\n from ete2 import Tree\n t = Tree(name='LUCA_root')\n empty_forest = {'sp':t,'gns':t,'fam':t,'ord':t,'cls':t,'phy':t,'kng':t}\n return self.distanceToTree(empty_forest,update_inner_attributes=False)", "def search_coord_with_max_nanobots(self, mins, maxs, fully_in_range, maybe_partially_in_range, best_count=0):\n # Figure out how many of maybe_partially_in_range are actually in range of this whole cube\n # or if they're completely out of range\n cube = OctreeNode(mins, maxs)\n new_fully_in_range = fully_in_range.copy()\n new_partially_in_range = []\n for nanobot in 
maybe_partially_in_range:\n if cube.nanobot_in_range_of_whole_node(nanobot):\n new_fully_in_range.append(nanobot)\n elif cube.in_node(nanobot.coord) or cube.in_range_if_outside(nanobot):\n new_partially_in_range.append(nanobot)\n\n # If we're not potentially at least as good as best_count, no results worth returning\n if len(new_fully_in_range) + len(new_partially_in_range) < best_count:\n return []\n\n # If none are partially in range, we know the answer for this node!\n if not new_partially_in_range:\n return [SearchResult(mins, maxs, len(new_fully_in_range))]\n\n # If this node is only 0 or 1 units long in each direction, we can't subdivide\n big_enough = False\n for axis in range(3):\n if maxs[axis] - mins[axis] > 1:\n big_enough = True\n\n all_results = []\n if not big_enough:\n # Manually test all 8 corners (ignoring duplicate corners, if any)\n points_tested = set()\n for corner in itertools.product(*zip(mins, maxs)):\n if corner not in points_tested:\n points_tested.add(corner)\n new_best_count = len(new_fully_in_range) + len([nanobot for nanobot in new_partially_in_range\n if manhattan_dist(nanobot.coord, corner) <= nanobot.r])\n if new_best_count >= best_count:\n best_count = new_best_count\n all_results += [SearchResult(corner, corner, new_best_count)]\n \n else:\n # Otherwise, divide into 8 subcubes and recursively search\n midpoint = []\n for axis in range(3):\n midpoint.append((mins[axis] + maxs[axis]) // 2)\n\n axis_coords = list(zip(mins, midpoint, maxs))\n for corner_index in itertools.product(*zip([0, 0, 0], [1, 1, 1])):\n subcube_mins = []\n subcube_maxs = []\n for axis in range(3):\n subcube_mins.append(axis_coords[axis][corner_index[axis]])\n subcube_maxs.append(axis_coords[axis][corner_index[axis] + 1])\n\n results = self.search_coord_with_max_nanobots(subcube_mins,\n subcube_maxs,\n new_fully_in_range,\n new_partially_in_range,\n best_count)\n \n # Result counts should all be the same\n if results and results[0].count >= best_count:\n all_results += results\n\n # Keep the result(s) with the highest count\n return [result for result in all_results if result.count == best_count]", "def selection(self):\n bestScore = -10000000.0\n bestChildren = None\n\n for child in self.childNodes:\n score = child.wins / child.visits + np.sqrt(2) * np.sqrt(\n np.log(self.visits) / child.visits)\n if score > bestScore:\n bestChildren = child\n bestScore = score\n return bestChildren", "def neighbours(self):\n return [x.node for x in self.edges]", "def neighbours(self):\n seen = set()\n return [l.other(self) for l in self.dovetails \\\n if id(l) not in seen and not seen.add(id(l))]", "def getNeighbors(self, current: MstarNode):\n neighbors = []\n options = []\n # Loop over all the agents\n for i in range(self.n_agents):\n node: Node = current.nodes[i]\n options_i = []\n if i in current.collision_set:\n # If the agent in the collision set we add the current node as well as all possible nodes\n options_i.append(node)\n (x, y) = node.position\n moves = {0: (x, y - 1), 90: (x + 1, y), 180: (x, y + 1), 270: (x - 1, y)}\n options_i.append(Node(node.position, node, node.rotation + 90, node.h))\n options_i.append(Node(node.position, node, node.rotation - 90, node.h))\n if self.grid[moves[node.rotation][1]][moves[node.rotation][0]] == 0:\n options_i.append(Node(moves[node.rotation], node, node.rotation,\n self.heuristic(i, moves[node.rotation], node.rotation)))\n else:\n # If the agent is not in the collision set we add only the optimal following node\n try:\n if (node, self.goal.nodes[i]) in 
self.policy:\n nextPos = self.policy[(node, self.goal.nodes[i])]\n else:\n nextPos = Astar(self.grid, node, self.goal.nodes[i]).solve()\n self.policy[(node, self.goal.nodes[i])] = nextPos\n except ValueError:\n print(f\"start: {node}, goal: {self.goal.nodes[i]}\")\n raise RuntimeError()\n options_i.append(Node(nextPos[0], node, nextPos[1], self.heuristic(i, nextPos[0], nextPos[1])))\n options.append(options_i)\n # Take the cartesian product to get all options\n for element in itertools.product(*options):\n neighbors.append(list(element))\n return neighbors", "def get_reachable_nodes(self, source: Node) -> Set[Node]:\n nodes_found: Set[Node] = {source}\n queue = [source]\n while queue:\n v = queue.pop()\n for e in v.outgoing_edges:\n if e.node_to not in nodes_found:\n nodes_found.add(e.node_to)\n queue.append(e.node_to)\n return nodes_found", "def get_neighbours(kmer, max_d):\n assert (is_dna(kmer))\n result = set([kmer])\n for i in range(max_d):\n addded = set()\n for kmer in result:\n addded |= _get_neighbours(kmer)\n result |= addded\n return result", "def find_near_nodes(self, new_node):\n number_nodes = len(self.node_list) + 1\n r = self.connect_circle_dist * math.sqrt(\n (math.log(number_nodes) / number_nodes)\n )\n\n # if expand_dist exists, search vertices in a range no more than expand_dist\n if hasattr(self, \"expand_dis\"):\n r = min(r, self.expand_dis)\n dist_list = [\n (node.x - new_node.x) ** 2 + (node.y - new_node.y) ** 2\n for node in self.node_list\n ]\n near_indexes = [dist_list.index(i) for i in dist_list if i <= r ** 2]\n return near_indexes", "def greedy(self):\n # for each node, find the incoming link with the highest score.\n max_scores = {}\n max_sources = {}\n for source, target in self.iteredges():\n score = self.get_score(source, target)\n max_score = max_scores.get(target)\n if max_score is None or score > max_score:\n max_scores[target] = score\n max_sources[target] = source\n # then build a graph out of just these links.\n succs = dict((n, []) for n in self)\n for target, source in max_sources.items():\n succs[source].append(target)\n return Digraph(succs, self.get_score, self.get_label)", "def one_dimension_val_clutering(vals, max_distance=5):\n vals = sorted(vals)\n clusters = []\n for (idx, i) in enumerate(vals):\n cluster = [j for j in vals if abs(j - i) < max_distance]\n clusters.append(cluster)\n clusters = sorted(clusters, key=len, reverse=True)\n cluster = clusters[0]\n if len(cluster) / len(vals) > 0.6 or len(cluster) >= 3:\n return cluster\n else:\n return []", "def select_leaf(self):\n current = self\n best_child = None\n selected_nodes_R = 0\n while current.isExpanded:\n maxUCT = - float('inf')\n for child in current.children.values():\n UCT = child.compute_uct()\n if UCT > maxUCT:\n maxUCT = UCT\n best_child = child\n\n current = best_child\n selected_nodes_R += current.score\n return current, selected_nodes_R", "def _max_cardinality_search(self, mask):\n n = mask.shape[0]\n cliques = [[]] # maintains the list of cliques\n last_mark = -1 # number of marked neighbors for prev. 
node\n marks = [[] for i in range(n)] # a set tracking the marked neighbors of each node\n mark_size = np.zeros(n) # number of marked neighbors for each node\n remaining = list(range(n))\n for _ in reversed(range(n)):\n node = remaining[np.argmax(mark_size[remaining])]\n if mark_size[node] <= last_mark: # moving into a new clique\n cliques.append(marks[node] + [node])\n else: # add it to the last clique\n cliques[-1].append(node)\n nb_node = np.nonzero(mask[node,:])[0] # neighbors of node\n for nb in nb_node: # update the marks for neighbors\n marks[nb].append(node)\n mark_size[nb] += 1\n last_mark = mark_size[node]\n remaining.remove(node)\n sorted_cliques = [sorted(c) for c in cliques]\n return sorted_cliques" ]
[ "0.6185975", "0.61843127", "0.6099087", "0.60814947", "0.6033172", "0.60061944", "0.5881994", "0.58049726", "0.5757084", "0.57157856", "0.57139254", "0.5710196", "0.57095784", "0.5681697", "0.5668638", "0.56296057", "0.56086", "0.5608161", "0.5601407", "0.5596248", "0.55825", "0.55809957", "0.5579248", "0.55698985", "0.5566113", "0.55277824", "0.55240047", "0.5519509", "0.551765", "0.54933506" ]
0.7041858
0
show the neighborhood of this node in a picture
def show_neighborhood(self, max_dist=3, detailed=True): dotstr = '' for node in self.neighbors(max_dist): if node is self: dotstr += node.dot(color='dodgerblue', detailed=detailed) else: dotstr += node.dot(detailed=detailed) dotstr = 'digraph hypergraph {\nrankdir=BT\n%s}\n' % dotstr f = open('/tmp/dotty', 'w') f.write(dotstr) f.close() os.system('cat /tmp/dotty | dot -Tgif > /tmp/dotty.gif') os.system('eog /tmp/dotty.gif')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self):\n data = []\n for row in self.grid:\n mid, bottom = [], []\n for node in row:\n \tmid += [0, int(node.right)]\n \tbottom += [int(node.down), 1]\n data += mid + [0] + bottom + [0] \n data[self.width*2+1] = 1\n data[-1] = 1\n data += (self.width*2) * [0]\n im = Image.new('1', (self.width*2+1, self.height*2+1))\n im.putdata(data)\n im.save('maze.png')\n im.show()", "def show_neighbours(self):\n if self.connected_to:\n s = \"\"\n for connection in self.connected_to:\n s += f\"{connection.get_name()} \"\n return s\n return \"No neighbours\"", "def __repr__(self):\n s = self.regular_neighborhood()\n return 'Train track on the ' + repr(s).lower()", "def test_d2_get_neighborhood_small(self):\n config.NR_COLS = 3\n config.NR_ROWS = 3\n gamefield = [\n [1, 0, 0],\n [1, 0, 0],\n [0, 1, 1],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 3)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 2)\n self.assertEqual(nh, 4)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 2, 0)\n self.assertEqual(nh, 4)\n # bottom right\n nh = logic.get_neighborhood(gamefield, 2, 2)\n self.assertEqual(nh, 3)\n # center\n nh = logic.get_neighborhood(gamefield, 1, 1)\n self.assertEqual(nh, 4)", "def create_graph(self):\n robot_pix = int(math.ceil(self.robot.size / self.resolution))\n ii = 0\n jj = 0\n for i in range(0, self.height, robot_pix):\n jj = 0\n for j in range(0, self.width, robot_pix):\n block = self.occ_grid[i:i+robot_pix, j:j+robot_pix].flatten()\n avg = np.mean(block)\n robot_block = self.tesselation_image[i:i+robot_pix, j:j+robot_pix].flatten()\n n_occur = np.bincount(robot_block)\n block_id = np.argmax(n_occur)\n \n p = Pose()\n p.position.x = self.resolution * j + self.resolution / 2.0 + self.origin.position.x\n p.position.y = self.height * self.resolution - (self.resolution * i + self.resolution / 2.0) + self.origin.position.y\n node = Node(ii, jj, p)\n idx = np.where(block > 20)\n if block_id == self.robot.robot_id:\n if 0 <= avg <= 20:\n print(\"Node in path\", node)\n node.valid = True\n else:\n node.valid = False\n elif block_id == 0:\n node.valid = False\n else:\n node.belongs = False\n self.nodes[ii,jj] = node\n jj += 1\n ii += 1\n\n\n height, width = self.nodes.shape\n print(\"Node shape: \", self.nodes.shape)\n for i in range(height):\n for j in range(width):\n min_i = max(0, i-1)\n max_i = min(height - 1, i+1) + 1\n min_j = max(0, j-1)\n max_j = min(width - 1, j+1) + 1\n\n node = self.nodes[i,j]\n neighbors = self.nodes[min_i:max_i, min_j:max_j].flatten()\n for n in neighbors:\n if not n or not node:\n print(\"None %d-%d\"%(i,j))\n continue\n if n != node:\n if n.valid:\n print(\"Neighbor appended\")\n self.nodes[i,j].neighbors.append(n)\n else:\n self.nodes[i,j].obstacle_neighbors.append(n)\n print(\"Graph is created!\")", "def test_d1_get_neighborhood(self):\n config.NR_COLS = 10\n config.NR_ROWS = 10\n gamefield = [\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 4)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 8)\n self.assertEqual(nh, 2)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 9, 1)\n self.assertEqual(nh, 4)\n # bottom 
right\n nh = logic.get_neighborhood(gamefield, 9, 9)\n self.assertEqual(nh, 4)\n # center\n nh = logic.get_neighborhood(gamefield, 4, 5)\n self.assertEqual(nh, 3)", "def neighbourhood(self, node1, node2, t):\n raise NotImplementedError", "def draw_neighbor_counts(img_bgr, rafts_loc, num_of_rafts):\n points = rafts_loc\n vor = ScipyVoronoi(points)\n neighbor_counts = np.zeros(num_of_rafts, dtype=int)\n for raft_id in range(num_of_rafts):\n neighbor_counts[raft_id] = np.count_nonzero(vor.ridge_points.ravel() == raft_id)\n\n font_face = cv.FONT_ITALIC\n font_scale = 0.5\n font_color = (0, 165, 255) # BGR\n font_thickness = 1\n output_img = img_bgr\n for raft_id in np.arange(num_of_rafts):\n text_size, _ = cv.getTextSize(str(raft_id + 1), font_face, font_scale, font_thickness)\n output_img = cv.putText(output_img, str(neighbor_counts[raft_id]),\n (rafts_loc[raft_id, 0] + text_size[0] // 2, rafts_loc[raft_id, 1] + text_size[1]),\n font_face, font_scale, font_color, font_thickness, cv.LINE_AA)\n\n return output_img", "def show_nn(X):\n neigh = NearestNeighbors(n_neighbors=2)\n nbrs = neigh.fit(X)\n distances, indices = nbrs.kneighbors(X)\n distances = np.sort(distances, axis=0)\n distances = distances[:,1]\n plt.plot(distances)", "def addNeighbor(self, neighbor):", "def placeNodes(imgR):\n nodes = []\n N,M = np.shape(imgR)\n for i in range(N):\n for j in range(M):\n loc = (i,j)\n if imgR[loc] == 0.:\n if len(adjPaths(imgR,loc)) > 2:\n nodes.append(loc)\n return nodes", "def vision(image):\n vis_map = resize(image, alpha, beta)\n print(\"Resized map from the blue mask\")\n\n world = rotate(vis_map)\n\n plt.figure()\n plt.imshow(world[:, :, ::-1])\n plt.show()\n object_grid, occupancy_grid = detect_object(world)\n print(\"Result of the red mask\")\n plt.figure()\n plt.imshow(occupancy_grid)\n plt.show()\n return object_grid, occupancy_grid, world", "def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis", "def show_image(self, idx):\n image, target = self.__getitem__(self, idx)\n im_h, im_w, _ = image.size()\n labels_num = target['labels']\n rescale = torch.tensor([[im_w, im_h, im_w, im_h]])\n 
bboxs = target['boxes'] * rescale\n img = image.permute(1, 2, 0).numpy()\n for i, bboxe in enumerate(bboxs):\n x, y, xm, ym = bboxe\n label = class_name[int(labels_num[i])]\n plot_one_box((int(x), int(y), int(xm), int(ym)), img, label=label, line_thickness=3)\n cv2.imshow('image', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)", "def print_neighbours(self, word=''):\n\n if word in self.index.keys():\n word_ind = self.index[word]\n for i in self.graph[word_ind]:\n print(self.words[i])\n print()\n else:\n print('Error - Not a valid word')", "def neighbors(self, x):\n pass", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def getneighbors(self):\r\n\t\ti=self.cell[0]\r\n\t\tj=self.cell[1]\r\n\t\t\r\n\t\tw = self.width-1\r\n\t\tCenter = self.base[i][j]\r\n\t\tif(self.type==\"Neumann\"):\r\n\t\t\tif(j==w and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(i==w and 0<j<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(j==0 and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\tif(i==0 and 0<j<w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\r\n\t\t\tif(j==w and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(j==0 and i==0):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\r\n\t\t\tif(j==0 and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\r\n\t\t\tif(i==0 and j==w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(0<i<w and 0<j<w):\t\t\t\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tself.surrounding = [North,South,East,West]\r\n\t\t\tself.binary= 
str(East)+str(West)+str(South)+str(North)+str(Center)\r\n\t\t\t\r\n\t\telif(self.type==\"Moore\"):\r\n\t\t\t\r\n\t\t\tif(j==w and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][0]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[i+1][0]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\tif(i==w and 0<j<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[0][j+1]\r\n\t\t\t\tSW = self.base[0][j-1]\r\n\t\t\tif(j==0 and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][w]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][w]\r\n\t\t\tif(i==0 and 0<j<w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[w][j+1]\r\n\t\t\t\tNW = self.base[w][j-1]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\t\t\t\t\t\r\n\t\t\tif(j==w and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][0]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[0][0]\r\n\t\t\t\tSW = self.base[0][j-1]\r\n\t\t\tif(j==0 and i==0):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[w][j+1]\r\n\t\t\t\tNW = self.base[w][w]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][w]\r\n\t\t\tif(j==0 and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][w]\r\n\t\t\t\tSE = self.base[0][j+1]\r\n\t\t\t\tSW = self.base[0][w]\r\n\t\t\tif(i==0 and j==w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[w][0]\r\n\t\t\t\tNW = self.base[w][j-1]\r\n\t\t\t\tSE = self.base[i+1][0]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\tif(0<i<w and 0<j<w):\t\t\t\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tself.surrounding = [North,South,East,West,NE,NW,SE,SW]\r\n\t\t\tself.binary= str(East)+str(West)+str(South)+str(North)+str(Center)+str(NE)+str(NW)+str(SE)+str(SW)", "def debug(self):\n neighbors = len(self.__neighbors)\n string = self.__repr__() + f' neighbors: {self.living_neighbors()}/{neighbors}'\n for neighbor in self.__neighbors:\n string += '\\n ' + neighbor.__repr__()\n print(string)", "def show_one(img):\n dpi = 40\n margin = 0.05\n nda = sitk.GetArrayFromImage(img)\n spacing = img.GetSpacing()\n extent = (0, nda.shape[1] * spacing[1], nda.shape[0] * spacing[0], 0)\n figsize = (5, 5)\n fig = plt.figure(figsize=figsize, dpi=dpi)\n ax = 
fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])\n\n plt.set_cmap(\"gray\")\n ax.imshow(nda, extent=extent, interpolation=None)", "def display(self): \n print ' ' \n print 'Connect ', NWIN, ' Board '\n print ' ' \n for r in reversed(range(self.getHeight())):\n for c in range(self.getWidth()):\n if self.cell[c][r] == BLACK:\n print '+',\n elif self.cell[c][r] == WHITE:\n print '-',\n else:\n print '.',\n print ' '\n for c in range(self.getWidth()):\n print c,\n print ' '\n print ' '", "def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()", "def get_neighbours(self):\n return []", "def neighbor_node(self, node):\n neighborhood_node = []\n for i in range(self.nodenum):\n if(self.Adjmatrix[node, i] == 1):\n neighborhood_node.append(i)\n \n return neighborhood_node", "def draw(self):\n nx.draw_networkx(self.rc)", "def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)", "def draw_nodes(self):\n pass", "def plot_networks(student, shape):\n plt.figure()\n s = np.arange(np.prod(shape))\n plt.figure()\n value = student.train_model.value(s).reshape(shape)\n plt.imshow(value)\n\n pi = student.train_model.proba_step(s).T.reshape((-1,) + shape)\n x, y = np.unravel_index(s, shape)\n\n for a in range(pi.shape[0]):\n if a == UP:\n u = np.zeros_like(s)\n v = pi[a].T.ravel()\n if a == DOWN:\n u = np.zeros_like(s)\n v = -pi[a].T.ravel()\n if a == RIGHT:\n v = np.zeros_like(s)\n u = pi[a].T.ravel()\n if a == LEFT:\n v = np.zeros_like(s)\n u = -pi[a].T.ravel()\n plt.quiver(x, y, u, v)", "def _neighbours_html(self):\n self._make_svg_script()\n\n ret = {\n 'rt_label': self.rt_label,\n 'uri': self.uri,\n 'uri_encoded': self.uri_encoded,\n 'label': self.label,\n 'nid': self.nid,\n 'gat': self.gat,\n 'rs_encoded': self.rs_encoded,\n 'rs_label': self.rs_label,\n 'sa': self.sa,\n 'ea': self.ea,\n 'script': self.script\n }\n\n return render_template(\n 'class_report.html',\n report=ret\n )" ]
[ "0.7047486", "0.6181761", "0.6136457", "0.61287487", "0.59386134", "0.59066224", "0.5844902", "0.5810311", "0.5801892", "0.57685393", "0.5745063", "0.5735822", "0.5733103", "0.57241136", "0.57115674", "0.56976724", "0.56679213", "0.56657785", "0.5641514", "0.56204975", "0.55908644", "0.5574266", "0.55739707", "0.5572492", "0.55564094", "0.55529624", "0.55504614", "0.55486983", "0.55485576", "0.5541845" ]
0.73047006
0
subpaths is a list of paths on tail nodes. return a new path generated by concatenating this edge. this is used in kbest paths generation.
def make_path(self, subpaths): assert len(self.tail) == len(subpaths), '%s' % self path = Path(self, subpaths) weight = self.hg.one for p in subpaths: if p is not None: weight = self.hg.prod(weight, p.weight) weight = self.hg.prod(weight, self.hg.w(self)) path.weight = weight return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decompose_paths_rec(node_inner, path):\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths", "def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale + self.line_size\n y_base = point[1] * scale + self.border * scale + self.line_size\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )", "def toSubpathPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def get_subgraph_from_paths(self, paths):\n nodes, edges = graph_elements_from_paths(paths)\n subgraph = self.graph.subgraph(nodes).edge_subgraph(edges)\n return subgraph", "def subpath(self):\n return self._subpath()", "def sub_path(self) -> str:\n return self._sub_path", "def extract_path(self):\n if self.extracted_path is not None:\n return self.extracted_path\n current = self\n path = []\n while current:\n path.append([current.end, current.path_cost])\n current = current.parent\n return list(reversed(path))", "def acyclic_sub_path(tree, path):\n for u, v in pairwise(reversed(path)):\n if v in tree.nodes and u not in tree.nodes:\n return path[path.index(v):]", "def find_all_subpaths(all_paths):\r\n # Calculate length of the maximum path\r\n max_length = max(len(s) for s in all_paths)\r\n\r\n subpaths = set()\r\n for path in all_paths:\r\n for k in range(0, max_length + 1):\r\n for ii in range(0, len(path) - k + 1):\r\n subpaths.add(tuple(path[ii:ii + k]))\r\n subpaths = filter(None, subpaths)\r\n return list(subpaths)", "def get_paths_of_length_k(subpaths, k):\r\n subpaths_of_length_k = [i for i in subpaths if len(\r\n i) == k] # all k-length subpaths\r\n subpaths = [i for i in subpaths if len(i) != k] # remove k-length subpaths\r\n return subpaths_of_length_k, subpaths", "def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))", "def extend_path(self, end, path_cost):\n return self.combine(Action(end, path_cost))", "def sub_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sub_path\")", "def appendPath(paths: List[unicode]) -> unicode:\n ...", "def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def OptimalSubsequenceWarpingPath( self ):\n subseqCandidates = []\n subseqCosts = []\n\n lastRow = list(self.D[-1])\n bStar = lastRow.index( min(lastRow) )\n while lastRow[bStar] < self.maxPathLen or len(subseqCosts) == 0:\n # find aStar with minimum distance for subsequences ending at bStar\n P, cost = self.OptimalWarpingPath( bStar )\n subseqCandidates.append( P )\n subseqCosts.append( cost )\n lastRow[bStar] = float(\"inf\")\n bStar = lastRow.index( min(lastRow) ) \n minCost = min(subseqCosts)\n return subseqCandidates[ subseqCosts.index( minCost ) ], minCost", "def 
get_path_ends(self):\n\n end1, end2 = self.get_end_vertices()\n\n return Path(end1), Path(end2)", "def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def save_subpath(self, index, result_path='', subPath=''):\n pass", "def build_path(\n G: nx.Graph,\n node: int,\n endpoints: List[int],\n path: List[int]) -> List[int]:\n\n # For each successor in the passed-in node\n for successor in G.successors(node):\n if successor not in path:\n # If successor is already in path, ignore it, otherwise add to path\n path.append(successor)\n\n if successor not in endpoints:\n # If successor not endpoint, recursively call\n # build_path until endpoint found\n path = build_path(G, successor, endpoints, path)\n\n else:\n # If successor is endpoint, path is completed, so return\n return path\n\n if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])):\n # If end of the path is not actually an endpoint and the path's\n # first node is a successor of the path's final node, then this is\n # actually a self loop, so add path's first node to end of path to\n # close it\n path.append(path[0])\n\n return path", "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is 
{1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))", "def cross(subpaths, j, k):\r\n for q in range(j, k):\r\n subpaths[q].direct_close()\r\n subpaths[q].reverse()\r\n subpaths[j:k] = subpaths[j:k][::-1]", "def edge_subgraph(self, edges, relabel_nodes=False, output_device=None):\n raise NotImplementedError(\"edge_subgraph is not implemented yet\")", "def interjoint_paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n subpaths = self._single_tree_interjoint_paths(\n tree, return_indices=return_indices\n )\n paths.extend(subpaths)\n\n return paths", "def join(self, path, *paths):", "def add_path(self, path):\n\n for i in range(1, len(path)):\n self.add_edge(path[i], path[i - 1])", "def sub_link_capacity(self, path, bw):\n \n # PART 1, TASK 3.4 sub bw to edges", "def subgraph(self, nodes, relabel_nodes=False, output_device=None):\n raise NotImplementedError(\"subgraph is not implemented yet\")", "def multi_join(paths, *path_segments):\n return [os.path.join(*(path_segments + (path,))) for path in paths]", "def compute_longest_syntactic_path(self, add_cut):\n max_cost, maxpath_node_uids, subgraph_node_uids, subgraph_edge_uids = self._compute_longest_path_cut(self._start_uid, self._end_uid)\n cut_uid = Cut.get_cut_uid(self._start_uid, self._end_uid)\n if add_cut and cut_uid not in self._cuts.keys():\n c = Cut(self._start_uid, self._end_uid, max_cost, subgraph_node_uids, subgraph_edge_uids, self)\n self._cuts[c.get_uid()] = c\n return max_cost, maxpath_node_uids, subgraph_node_uids, subgraph_edge_uids" ]
[ "0.59228057", "0.5917371", "0.5893389", "0.5885784", "0.58473676", "0.57791805", "0.5497774", "0.54971564", "0.5475109", "0.5418675", "0.5359241", "0.5319792", "0.5247005", "0.51880467", "0.51860625", "0.516922", "0.51333255", "0.5104041", "0.5086994", "0.5030389", "0.5015719", "0.4999733", "0.49690285", "0.49557117", "0.49517438", "0.49373305", "0.49032763", "0.48745564", "0.483924", "0.4820145" ]
0.65290594
0
top down topo sort. nodes that don't reach the target node are thrown away
def topo_sort(self): # TODO: detect cycles self.find_reachable_nodes() # save list of nodes in topo order self.nodes = [] # assign each node an id field incrementally cur_id = 0 # count visited outgoing edges for each node unvisited = {} for nid, node in list(self.found.items()): unvisited[nid] = node.nout queue = [self.root] #print >>sys.stderr, '+++' while queue: # take off nodes whose all outgoing edges are visited from # queue head node = queue.pop(0) self.nodes.append(node) node.hg = self node.id = cur_id cur_id += 1 for edge in node.incoming: edge.hg = self for tailnode in edge.tail: #print >>sys.stderr, tailnode unvisited[id(tailnode)] -= 1 if unvisited[id(tailnode)] == 0: queue.append(tailnode) self.sanity_check() self.tasks_done.add('topo_sort')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reset_topological_order(self):\n self._topological_order = self._input_nodes[:]\n self.sorted = False", "def topologicalSort(self):\r\n visited = [False]*self.vertices \r\n stack =[]\r\n \"\"\"\r\n using stack, problems with using code given by\r\n professor (using queues) so I'm using a new approach\r\n \"\"\"\r\n for i in range(self.vertices):\r\n \"\"\"\r\n traversing thru number of vertices, checking\r\n if false, and if is, goes to helper method\r\n \"\"\"\r\n if visited[i] == False: \r\n self.topologicalSortUtil(i,visited,stack) \r\n \r\n\r\n print(stack)", "def _topological_sort(self):\n self._reset_topological_order()\n\n def is_connected(src, dst):\n \"\"\"Judge two node whether are connected.\"\"\"\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0\n\n idx = 0\n while idx < len(self._topological_order):\n cur_node_name = self._topological_order[idx]\n cur_node = self.get_node(cur_node_name)\n # `scsr` is abbreviation for `successor`.\n for scsr_name in cur_node.successor_nodes:\n scsr_node = self.get_node(scsr_name)\n scsr_node.cur_in_degree -= is_connected(cur_node_name,\n scsr_node)\n if scsr_node.cur_in_degree == 0:\n self._topological_order.append(scsr_name)\n idx += 1\n self.sorted = True", "def sort(self):\n srt = self.sources()\n stack = list(srt) # makes a copy\n while stack:\n node = stack.pop(0)\n if (not node.isSink()):\n # if a child is not in srt, and all of its parents are in srt,\n # then add it. Must have all parents to get true topo sort.\n newChildren = filter(lambda x: len(set(x.parents()) - set(srt))==0,\n [child for child in node.children() if child not in srt])\n stack.extend(newChildren)\n srt.extend(newChildren)\n return srt", "def topological_sort(self):\n\t\t#detect leaves\n\t\tnumChildren = dict((n.name,0) for n in self.variables.values())\n\t\tfor n in self.variables.itervalues():\n\t\t\tfor p in n.parents:\n\t\t\t numChildren[p]+=1\n\t\t#do a BFS from leaves to get the reverse topological sort\n\t\ttopo = []\n\t\tqueue = [n for (n,c) in numChildren.iteritems() if c==0]\n\t\tif len(queue)==0:\n\t\t\traise ValueError(\"Bayes net is not acyclic?\")\n\t\twhile len(queue)>0:\n\t\t\tn = self.variables[queue.pop(0)]\n\t\t\ttopo.append(n)\n\t\t\tfor p in n.parents:\n assert numChildren[p]>0\n numChildren[p] -= 1\n if numChildren[p]==0:\n queue.append(p)\n\t\t#now reverse it to get the top down ordering\n assert len(topo)==len(self.variables)\n\t\treturn reversed(topo)", "def test_0(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r2)\r\n o2 = MyOp.make_node(o.outputs[0], r5)\r\n\r\n all = general_toposort(o2.outputs, prenode)\r\n assert all == [r5, r2, r1, o, o.outputs[0], o2, o2.outputs[0]]\r\n\r\n all = io_toposort([r5], o2.outputs)\r\n assert all == [o, o2]", "def topological_sort(graph, rootKey = None):\n\n\t# Reset's the attribute values of all Nodes in graph to their initialization values.\n\t# Importantly, resets Node.searchStatus to \"undiscovered\" and Node.parent to None.\n\tgraph.reset()\n\n\ttopologicalKeyList = []\n\n\t# time is declared inside a function and so must be made global.\n\tglobal time; time = 0\n\n\t# If a starting root is specified, begin there.\n\tif rootKey is not None:\n\t\ttopological_sort_visit(graph, rootKey, topologicalKeyList)\n\n\t# Visit each undiscovered Node.\n\n\t# The keys are ordered here to enforce an easily predictable traversal.\n\t# This is not necessary and reduces efficiency, but makes testing very 
straightforward. \n\t# For the purposes of this program this loss in efficiency is acceptable.\n\torderedKeys = list(graph.adjacencyMap.keys()); orderedKeys.sort()\n\tfor key in orderedKeys:\n\t\tif graph.vertexMap[key].searchStatus == \"undiscovered\":\n\t\t\ttopological_sort_visit(graph, key, topologicalKeyList)\n\n\t# Explored and created a forest within graph.\n\treturn topologicalKeyList", "def topological_sort(g) -> list:\n dfs(g)\n res = [v for v in g]\n quick_sort(res, key=lambda v: v.finish_time)\n res.reverse()\n return res", "def _toposort(edges):\r\n incoming_edges = reverse_dict(edges)\r\n incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())\r\n S = set((v for v in edges if v not in incoming_edges))\r\n L = []\r\n\r\n while S:\r\n n = S.pop()\r\n L.append(n)\r\n for m in edges.get(n, ()):\r\n assert n in incoming_edges[m]\r\n incoming_edges[m].remove(n)\r\n if not incoming_edges[m]:\r\n S.add(m)\r\n if any(incoming_edges.get(v, None) for v in edges):\r\n raise ValueError(\"Input has cycles\")\r\n return L", "def _topological_sort(self):\n\n visited = defaultdict(bool)\n stack = []\n\n for pod in self.pods:\n if not visited[pod]:\n self._topological_sort_pod(pod, visited, stack)\n\n return stack[::-1]", "def sorting(self, presorted=None):\n self._sorted_nodes = []\n if presorted:\n notsorted_nodes = copy(presorted)\n else:\n notsorted_nodes = copy(self.nodes)\n predecessors = {key: copy(val) for (key, val) in self.predecessors.items()}\n\n # nodes that depends only on the self._nodes_wip should go first\n # soe remove them from the connections\n for nd_out in self._node_wip:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)\n\n while notsorted_nodes:\n sorted_part, notsorted_nodes = self._sorting(notsorted_nodes, predecessors)\n self._sorted_nodes += sorted_part\n for nd_out in sorted_part:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)", "def test_1(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r1)\r\n o2 = MyOp.make_node(o.outputs[0], r5)\r\n all = general_toposort(o2.outputs, prenode)\r\n assert all == [r5, r1, o, o.outputs[0], o2, o2.outputs[0]]", "def binarize_top_down(self, ignored_elements=frozenset()):\n\n fifo = collections.deque((self.top,))\n is_seen = {self.top}\n\n while fifo:\n # print(\"\\n\", fifo)\n vertex = fifo.pop()\n # print(\"element \", vertex)\n if len(self.under(vertex)) > 2 and vertex not in ignored_elements:\n self.binarization_element_under(vertex)\n visit_list = self.under(vertex)\n for neighbor in visit_list:\n if neighbor not in is_seen:\n is_seen.add(neighbor)\n fifo.appendleft(neighbor)\n\n return self", "def fast_targeted_order(ugraph):\n\n graph = copy_graph(ugraph)\n# print graph\n size = len(ugraph) #- 1\n degree_sets = [set() for _ in range(size)]\n order = []\n\n for node, edges in ugraph.iteritems():\n degree = len(edges)\n degree_sets[degree].add(node)\n# print degree_sets\n\n for k in range(size - 1, -1, -1):\n while degree_sets[k]:\n node = degree_sets[k].pop()\n neighbors = graph[node]\n for neighbor in neighbors:\n degree = len(graph[neighbor])\n degree_sets[degree].remove(neighbor)\n degree_sets[degree - 1].add(neighbor)\n order.append(node)\n delete_node(graph, node)\n return order", "def topological_sort(self):\n \n visited = set()\n sorted_node = [] \n\n # sort all the node in the graph\n for i in self.node_set: \n if i not in visited: \n visited = self.topological_sort_helper(i, 
visited, sorted_node) \n \n visited.clear()\n return sorted_node", "def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order", "def toposort(nodes, get_next_nodes):\n traversing = set()\n traversed = set()\n result = collections.deque()\n\n def traverse(node):\n if node in traversing:\n raise CycleError(node)\n if node in traversed:\n return # not a cycle but we already saw this\n traversing.add(node)\n for next in get_next_nodes(node):\n traverse(next)\n traversed.add(node)\n traversing.remove(node)\n result.appendleft(node)\n\n for node in nodes:\n traverse(node)\n\n return list(result)", "def fast_target_order(ugraph):\n result_order = []\n num_nodes = len(ugraph.keys())\n degrees = [set([]) for _ in range(num_nodes)]\n for node in ugraph.keys():\n node_degree = len(ugraph[node])\n degrees[node_degree].add(node)\n for degree in range(num_nodes - 1, -1, -1):\n while degrees[degree] != set([]):\n elem = degrees[degree].pop()\n for neighbor in ugraph[elem]:\n n_degree = len(ugraph[neighbor])\n degrees[n_degree].remove(neighbor)\n degrees[n_degree - 1].add(neighbor)\n result_order.append(elem)\n delete_node(ugraph, elem)\n return result_order", "def test_sort(self):\n a, b, c, d = Node('a'), Node('b'), Node('c'), Node('d')\n a | b | c\n a * 'foo' | 'bar' * c\n d | 'baz' * b\n nodes = topo_sort([a, d])\n self.assertEqual(set(nodes[:2]), set([a, d]))\n self.assertEqual(nodes[2:], [b, c])", "def topSort(self, x=None, seen=None, order=deque([]), cycle=False):\n\n if x is None:\n for x in self.getVertices(): # choose an arbitrary vertex\n break\n\n if seen is None:\n seen = {vertex: False for vertex in self.getVertices()}\n\n seen[x] = True\n\n for y, weight in self.outboundEdges(x):\n if seen[y]:\n cycle = True\n return False\n\n order = self.topSort(y, seen, order, cycle)\n\n if order == False:\n cycle = True\n return False\n\n\n order.appendleft(x)\n return order\n\n # print(\"%i \" % x, end='')", "def sort_nodes(self):\n non_terminal_nodes = []\n for node in self.nodes:\n if not node.entries:\n assert self.start is None, (\n 'there are more than one node with no incoming arcs')\n self.start = node\n elif not node.exits:\n assert self.end is None, (\n 'there are more than one node with no outgoing arcs')\n self.end = node\n else:\n non_terminal_nodes.append(node)\n assert self.start is not None and self.end is not None, (\n 'no start or end node')\n self.nodes = ([self.start]\n + sorted(non_terminal_nodes,\n key=lambda x: (x.entry, x.sym))\n + [self.end])\n for n in self.nodes:\n n.exits.sort(key=lambda x: (x.dest.entry, x.dest.sym))", "def test_topological_sort(self) -> None:\n\n # tuple convention: (outgoing, incoming)\n graph = {\n \"1\": ([\"4\"], []),\n \"2\": ([\"4\"], []),\n \"3\": ([\"5\", \"6\"], []),\n \"4\": ([\"7\", \"5\"], [\"1\", \"2\"]),\n \"5\": ([\"8\"], [\"4\", \"3\"]),\n \"6\": ([], [\"3\"]),\n \"7\": ([\"8\"], [\"4\"]),\n \"8\": ([], [\"7\", \"5\"])\n } # type: Dict[str, Tuple[List[str], List[str]]]\n\n self.assertEqual(topological_sort(graph, [\"1\", \"2\", \"3\"]), [\"1\", \"2\", \"3\", \"4\", \"6\", \"7\", \"5\", \"8\"])", "def test_topological_sort(self) -> None:\n\n # tuple convention: (outgoing, incoming)\n graph = {\n \"1\": ([\"4\"], []),\n \"2\": ([\"4\"], []),\n \"3\": ([\"5\", \"6\"], []),\n \"4\": ([\"7\", \"5\"], [\"1\", \"2\"]),\n \"5\": ([\"8\"], [\"4\", \"3\"]),\n \"6\": ([], [\"3\"]),\n \"7\": ([\"8\"], [\"4\"]),\n \"8\": ([], [\"7\", \"5\"])\n } # type: Dict[str, Tuple[List[str], 
List[str]]]\n\n self.assertEqual(topological_sort(graph, [\"1\", \"2\", \"3\"]), [\"1\", \"2\", \"3\", \"4\", \"6\", \"7\", \"5\", \"8\"])", "def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n \n counter = 0\n order = [] \n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n counter += 1\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n \n \n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n counter += 1\n new_graph[neighbor].remove(max_degree_node)\n\n order.append(max_degree_node)\n return order # uncomment to use graph_resilience_targeted_order", "def fast_targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n \n degree_sets = {} # degree_set[k] is a set of all nodes whose degree is k\n for k in range(len(new_graph)):\n degree_sets[k] = set([])\n \n for i in new_graph:\n d = len(new_graph[i])\n degree_sets[d].add(i)\n\n attack_order =[]\n n = len(new_graph)\n for k in reversed(range(n-1)):\n while len(degree_sets[k]):\n u = random.choice(tuple(degree_sets[k])) # u is an arbitrary element in degree-set[k]\n degree_sets[k].remove(u)\n for v in new_graph[u]: # v is the neighbor of u\n d = len(new_graph[v])\n degree_sets[d].remove(v)\n degree_sets[d-1].add(v)\n\n attack_order.append(u)\n delete_node(new_graph, u)\n \n return attack_order", "def dependency_order(self):\n seen = set()\n\n def _prune_visited(node):\n if node in seen:\n return True\n seen.add(node)\n return False\n\n for target in self.targets:\n if target in seen:\n continue\n for node in target.postorder(prune_fn=_prune_visited):\n yield node.data", "def toposort(adj):\n # Memoize for visited vertex\n used = [0] * len(adj)\n order = []\n # write your code here\n # Traverse through each vertex\n for i in range(len(adj)):\n if not used[i]:\n # If not visited, run dfs\n dfs(adj, used, order, i)\n\n # Reverse the order list to show in descending order\n order.reverse()\n return order", "def sort(self):\n while self.nodes != []:\n iterated = False\n for node in self.leaf_nodes():\n iterated = True\n self.prune_node(node)\n yield node\n if not iterated:\n raise CyclicGraphError(\"Sorting has found a cyclic graph.\")", "def topological_sort(self, verbose=True):\n visited = set()\n stack = []\n rec_stack = set()\n for vertex in self.vertices():\n if vertex not in visited:\n if self._topological_sort(vertex, visited, stack, rec_stack):\n print('ERROR: Graph is cyclic! Cannot perform Topological Sort.')\n return None\n if verbose:\n print('TopologicalSort(Graph):', stack)\n return stack", "def sort(self):\r\n return self.sort_targets([self])" ]
[ "0.6479977", "0.64079857", "0.63420755", "0.63285846", "0.62891173", "0.62466025", "0.6230972", "0.6165624", "0.61602473", "0.6127933", "0.6103531", "0.6076643", "0.60480684", "0.60202795", "0.5990248", "0.5957457", "0.59298575", "0.59276503", "0.592397", "0.59147286", "0.5845383", "0.58326817", "0.58326817", "0.58308834", "0.5830487", "0.5823839", "0.58169216", "0.581677", "0.5813266", "0.58003336" ]
0.7133982
0
Read a file and return a toposorted hypergraph.
def deserialize(self, filename): f = open(filename) edges_tails = [] nodes = [] # first pass adds incoming edges to nodes for line in f: if '->' in line: # edge edge = self.edge_class() tail_ids, head_id = edge.deserialize(line) nodes[head_id].add_incoming(edge) edges_tails.append((edge, tail_ids)) else: # node node = self.node_class() node.deserialize(line) assert node.id == len(nodes), 'nodes shall appear in order' nodes.append(node) # second pass adds tail nodes to edges for edge, tail_ids in edges_tails: for nid in tail_ids: edge.add_tail(nodes[nid]) f.close() # make a toposorted hypergraph hg = Hypergraph(nodes[0]) hg.nodes = nodes for node in hg: node.hg = hg for edge in hg.edges(): edge.hg = hg hg.tasks_done.add('topo_sort') return hg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_graph(filename):\n G = Hypergraph()\n\n f = open(filename, 'r', encoding='utf8')\n lines = f.readlines()\n if args.weighted:\n for line in lines:\n line = line.split()\n edge_name = line[0]\n weight = line[1]\n G.add_edge(edge_name, line[2:], float(weight))\n else:\n for line in lines:\n line = line.split()\n edge_name = line[0]\n G.add_edge(edge_name, line[1:])\n f.close()\n return G", "def read_graph(file_name):\r\n with open(file_name, 'r') as f:\r\n lines = f.readlines()\r\n first_line = lines[0].strip().split()\r\n no_vertices = int(first_line[0])\r\n new_graph = UndirectedGraph(no_vertices)\r\n for line in lines[1:]:\r\n if line == \"\":\r\n continue\r\n line = line.strip().split()\r\n _from, _to, _cost = int(line[0]), int(line[1]), int(line[2])\r\n new_graph.add_edge(_from, _to, _cost)\r\n return new_graph", "def read_file(path):\n\tG = nx.Graph()\n\n\twith open(path, 'r') as in_file:\n\t\tfor line in in_file:\n\t\t\tcontents = line.split(\" \")\n\t\t\tu = int(contents[0])\n\t\t\tv = int(contents[1])\n\t\t\tstreet_type = int(contents[2])\n\t\t\ttime = int(contents[3])\n\t\t\tlength = int(contents[4])\n\t\t\tcost = 1/float(length)\n\t\t\t\n\t\t\tG.add_node(u)\n\t\t\tG.add_node(v)\n\t\t\tif street_type is 1:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\telse:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\t\tG.add_edge(v, u, street_type=street_type, time=time, length=length, cost=cost)\n\n\treturn G", "def read_graph(filename):\n with open(filename) as f:\n g = eval(f.read())\n return g", "def read_graph():\n return nx.read_edgelist('edges.txt.gz', delimiter='\\t')", "def read_graph(filename):\n return nx.read_edgelist(filename, create_using=nx.DiGraph(), nodetype=str)", "def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')", "def read_graph(filename):\n with open(filename, 'r') as file: # open the file\n # read the number of nodes and number of edges\n num_nodes, num_edges = DataIO.__preprocess_line(file.readline())\n graph = GraphProcessing.construct_null_graph(num_nodes) # construct a null graph\n for line in file.readlines(): # for every line in the file\n preprocessed_line = DataIO.__preprocess_line(line) # preprocess the line\n if preprocessed_line: # if the preprocessed line is not a null string\n # read the first and second node and the edge weight\n source_node, terminal_node, weight = preprocessed_line\n graph[source_node][terminal_node] = weight\n graph[terminal_node][source_node] = weight\n return graph # return the final graph", "def read_graph(filename, directed=True):\n if not directed:\n G = nx.Graph()\n else:\n G = nx.DiGraph()\n with open(filename) as f:\n for line in f:\n d = line.split()\n G.add_edge(int(d[0]), int(d[1]))\n print('Read Graph')\n return G", "def read_graph(filename, node_index_one=0, node_index_two=1):\n tsv = csv.reader(open(filename), delimiter='\\t')\n return make_graph(tsv, node_index_one, node_index_two)", "def read_input_from_file(f):\n f.readline()\n size = int(f.readline().split()[-1])\n nb_edges = int(f.readline().split()[-1])\n\n g = UndirectedGraph()\n\n if parameters.DEBUG:\n print('Build nodes')\n\n nodes = [g.add_node() for _ in range(size)]\n\n if parameters.DEBUG:\n print('Build edges')\n edges = []\n weights = {}\n i = 0\n for i in range(nb_edges):\n if parameters.DEBUG:\n i += 1\n if i % 1000 == 0:\n print('Edge %d / %d' % (i, nb_edges))\n line = f.readline()\n _, u, v, w = line.split()\n\n e = 
g.add_edge(nodes[int(u) - 1], nodes[int(v) - 1])\n weights[e] = int(w)\n\n edges.append((int(u), int(v), int(w)))\n\n line = f.readline()\n while 'Terminals' not in line:\n line = f.readline()\n if 'SECTION' in line:\n line = f.readline()\n while 'Terminals' not in line:\n line = f.readline()\n nb_terms = int(line.split()[-1])\n terms = []\n for i in range(nb_terms):\n line = f.readline()\n _, t = line.split()\n terms.append(nodes[int(t) - 1])\n\n return instances.SteinerInstance(g, terms, weights)", "def file_parse():\n\n\tfilename = input(\"Enter the file path for your graph: \")\n\ttarget = open(filename, 'r')\n\n\ttarget_lines = [] \t# List of lines from target file\n\t\n\t# Grab the graph count and node/edge count for the first graph\n\ti = 0\n\tfor line in target:\n\t\tif i == 0:\n\t\t\tgraph_count = int(line)\n\t\telif i == 1:\n\t\t\tnode_count = int(line)\n\t\telif i == 2:\n\t\t\tedge_count = int(line)\n\t\telse:\t\n\t\t\ttarget_lines.append(line.strip('\\n'))\n\t\ti += 1\n\n\treturn graph_create(target_lines, graph_count, node_count, edge_count)", "def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!", "def loadDataZachary(fileName):\n\n \"Initialize a graph\"\n G = nx.Graph()\n\n \"Open file\"\n f = open(fileName)\n\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n while line:\n if(line[0]!=\"%\"):\n ls =line.split(' ')\n num,nums=int(ls[0]),int(ls[1])\n G.add_edge(num,nums)\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n\n \"Closing the file\"\n f.close()\n\n return G, 'Zachary'", "def build_graph(file_name):\n graph = MyGraph()\n with open(file_name, 'r') as fin:\n line = fin.readline().replace('\\n', '')\n while line != \"\":\n vals = line.split(':')\n graph.add_node(vals[0], pos=(int(vals[1]),int(vals[2])))\n line = fin.readline().replace('\\n', '')\n dest = fin.readline().replace('\\n','').split('\\t')\n line = fin.readline().replace('\\n', '')\n edges = []\n while line != '':\n node_info = line.split('\\t')\n src = node_info[0]\n for node in range(1,len(node_info)):\n if node_info[node] != '':\n if (dest[node],src) not in edges:\n edges.append((src,dest[node], node_info[node]))\n line = fin.readline().replace('\\n','')\n for edge in edges:\n graph.add_edge(edge[0], edge[1], weight=int(edge[2]))\n\n return graph", "def read_graph_file(filename):\n nodes, edges = [], []\n with open(filename) as f1:\n numNodes = int(f1.readline())\n numEdges = int(f1.readline())\n nodes = np.zeros([numNodes,3], dtype=\"float32\")\n edges = np.zeros([numEdges,2], dtype=\"int32\")\n nodeCount = 0\n edgeCount = 0\n for line in f1:\n parts = line.split(\" \")\n if len(parts) == 4:\n # node line\n nodes[nodeCount] = (float(parts[0]), float(parts[1]), int(parts[3])) \n nodeCount += 1\n elif len(parts) == 3:\n # edge line\n edges[edgeCount] = (int(parts[0]), int(parts[1])) \n edgeCount += 1\n return nodes, edges", "def build_graph(filepath):\n graph = defaultdict(list)\n with open(filepath, 'r') as file:\n for edge in file:\n head, tail = edge.split()\n graph[head].append(tail)\n return graph", "def load_graph(file_name, directed=True):\n G = nx.DiGraph() if directed else nx.Graph()\n with open(file_name, \"r\") as f:\n for line in f:\n tokens = line.split()\n u = int(tokens[0])\n v = int(tokens[1])\n if len(tokens) > 2:\n w = float(tokens[2])\n G.add_edge(u, v, weight=w)\n else:\n G.add_edge(u,v)\n return G", "def from_file(cls, filename: str, directed = False):\n with open(filename) as fh:\n vertnum = 
int(fh.readline().strip())\n int(fh.readline().strip())\n graph = Graph(vertnum, directed)\n\n for line in fh:\n numstr = line.split()\n v1 = int(numstr[0])\n v2 = int(numstr[1])\n graph.add_edge(v1, v2)\n\n return graph", "def read_graph(path):\n edge_list = pd.read_csv(path).values.tolist()\n graph = nx.from_edgelist(edge_list)\n return graph", "def read_file():\n\tgraph = {}\n\twith open('data/SCC.txt', 'r') as f:\n\t\told_index = '1'\n\t\tadjacency_list = []\n\t\tfor line in f:\n\t\t\tdata = line.split()\n\t\t\tnew_index = data[0]\n\t\t\tif old_index != new_index:\n\t\t\t\tgraph[old_index] = {'adj_nodes': adjacency_list, 'is_explored': False}\n\t\t\t\told_index = new_index\n\t\t\t\tadjacency_list = []\n\t\t\tadjacency_list.append(data[1])\n\t\tgraph[old_index] = {'adj_nodes': adjacency_list, 'is_explored': False}\n\n\tfor i in range(1, NUM_VERT + 1):\n\t\tif graph.get(str(i), False) is False:\n\t\t\tgraph[str(i)] = {'adj_nodes': [], 'is_explored': False}\n\treturn graph", "def create_graph(path):\n f = open(path, 'r')\n g = nx.DiGraph()\n\n # Velikost mnozice\n n = int(f.readline().split(\" \")[0])\n\n # Dodamo vsa vozlisca v graf\n for i in range(n):\n g.add_node(i+1)\n\n # Gremo cez vse primerjave in dodamo povezave v graf\n for line in f:\n u, v = line.strip().split(\" \")\n u, v = int(u), int(v)\n g.add_edge(u, v)\n\n return g", "def get_graph(path: str) -> nx.Graph:\n with open(path, 'r') as f:\n list_of_edges = [line.strip().split() for line in f.readlines()]\n g = nx.Graph()\n g.add_edges_from(list_of_edges)\n return g", "def read_graph(filename):\n\n print(\"\\n\\n========== Loading graph: \" + filename + '==================')\n edges = {}\n\n inFile = open(filename)\n for line in inFile:\n roadInfo = line.split()\n\n # Skip blank lines, read in contents from non-empty lines.\n if (len(roadInfo) > 0):\n srcCity = roadInfo[0]\n destCity = roadInfo[1]\n\n if srcCity in edges:\n edges[srcCity] = edges[srcCity] + [destCity]\n else:\n edges[srcCity] = [destCity]\n\n if destCity in edges:\n edges[destCity] = edges[destCity] + [srcCity]\n else:\n edges[destCity] = [srcCity]\n\n print(\" done.\\n\")\n return edges", "def ParseGraph(filename):\n vertices = []\n edges = set([])\n\n for l in open(filename):\n fields = [int(f) for f in l.split()]\n vertex = fields.pop(0)\n incident = [tuple(sorted([vertex, f])) for f in fields]\n vertices.append(vertex)\n edges.update(incident)\n\n return vertices, list(edges)", "def read(self):\n\t\tentities = dict()\n\t\trelations = set()\n\t\tedges = set()\n\t\twith open(self.file_path, encoding=\"utf-8\") as f:\n\t\t\tfor line in tqdm(f):\n\t\t\t\tif(self.prob == 1.0 or random() < self.prob):\n\t\t\t\t\tsource, relation, target, _ = line.split(\" \", 3)\n\t\t\t\t\tis_dataprop = target.startswith('\"')\n\t\t\t\t\tif source not in entities:\n\t\t\t\t\t\tentities[source] = dict(degree=0, out_degree=0, in_degree=0, data_properties={})\n\t\t\t\t\tentities[source][\"out_degree\"] += 1\n\t\t\t\t\tentities[source][\"degree\"] += 1\n\t\t\t\t\tif not is_dataprop:\n\t\t\t\t\t\tif target not in entities:\n\t\t\t\t\t\t\tentities[target] = dict(degree=0, out_degree=0, in_degree=0, data_properties={})\n\t\t\t\t\t\tentities[target][\"in_degree\"] += 1\n\t\t\t\t\t\tentities[target][\"degree\"] += 1\n\t\t\t\t\t\trelations.add(relation)\n\t\t\t\t\t\tedges.add((relation, source, target))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(self.include_dataprop):\n\t\t\t\t\t\t\tentities[source][\"data_properties\"][relation] = target\n\n\t\treturn (entities, relations, edges)", "def 
read_graph_g2o(filename):\n Edge = namedtuple(\n 'Edge', ['Type', 'fromNode', 'toNode', 'measurement', 'information'])\n edges = []\n nodes = {}\n with open(filename, 'r') as file:\n for line in file:\n data = line.split()\n\n if data[0] == 'VERTEX_SE2':\n nodeId = int(data[1])\n pose = np.array(data[2:5], dtype=np.float32)\n nodes[nodeId] = pose\n\n elif data[0] == 'VERTEX_XY':\n nodeId = int(data[1])\n loc = np.array(data[2:4], dtype=np.float32)\n nodes[nodeId] = loc\n\n elif data[0] == 'EDGE_SE2':\n Type = 'P'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:6], dtype=np.float32)\n uppertri = np.array(data[6:12], dtype=np.float32)\n information = np.array(\n [[uppertri[0], uppertri[1], uppertri[2]],\n [uppertri[1], uppertri[3], uppertri[4]],\n [uppertri[2], uppertri[4], uppertri[5]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n elif data[0] == 'EDGE_SE2_XY':\n Type = 'L'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:5], dtype=np.float32)\n uppertri = np.array(data[5:8], dtype=np.float32)\n information = np.array([[uppertri[0], uppertri[1]],\n [uppertri[1], uppertri[2]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n else:\n print('VERTEX/EDGE type not defined')\n\n # compute state vector and lookup table\n lut = {}\n x = []\n offset = 0\n for nodeId in nodes:\n lut.update({nodeId: offset})\n offset = offset + len(nodes[nodeId])\n x.append(nodes[nodeId])\n x = np.concatenate(x, axis=0)\n\n # collect nodes, edges and lookup in graph structure\n graph = Graph(x, nodes, edges, lut)\n print('Loaded graph with {} nodes and {} edges'.format(\n len(graph.nodes), len(graph.edges)))\n\n return graph", "def load_graph(path, n):\n\n f = open(path)\n g = nx.Graph()\n\n for line in f:\n parts = line.split()\n node1 = int(parts[0])\n node2 = int(parts[1])\n\n if node1 >= n:\n break\n\n if node1 <= n and node2 <= n:\n g.add_edge(node1, node2)\n\n f.close()\n return g", "def make_graph_from_file(filename):\n file = open(filename, \"r\")\n lines = file.readlines()\n file.close()\n\n # Check if it is a graph or digraph\n graph_or_digraph_str = lines[0].strip() if len(lines) > 0 else None\n if graph_or_digraph_str != \"G\" and graph_or_digraph_str != \"D\":\n raise Exception(\"File must start with G or D.\")\n is_bidirectional = graph_or_digraph_str == \"G\"\n\n g = Graph()\n\n # Add all vertices\n for vertex_key in lines[1].strip(\"() \\n\").split(\",\"):\n g.add_vertex(vertex_key)\n\n # Add all edges\n for line in lines[2:]:\n # Split components of edge\n new_edge = line.strip(\"() \\n\").split(\",\")\n if len(new_edge) < 2 or len(new_edge) > 3:\n raise Exception(\"Lines adding edges must include 2 or 3 values\")\n\n # Get vertices\n vertex1, vertex2 = new_edge[:2]\n\n # Get weight if it exists\n weight = int(new_edge[2]) if len(new_edge) == 3 else None\n\n # Add edge(s)\n g.add_edge(vertex1, vertex2, weight)\n if is_bidirectional:\n g.add_edge(vertex2, vertex1, weight)\n\n return g\n # Check if first line is 'G' or 'D' and store the value. 
If neither, raise an exception\n # For each vertex id in first line, add a vertex to the graph\n # For each of the following lines:\n # Extract the vertex ids and the (optional) weight, and add an edge to the graph\n # If it is a Graph and not a Digraph, add another edge in the opposite direction\n # Raise an exception if line contains too many (or too few) item\n raise Exception(f\"File must begin with G or D, found {firstline}\")", "def parse_graphml_file(filename: str, digraph=True):\n graphml_graph = nx.read_graphml(filename)\n if digraph:\n graphml_graph = graphml_graph.to_directed()\n\n return graphml_graph" ]
[ "0.7323825", "0.6937586", "0.68380326", "0.67994225", "0.67790306", "0.6770359", "0.6753334", "0.669916", "0.6612476", "0.65449065", "0.6532908", "0.6516059", "0.64703196", "0.64033914", "0.63891566", "0.6366815", "0.63625896", "0.634472", "0.6317197", "0.6311096", "0.62962216", "0.6286445", "0.6216713", "0.6215092", "0.6173808", "0.61655265", "0.6153044", "0.614722", "0.61371505", "0.5982137" ]
0.7031254
1
Standard backtracking approach to find the optimal opmesh assignment, starting with the optimal number of stages (best_n_stages). The return is a list [((layer_start, next_layer_start), submesh_shape_idx, sharding_config_idx)] where (layer_start, next_layer_start) is [) slice of the ops and submesh_shape_idx is the submesh those ops should be mapped to (sharding_config_idx is currently always 1 but will be eventually used pick optimal tensor sharding configuration).
def get_optimal_submesh_assignments( best_n_stages, F_argmin, n_devices, n_ops, submesh_sizes ): current_s = best_n_stages current_layer = 0 current_devices = n_devices optimal_layer_submesh_assignments = [] while current_s > 0 and current_layer < n_ops and current_devices > 0: next_start_layer, submesh_shape_idx, sharding_config_idx = F_argmin[ current_s, current_layer, current_devices ] assert next_start_layer != -1 and current_devices != -1 optimal_layer_submesh_assignments.append( ((current_layer, next_start_layer), submesh_shape_idx, sharding_config_idx) ) current_s -= 1 current_layer = next_start_layer current_devices -= submesh_sizes[submesh_shape_idx] assert current_s == 0 and current_layer == n_ops and current_devices == 0 return optimal_layer_submesh_assignments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inter_op_dp_inner_loop(\n n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages\n):\n F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)\n F_stage_max = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32\n )\n F_argmin = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32\n )\n F[0, n_layers, 0] = 0\n\n for d in range(1, n_devices + 1):\n for (\n l,\n i,\n submesh_shape_idx,\n sharding_config_idx,\n stage_cost,\n ) in valid_idxs_costs:\n l, i, submesh_shape_idx, sharding_config_idx = map(\n int, (l, i, submesh_shape_idx, sharding_config_idx)\n )\n\n n_submesh_devices = submesh_sizes[submesh_shape_idx]\n if n_submesh_devices <= d:\n for s in range(1, n_layers + 1):\n if (\n s - 1\n > max_n_succ_stages[\n l, i, submesh_shape_idx, sharding_config_idx\n ]\n ):\n continue\n\n new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost\n if new_cost < F[s, l, d]:\n F[s, l, d] = new_cost\n F_argmin[s, l, d] = (\n i + 1,\n submesh_shape_idx,\n sharding_config_idx,\n )\n F_stage_max[s, l, d] = max(\n F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost\n )\n\n return F, F_stage_max, F_argmin", "def inter_op_dp(\n n_layers: int,\n n_devices: int,\n n_microbatches: int,\n submesh_shapes: List[Tuple[int, int]],\n intra_compute_costs,\n max_n_succ_stages,\n):\n min_cost = np.inf\n best_solution = None\n prev_intra_cost = 0.0\n gap = 1e-6\n\n submesh_sizes: list = NumbaList()\n for n, m in submesh_shapes:\n submesh_sizes.append(n * m)\n\n for intra_cost in np.sort(np.unique(intra_compute_costs)):\n if intra_cost - prev_intra_cost < gap:\n continue\n if intra_cost * n_microbatches >= min_cost:\n break\n\n # Optimization that lifts a check for stage_cost <= t_max_stage_cost\n # out of the inner dp loop (see alpa/~/stage_construction.py#L121).\n # This yields a ~100-200x improvement over the baseline implementation.\n valid_cost_idxs = np.transpose((intra_compute_costs <= intra_cost).nonzero())\n # This corresponds to the i of k <= i <= K from eqn. 
3 in the alpa paper.\n valid_cost_idxs = valid_cost_idxs[\n valid_cost_idxs[:, 0] <= valid_cost_idxs[:, 1]\n ]\n valid_costs = intra_compute_costs[tuple(valid_cost_idxs.T)]\n valid_idxs_costs = np.hstack([valid_cost_idxs, valid_costs[:, np.newaxis]])\n\n F, F_stage_max, F_argmin = inter_op_dp_inner_loop(\n n_layers,\n n_devices,\n submesh_sizes,\n valid_idxs_costs,\n max_n_succ_stages,\n )\n\n best_n_stages = F[:, 0, n_devices].argmin()\n all_stages_cost = F[best_n_stages, 0, n_devices]\n slowest_stage_cost = F_stage_max[best_n_stages, 0, n_devices]\n if np.isinf(all_stages_cost):\n continue\n slowest_stage_total_cost = (n_microbatches - 1) * slowest_stage_cost\n\n if all_stages_cost + slowest_stage_total_cost < min_cost:\n min_cost = all_stages_cost + slowest_stage_total_cost\n best_solution = best_n_stages, F_argmin\n prev_intra_cost = intra_cost\n\n assert best_solution is not None\n best_n_stages, F_argmin = best_solution\n optimal_layer_submesh_assignments = get_optimal_submesh_assignments(\n best_n_stages, F_argmin, n_devices, n_layers, submesh_sizes\n )\n return optimal_layer_submesh_assignments", "def flax_shard_checkpoint(params, max_shard_size=\"10GB\"):\n max_shard_size = convert_file_size_to_int(max_shard_size)\n\n sharded_state_dicts = []\n current_block = {}\n current_block_size = 0\n total_size = 0\n\n # flatten the weights to chunk\n weights = flatten_dict(params, sep=\"/\")\n for item in weights:\n weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)\n\n # If this weight is going to tip up over the maximal size, we split.\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n current_block = {}\n current_block_size = 0\n\n current_block[item] = weights[item]\n current_block_size += weight_size\n total_size += weight_size\n\n # Add the last block\n sharded_state_dicts.append(current_block)\n\n # If we only have one shard, we return it\n if len(sharded_state_dicts) == 1:\n return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None\n\n # Otherwise, let's build the index\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = FLAX_WEIGHTS_NAME.replace(\".msgpack\", f\"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack\")\n shards[shard_file] = shard\n for weight_name in shard.keys():\n weight_map[weight_name] = shard_file\n\n # Add the metadata\n metadata = {\"total_size\": total_size}\n index = {\"metadata\": metadata, \"weight_map\": weight_map}\n return shards, index", "def dfs_maximizing(state) :\n #print state.describe_previous_move()\n global state_evals, path, _path, _score, level, _state;\n\n level+=1\n path.append(state)\n for stt in state.generate_next_states():\n score=0\n agenda.append((stt, level))\n \n if stt.is_game_over():\n state_evals+=1\n score=stt.get_endgame_score()\n if score>_score:\n _score=score\n _path = path[0:]\n _state = stt\n if not agenda:\n\n _path.append(_state)\n return [_path, _score, state_evals];\n else:\n new_state, level=agenda.pop()\n path=path[0:level]\n level-=1\n return dfs_maximizing(new_state)", "def get_optimal_patches(self):\n self.optimal_patch_centers = list()\n # Backtrace through cost to determine optimal samples\n for i in range(self.cost_matrix.shape[0] - 1, -1, -1):\n idx = self.nodes_min_energy_index(i)\n node = self.min_energy_index[i][idx]\n self.optimal_patch_centers.append(node)\n self.optimal_patch_centers.reverse()\n self.optimal_patch_centers = [\n int(patch) for patch in self.optimal_patch_centers if 
np.isfinite(patch)\n ]\n optimal_patch_centers = list()\n for patch_center in self.optimal_patch_centers:\n if (\n self.source_patches[self.patch_centers[patch_center]].size\n != self.patch_size * self.patch_size * 3\n ):\n node = patch_center - 1 if patch_center > 1 else patch_center + 1\n optimal_patch_centers.append(node)\n if optimal_patch_centers:\n self.optimal_patch_centers = optimal_patch_centers", "def minimax_endgame_search(state, maximize=True) :\n global depth;\n depth=0\n path=[]\n paths=[]\n _path, _score = get_minimax_score(state, maximize, path, paths,INF,always_zero)\n\n return [_path, _score, len(paths)]", "def get_floorplan(\n graph: DataflowGraph,\n slot_manager: SlotManager,\n grouping_constraints_in_str: List[List[str]],\n pre_assignments_in_str: Dict[str, str],\n floorplan_strategy: str = 'HALF_SLR_LEVEL_FLOORPLANNING',\n threshold_for_iterative: int = 200,\n floorplan_opt_priority: str = 'AREA_PRIORITIZED',\n min_area_limit: float = 0.65,\n max_area_limit: float = 0.85,\n min_slr_width_limit: int = 10000,\n max_slr_width_limit: int = 15000,\n max_search_time: int = 600,\n hbm_port_v_name_list: List[str] = []\n) -> Tuple[Dict[Vertex, Slot], List[Slot]]:\n # get initial v2s\n init_slot = slot_manager.getInitialSlot()\n init_v2s = {v : init_slot for v in graph.getAllVertices()}\n\n actual_usage = get_actual_usage(init_v2s.keys(), slot_manager.getInitialSlot())\n if max_area_limit < actual_usage:\n max_area_limit = actual_usage + 0.1\n cli_logger.warning('The specified max_area_limit is less than the actual usage of the design: %f. '\n 'Adjust max_area_limit to %f', actual_usage, max_area_limit)\n if min_area_limit < actual_usage:\n min_area_limit = actual_usage\n cli_logger.warning('Adjust the min_area_limit to the actual usage of the design: %f', actual_usage)\n\n cli_logger.info('')\n cli_logger.info('Floorplan parameters:')\n cli_logger.info('')\n cli_logger.info(' floorplan_strategy: %s', floorplan_strategy)\n cli_logger.info(' threshold for switching to iterative partitioning: %d', threshold_for_iterative)\n cli_logger.info(' floorplan_opt_priority: %s', floorplan_opt_priority)\n cli_logger.info(' min_area_limit: %f', min_area_limit)\n cli_logger.info(' max_area_limit: %f', max_area_limit)\n cli_logger.info(' min_slr_width_limit: %d', min_slr_width_limit)\n cli_logger.info(' max_slr_width_limit: %d', max_slr_width_limit)\n cli_logger.info(' max_search_time per solving: %d', max_search_time)\n cli_logger.info('')\n cli_logger.info('Start floorplanning, please check the log for the progress...\\n')\n\n # get grouping constraints of Vertex\n grouping_constraints: List[List[Vertex]] = [\n [graph.getVertex(v_name) for v_name in v_name_group]\n for v_name_group in grouping_constraints_in_str\n ]\n\n _logger.info(f'The following modules are grouped to the same location:')\n for grouping in grouping_constraints_in_str:\n _logger.info(' ' + ', '.join(grouping))\n\n # get pre_assignment in Vertex\n pre_assignments = { graph.getVertex(v_name) : slot_manager.createSlot(pblock)\n for v_name, pblock in pre_assignments_in_str.items()\n }\n\n # get the hbm port vertices\n hbm_port_v_list = [graph.getVertex(v_name) for v_name in hbm_port_v_name_list]\n for v_name in hbm_port_v_name_list:\n _logger.info('Binding of HBM vertex %s is subject to change', v_name)\n\n print_pre_assignment(pre_assignments)\n\n print_vertex_areas(init_v2s.keys(), slot_manager.getInitialSlot())\n\n params = {\n 'floorplan_opt_priority': floorplan_opt_priority,\n 'min_area_limit': min_area_limit,\n 
'max_area_limit': max_area_limit,\n 'min_slr_width_limit': min_slr_width_limit,\n 'max_slr_width_limit': max_slr_width_limit,\n 'max_search_time': max_search_time,\n 'hbm_port_v_list': hbm_port_v_list,\n }\n\n # choose floorplan method\n num_vertices = len(graph.getAllVertices())\n v2s: Dict[Vertex, Slot] = {}\n\n # if user specifies floorplan methods\n if floorplan_strategy == 'SLR_LEVEL_FLOORPLANNING':\n _logger.info(f'user specifies to floorplan into SLR-level slots')\n v2s = partition(\n init_v2s, slot_manager, grouping_constraints, pre_assignments, partition_method='FOUR_WAY_PARTITION', **params\n )\n\n if v2s:\n return v2s, get_four_way_partition_slots(slot_manager)\n else:\n return None, None\n\n elif floorplan_strategy == 'QUICK_FLOORPLANNING':\n _logger.info(f'user specifies to prioritize speed')\n v2s = iterative_bipartition(init_v2s, slot_manager, grouping_constraints, pre_assignments)\n if v2s:\n return v2s, get_eight_way_partition_slots(slot_manager)\n else:\n return None, None\n\n else:\n if floorplan_strategy != 'HALF_SLR_LEVEL_FLOORPLANNING':\n raise NotImplementedError('unrecognized floorplan strategy %s', floorplan_strategy)\n\n # empirically select the floorplan method\n if num_vertices < threshold_for_iterative:\n _logger.info(f'There are {num_vertices} vertices in the design, use eight way partition')\n\n if num_vertices > 100:\n _logger.warning('Over 100 vertices. May have a long solving time. Reduce threshold_for_iterative to skip to iterative bi-partitioning.')\n\n v2s = partition(\n init_v2s, slot_manager, grouping_constraints, pre_assignments, partition_method='EIGHT_WAY_PARTITION', **params\n )\n if v2s:\n return v2s, get_eight_way_partition_slots(slot_manager)\n else:\n _logger.warning(f'Please check if any function in the design is too large')\n\n _logger.info(f'Use four-way partition because eight-way partition failed or there are too many vertices ({num_vertices})')\n v2s = partition(\n init_v2s, slot_manager, grouping_constraints, pre_assignments, partition_method='FOUR_WAY_PARTITION', **params\n )\n if v2s:\n final_v2s = iterative_bipartition(v2s, slot_manager, grouping_constraints, pre_assignments, partition_order=[Dir.vertical])\n\n if final_v2s:\n return final_v2s, get_eight_way_partition_slots(slot_manager)\n else:\n return v2s, get_four_way_partition_slots(slot_manager)\n\n _logger.error(f'AutoBridge fails to partition the design at the SLR level. 
Either the design is too large, or the functions/modules are too large.')\n return None, None", "def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##", "def search(self, pid, start, layers):\n plan = []\n workload = [0 for _ in range(len(self.workers))]\n\n # each layer is a separate search for the worker to process the layer\n for i in range(len(layers)):\n layer = layers[i]\n target_color = layer[\"color\"]\n target_thickness = layer[\"thickness\"]\n processing_costs = {k: math.ceil(target_thickness / self.processing_rate[k][target_color]) for k in 
self.processing_rate}\n\n # Searches to find the cost of processing every node at each worker.\n # Cost consists of: Cost of the path \n # + Existing workload cost \n # + processing cost by the worker\n # \n # Basically Dijkstra's.\n visited = set()\n path = {}\n path_costs = {}\n pq = [(0, start)]\n curr_costs = {}\n\n # Assumes single connected component \n while len(visited) != len(self.workers):\n cost, curr = heapq.heappop(pq)\n if curr in visited: continue\n visited.add(curr)\n curr_costs[curr] = cost + processing_costs[self.worker_flavor[curr]] + self.workload[curr]\n if curr == self.origin:\n curr_costs[curr] += self.origin_penalty\n for neighbor in self.neighbors[curr]:\n if neighbor in visited: continue\n cost_new = cost + 1 \n if neighbor == self.origin:\n cost_new += self.origin_penalty\n if neighbor not in path_costs or cost_new < path_costs[neighbor]:\n path_costs[neighbor] = cost_new\n path[neighbor] = curr\n heapq.heappush(pq, (cost_new, neighbor))\n\n # Get the best cost and candidate for processing the current layer\n best_cost = float(\"inf\")\n best_cand = -1\n for cand in curr_costs:\n if curr_costs[cand] < best_cost:\n best_cost = curr_costs[cand]\n best_cand = cand\n\n # If the best candidate isn't the starting node, add the cost of the\n # path for future workload considerations\n if best_cand != start:\n # create the path \n best_path = [best_cand]\n while best_path[-1] != start:\n best_path.append(path[best_path[-1]])\n best_path = best_path[::-1]\n\n # Add the Pass operations to the plan\n prev = start \n for curr in best_path[1:]:\n workload[prev] += 1\n plan.append([1, {\"Pass\":{\"pearl_id\":pid,\"to_worker\":curr}}])\n prev = curr\n\n # Add the noms to the plan \n workload[best_cand] += processing_costs[self.worker_flavor[best_cand]]\n plan.append([processing_costs[self.worker_flavor[best_cand]], {\"Nom\": pid}])\n\n # Set the last worker in the path as the start of the next search pass\n start = best_cand\n return plan, workload, start", "def _extract_solution(self, manager: RoutingIndexManager, routing: RoutingModel, assignment: Assignment, indices_to_visit: List[int]) -> Dict[str, Any]:\n sln = {\"objective\": assignment.ObjectiveValue()}\n \n stop_indices = []\n index = routing.Start(0)\n while not routing.IsEnd(index):\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n sln[\"order\"] = stop_indices\n return sln", "def stage_mesh_axis(self):\n stage_mesh_axis = None\n p = self.params\n if p.mesh_axis_names is not None:\n stage_mesh_axis = base_layer.to_partition_spec(\n p.weight_split_dims_mapping.stages, p.mesh_axis_names\n )[0]\n return stage_mesh_axis", "def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the 
path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)", "def getOptimalSolution(self):\n max_index = np.argmax(self.Ws)\n self.Wmax = self.Ws[max_index]\n self.Emax = self.subsets[max_index]\n return (self.Wmax, self.Emax)", "def managable_mesh_list(mesh_lst, struct_grd=False):\n #step1\n grd_fact = (1+int(struct_grd))\n slice_idx = 0\n for idx, mm in enumerate(mesh_lst):\n num_cells = mm.num_cells()\n if(int(num_cells/grd_fact) <600):\n print(\"removing the mesh at index %i due to low cell count (%i) for peridynamic calculations\"%(idx, int(num_cells/grd_fact)))\n slice_idx = idx\n\n mesh_lst = mesh_lst[slice_idx+1:]\n \n #Step2\n if(len(mesh_lst)> 5):\n print(\"Too many meshes in the list, resizing to managable size\")\n return mesh_lst[0:3]", "def solve_all_stages(stages, objects_dic, predicates_rules, gstate, actionlist, problem_dic):\n\n result = {}\n result[\"visualStages\"] = []\n for stage in stages:\n\n stage_dic = {}\n object_dic_copy = copy.deepcopy(objects_dic)\n predicates = stage[\"items\"]\n sorted_predicates = priority(predicates, predicates_rules)\n\n # For hanoi problem, reset each stage\n # For logistics problem, reset each stage\n for fname in gstate[\"reset_function\"]:\n gstate[fname] = {}\n solvepredicates(sorted_predicates, object_dic_copy, predicates_rules, gstate)\n stage_dic[\"visualSprites\"] = object_dic_copy\n if \"stageName\" not in stage:\n stage_dic[\"stageName\"] = \"Inital Stage\"\n stage_dic[\"stageInfo\"] = \"No step information\"\n\n else:\n stage_dic[\"stageName\"] = stage[\"stageName\"]\n stage_dic[\"stageInfo\"] = stage[\"stageInfo\"]\n\n result[\"visualStages\"].append(stage_dic)\n\n result[\"subgoals\"] = Subgoal.get_subgoal(stages, problem_dic[1]['goal'].copy(), actionlist.copy())\n\n return result", "def solve(num_wizards, num_constraints, wizards, constraints): \n global wiz_const\n wiz_const = mapConstraints(wizards, constraints)\n partial_soltns = []\n\n # counter for priority queue since it doesn't allow \n # identical priorities\n k = 0\n\n # list of wizards sorted by lowest to highest degree\n sorted_wiz = sortWizByConsts(wiz_const)\n wiz_rankings = {wiz: i for i, wiz in enumerate(sorted_wiz)}\n\n const_set = set(map(tuple, constraints))\n for i in range(4) : \n heapq.heappush(partial_soltns, (0, k, nx.DiGraph(), const_set.copy()))\n k += 1\n\n print(\"setup done, commencing solving\")\n\n while len(partial_soltns) : \n\n # for partial_soltn, const_set in partial_soltns : \n# partial_soltns.remove(partial_soltn)\n num_seen, _, partial_soltn, const_set = heapq.heappop(partial_soltns)\n const = findNextConst(partial_soltn, const_set, wiz_rankings)\n print(\"seen \" + str(len(partial_soltn)) + \"\\t num partial_solutions\\t\" + str(len(partial_soltns)))\n try : \n const_set.remove(const)\n except KeyError : \n print(\"BAD SHIT\")\n pass\n possible_arrangements = [(const[0], const[1], const[2]),\n (const[2], const[0], const[1]), 
\n (const[2], const[1], const[0]),\n (const[1], const[0], const[2])]\n for arr in possible_arrangements:\n soltn = partial_soltn.copy()\n a, b, c = arr\n if not (soltn.has_node(a) and soltn.has_node(b) and nx.has_path(soltn, a, b)) : \n soltn.add_edge(a, b)\n if not (soltn.has_node(b) and soltn.has_node(c) and nx.has_path(soltn, b, c)) : \n soltn.add_edge(b, c)\n # see if we violated any other constraints (seen or not seen)\n is_valid, num_wiz = validNumWiz(soltn, const_set)\n\n if is_valid and len(list(nx.simple_cycles(soltn))) == 0 :\n heapq.heappush(partial_soltns, (-len(soltn), k, soltn, const_set.copy()))\n k += 1\n # are we done?\n if num_wiz == num_wizards :\n print(\"FINAL SOLUTION (found without processing all constraints but validating against them)\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n if foundCompleteOrdering(heapq.heappop(partial_soltns)) : \n print(\"FINAL SOLUTION\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n print(\"NO SOLUTION FOUND\")\n return \"\"", "def partition_mesh(mesh, n_parts, use_metis=True, verbose=False):\n output('partitioning mesh into %d subdomains...' % n_parts, verbose=verbose)\n timer = Timer(start=True)\n\n if use_metis:\n try:\n from pymetis import part_graph\n\n except ImportError:\n output('pymetis is not available, using naive partitioning!')\n part_graph = None\n\n if use_metis and (part_graph is not None):\n cmesh = mesh.cmesh\n cmesh.setup_connectivity(cmesh.dim, cmesh.dim)\n graph = cmesh.get_conn(cmesh.dim, cmesh.dim)\n\n cuts, cell_tasks = part_graph(n_parts, xadj=graph.offsets.astype(int),\n adjncy=graph.indices.astype(int))\n cell_tasks = nm.array(cell_tasks, dtype=nm.int32)\n\n else:\n ii = nm.arange(n_parts)\n n_cell_parts = mesh.n_el // n_parts + ((mesh.n_el % n_parts) > ii)\n output('cell counts:', n_cell_parts, verbose=verbose)\n assert_(sum(n_cell_parts) == mesh.n_el)\n assert_(nm.all(n_cell_parts > 0))\n\n offs = nm.cumsum(nm.r_[0, n_cell_parts])\n cell_tasks = nm.digitize(nm.arange(offs[-1]), offs) - 1\n\n output('...done in', timer.stop(), verbose=verbose)\n\n return cell_tasks", "def build_data_parallel_strategies(\n train_step_graph: GraphModule,\n num_params: int,\n num_states: int,\n mesh: DeviceMesh,\n batch_dim: int = 0,\n) -> Dict[fx.Node, StrategyType]:\n activation_idx = num_params + num_states\n non_compute_ops = [\n aten.clone.default,\n aten.detach.default,\n aten.ones_like.default,\n aten.reshape.default,\n aten.t.default,\n aten.view.default,\n torch.ops._spmd.tag_grad.default,\n operator.getitem,\n ]\n\n tuple_strategy_ops = [aten._fused_adam.default]\n\n dp_strategy_map: Dict[fx.Node, StrategyType] = {}\n batch_dim_analyzer = BatchDimAnalyzer(batch_dim)\n placeholder_idx = 0\n num_param_grad = 0\n\n # first we backward propagate to mark the param gradients sharding\n # with tag_grad node helps and then delete the tag_grad nodes\n for node in reversed(list(train_step_graph.graph.nodes)):\n # find a param_grad node via the tagging\n if node.target == torch.ops._spmd.tag_grad.default:\n cur_node = node\n while cur_node.target in non_compute_ops:\n cur_node = cur_node.args[0]\n partial_strategy = _gen_partial_strategy(mesh)\n dp_strategy_map[cur_node] = DataParallelStrategy(\n NodeType.GRAD, [partial_strategy]\n )\n num_param_grad += 1\n # remove the tag_grad node from graph\n node.replace_all_uses_with(node.args[0])\n train_step_graph.graph.erase_node(node)\n\n if num_param_grad == 
num_params:\n # early break if we have already processed all param_grads\n break\n\n # next we forward propagate to mark all the sharding\n for node in train_step_graph.graph.nodes:\n if node.op == \"placeholder\":\n if \"val\" not in node.meta:\n # NOTE: There're certain cases where the placeholder nodes do\n # not have real tensor values:\n # 1. optimizer states can be None sometimes, i.e. SGD with\n # no momentum, optimizer states populate `momentum` state\n # as None, the full graph we get from `compile` would have\n # None as the placeholder value\n # 2. function args might not only contain params or activations,\n # but also contain other non-tensor inputs, i.e. the model\n # and optimizer instances baked in as a placeholder, there might\n # also be some scalar argument which is not a tensor\n #\n # For the above cases, we create a NON_TENSOR stratgy so that we\n # know it's not a tensor and we don't need to shard it\n dp_strategy_map[node] = DataParallelStrategy(NodeType.NON_TENSOR, [])\n\n elif placeholder_idx < num_params:\n # during compilation there's an assumption that the first num_params\n # placeholders should be parameters\n shard_strategy = _gen_shard_strategy(mesh, 0)\n replica_strategy = _gen_replicate_strategy(mesh)\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.PARAM, [replica_strategy, shard_strategy]\n )\n\n elif placeholder_idx < activation_idx:\n # optimizer states follow the same strategy as\n # the corresponding parameters\n replica_strategy = _gen_replicate_strategy(mesh)\n shard_strategy = _gen_shard_strategy(mesh, 0)\n\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.STATE, [replica_strategy, shard_strategy]\n )\n else:\n activation_batch_dim_size = node.meta[\"val\"].shape[batch_dim]\n # find the first activation node and use its batch dim size\n if batch_dim_analyzer.batch_dim_size == -1:\n batch_dim_analyzer.init_batch_dim_size(activation_batch_dim_size)\n\n batch_dim_analyzer.set_batch_dim(node, batch_dim)\n shard_strategy = _gen_shard_strategy(mesh, batch_dim)\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.ACT, [shard_strategy]\n )\n placeholder_idx += 1\n elif node.op == \"call_function\":\n # Annotate node types for the computation graph\n # Data Parallel node propagation logic:\n # param (non-compute) -> out: param\n # grad (non-compute before/after) -> out: grad\n # state -> output: state\n #\n # param + activation (param must be replicate, act be sharded) -> out: activation\n # param/state + grad (param/state/grad be the same spec) -> out: param/state\n # param + state -> out: param\n\n if node.target in non_compute_ops:\n # At this point, we should have removed all the `tag_grad` nodes in the graph\n assert node.target != torch.ops._spmd.tag_grad.default\n\n input_nodes = node.all_input_nodes\n assert (\n len(input_nodes) == 1\n ), f\"non-compute op only support one input now, found node: {node} with length of inputs: {len(node.args)}\"\n arg_strategy = dp_strategy_map[input_nodes[0]]\n\n if node.target == operator.getitem:\n # for getitem call, just forward the strategy from the input\n getitem_idx = node.args[1]\n if isinstance(arg_strategy, TupleStrategy):\n # for tuple strategy, we need to get the child strategy from the tuple\n dp_strategy_map[node] = arg_strategy.childs[getitem_idx]\n else:\n # if it's not a tuple strategy, we just forward the arg strategy\n dp_strategy_map[node] = arg_strategy\n else:\n assert isinstance(arg_strategy, DataParallelStrategy)\n arg_node_type = arg_strategy.node_type\n if 
arg_node_type == NodeType.PARAM:\n replica_strategy = _gen_replicate_strategy(mesh)\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.PARAM, [replica_strategy]\n )\n elif arg_node_type == NodeType.GRAD:\n partial_sig = _gen_partial_strategy(mesh)\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.GRAD, [partial_sig]\n )\n elif arg_node_type == NodeType.ACT:\n arg_node_spec = batch_dim_analyzer.compute_act_spec(\n input_nodes[0], mesh\n )\n\n output_spec = batch_dim_analyzer.compute_act_spec(node, mesh)\n\n shard_strategy = PlacementStrategy(\n output_spec=output_spec, input_specs=[arg_node_spec]\n )\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.ACT, [shard_strategy]\n )\n else:\n raise RuntimeError(\n f\"non compute op not supporting {arg_node_type}! \"\n )\n\n # finished processing this non-compute node\n continue\n\n # for computatation nodes, we need to check all the inputs\n input_args = node.all_input_nodes\n input_specs = []\n if node in dp_strategy_map:\n # found a param_grad node that already have output pre-filled spec\n # fill in the expected input specs for the pre-filled strategy\n node_strategy = dp_strategy_map[node]\n assert isinstance(node_strategy, DataParallelStrategy)\n node_type = node_strategy.node_type\n assert node_type == NodeType.GRAD\n produce_param_grad_strat = node_strategy.strategies\n has_activation = False\n for arg in input_args:\n arg_strategy = dp_strategy_map[arg]\n assert isinstance(arg_strategy, DataParallelStrategy)\n arg_node_type = arg_strategy.node_type\n if arg_node_type == NodeType.ACT:\n # activation sharded\n has_activation = True\n act_spec = batch_dim_analyzer.compute_act_spec(arg, mesh)\n\n input_specs.append(act_spec)\n\n if has_activation:\n assert len(produce_param_grad_strat) == 1\n produce_param_grad_strat[0].input_specs = input_specs\n elif node.target in tuple_strategy_ops:\n # ops that need to build tuple strategy instead of normal strategy\n # This should happen rarely and only needed when we need to generate\n # different node strategy for multiple outputs (i.e. 
fused_adam op)\n # TODO: Currently this specializes to fused optimizer ops, but we need\n # to see how to generalize this strategy building logic\n output_strategy_len = len(node.args) - 1\n tuple_strategies = []\n for i in range(output_strategy_len):\n if not isinstance(node.args[i], list):\n raise RuntimeError(\n f\"Expecting list as arg to build Tuple Strategy, but found type {type(node.args[i])}!\"\n )\n # for list/tuple arg, use the first one to find out the node type\n if len(node.args[i]) > 0:\n arg_strategy = dp_strategy_map[node.args[i][0]]\n assert isinstance(arg_strategy, DataParallelStrategy)\n assert arg_strategy.node_type in [\n NodeType.PARAM,\n NodeType.GRAD,\n NodeType.STATE,\n ], \"Expecting param/grad/state as arg to build Tuple Strategy!\"\n replica_strategy = _gen_replicate_strategy(mesh)\n shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)\n out_node_strategy: StrategyType = DataParallelStrategy(\n arg_strategy.node_type, [replica_strategy, shard_strategy]\n )\n\n tuple_strategies.append(out_node_strategy)\n\n output_tuple_strategy = TupleStrategy(tuple(tuple_strategies))\n dp_strategy_map[node] = output_tuple_strategy\n else:\n # NOTE: This is the common region for all regular computation ops\n\n input_node_types = [\n cast(DataParallelStrategy, dp_strategy_map[arg]).node_type\n for arg in input_args\n if isinstance(dp_strategy_map[arg], DataParallelStrategy)\n ]\n if NodeType.GRAD in input_node_types:\n # param/state + grad, build up acceptable strategy\n # the strategy should be the same for all the inputs/outputs\n # TODO: optimizer parts should follow the dtensor prop logic\n # to support more general cases that allows optimizer states\n # to have different shardings compare to the params\n replica_strategy = _gen_replicate_strategy(mesh)\n shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)\n output_node_type = NodeType.PARAM\n\n non_grad_types = [t for t in input_node_types if t != NodeType.GRAD]\n\n output_node_type = non_grad_types[0]\n for non_grad_type in non_grad_types:\n assert (\n non_grad_type == output_node_type\n ), f\"Found more than one non grad types! 
Expect {output_node_type} but found {non_grad_type}!\"\n assert output_node_type in [\n NodeType.PARAM,\n NodeType.STATE,\n ], f\"Expecting output node type to be either state or param, but found {output_node_type}!\"\n\n dp_strategy_map[node] = DataParallelStrategy(\n output_node_type, [replica_strategy, shard_strategy]\n )\n elif NodeType.STATE in input_node_types:\n # either param + state or state + state\n replica_strategy = _gen_replicate_strategy(mesh)\n shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)\n output_node_type = (\n NodeType.PARAM\n if NodeType.PARAM in input_node_types\n else NodeType.STATE\n )\n\n dp_strategy_map[node] = DataParallelStrategy(\n output_node_type, [replica_strategy, shard_strategy]\n )\n elif NodeType.PARAM in input_node_types:\n if NodeType.ACT in input_node_types:\n # param + activation, build up acceptable strategy\n # param must be replicated, activation must be sharded\n for arg in input_args:\n arg_strategy = dp_strategy_map[arg]\n assert isinstance(arg_strategy, DataParallelStrategy)\n node_type = arg_strategy.node_type\n if node_type == NodeType.ACT:\n # compute activation spec\n act_spec = batch_dim_analyzer.compute_act_spec(\n arg, mesh\n )\n\n input_specs.append(act_spec)\n elif node_type == NodeType.PARAM:\n # param must be replicated\n input_specs.append(\n DTensorSpec(mesh=mesh, placements=[Replicate()])\n )\n else:\n raise RuntimeError(\n f\"Expecting node with parameter and activation, but found {input_node_types}! \"\n )\n # produce activation type sharding for output\n output_spec = batch_dim_analyzer.compute_act_spec(node, mesh)\n\n act_strategy = PlacementStrategy(\n output_spec=output_spec, input_specs=input_specs\n )\n\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.ACT, [act_strategy]\n )\n else:\n # If inputs only have parameters, the\n # strategy of this node should follow input\n dp_strategy_map[node] = dp_strategy_map[input_args[0]]\n else:\n # If input nodes does not have PARAM/GRAD/STATE, then\n # it should be a pure activation computation, it should\n # produce activation output.\n # Activations are usually sharded unless model creates\n # new tensors during computation, which depend on whether\n # the new tensor associate with a batch dim or not, it could\n # be shard/replicate/partial, batch dim analyzer should tell\n # us the correct sharding.\n for arg in input_args:\n arg_strategy = dp_strategy_map[arg]\n assert isinstance(arg_strategy, DataParallelStrategy)\n input_spec = batch_dim_analyzer.compute_act_spec(arg, mesh)\n\n input_specs.append(input_spec)\n\n act_spec = batch_dim_analyzer.compute_act_spec(node, mesh)\n op_strategy = PlacementStrategy(\n output_spec=act_spec, input_specs=input_specs\n )\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.ACT, [op_strategy]\n )\n\n elif node.op == \"output\":\n dp_strategy_map[node] = DataParallelStrategy(NodeType.NON_TENSOR, [])\n else:\n raise RuntimeError(f\"op code {node.op} not supported\")\n\n return dp_strategy_map # type: ignore[return-value]", "def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))", "def find_max_score_location(grid, shape):", "def get_bc_parts(mesh, lst):\n if len(lst) > 0:\n shift = max(0, -min(e.value for e in lst))\n else:\n return [], [], 0, FacetFunction(\"size_t\", mesh, 0)\n # values must be shifted by smallest Steklov value since 
size_t is unsigned\n fun = FacetFunction(\"size_t\", mesh, shift)\n for bc in lst:\n sub = OnBoundary()\n # overwrite inside function with the one from bc\n sub.inside = bc.getTest()\n sub.mark(fun, bc.value + shift)\n # some conditions may cancel eachother\n exist = set(np.unique(fun.array()))\n lst = [e for e in lst if e.value+shift in exist]\n # separate Robin and Steklov, Dirichlet and Neumann are irrelevant\n Robin = [e for e in lst if e.value > 1 and e.parValue != 0]\n Steklov = [e for e in lst if e.value < 0 and e.parValue != 0]\n return Robin, Steklov, shift, fun", "def get_best_clique(self):\n\t\treturn [i+1 for i in range(self._size) if self._globalMinimumState[i] == 1]", "def best_move(data, indexing, cf, cf_prime, N=20, M=30): \n stats = {}\n timer = time()\n ns = list(neighbours(indexing, random_stream=N))\n stats[\"n_neighbours\"] = len(ns)\n stats[\"t_neighbours\"] = 1000*(time() - timer)\n\n dt_rcs = []\n bestpair, best_rcost = None, None\n for v,k in ns:\n timer = time()\n rc = reduced_cost(data, indexing, cf, cf_prime, v, k, uw_sample_count=M)\n dt_rcs.append(1000*(time() - timer))\n if bestpair is None or rc > best_rcost:\n bestpair = v,k\n best_rcost = rc\n\n stats[\"t_rcs_mean\"] = np.mean(dt_rcs)\n stats[\"t_rcs_std\"] = np.std(dt_rcs)\n stats[\"t_rcs_sum\"] = np.sum(dt_rcs)\n stats[\"rc\"] = best_rcost\n stats[\"partcount\"] = np.unique(indexing).shape[0]\n return bestpair, best_rcost, stats", "def Compute_Grid(Idx, Coeff, q_max, rules, growth, LevelMax, sc, p, tol ):\n\n seed = 123456789\n #Coeff= Sandia.calculate_coefficients(Idx, q_max)\n new_np = Sandia.max_next_points(Idx, Coeff, rules, growth)\n points = Sandia.weights_and_points(new_np, LevelMax, Idx, Coeff, growth, rules, sc, p)\n N_Unique, sparse_index = Sandia.unique_points(seed, tol, points)\n return Sandia.reduce_points_and_weights(N_Unique, points, Idx, sparse_index, Coeff, growth, rules, sc, p)", "def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = 10 # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # ##print(p, map_size, num_feat, tmp)\n return tmp", "def refinemesh(prev_legs, state0_chaser, n_s):\n scored_points = np.array([])\n for leg in prev_legs:\n scored_point = [*leg.dv, leg.t_leg, leg.score]\n scored_points = np.append(scored_points, scored_point)\n scored_points = scored_points.reshape(len(prev_legs), 5)\n tri = Delaunay(scored_points[:, 0:4], qhull_options='QJ')\n m_max = max(scored_points[:, 4]) # Maximum trajectory score of all simplices of the triangulation\n if m_max == 0:\n print('algorithm.py: m_max = 0 because all leg scores are 0!!!')\n m_max = 1 
# to avoid raising the dividing by 0 error if all leg scores are 0\n g_max = 1\n for q in tri.simplices:\n smplx_scores = scored_points[:, 4][q] # scores of the points defining the simplex\n aux = mean(smplx_scores)\n if g_max < aux:\n g_max = aux\n\n simplices_scored = []\n for q in tri.simplices:\n smplx_score = compute_simplexscore(q, scored_points, m_max, g_max)\n # simp_scored = [smplx_score, q_vec]\n simplices_scored.append([smplx_score, q])\n sorted_simp_scores = sorted(simplices_scored, reverse=True) # ranks the simplices based on score\n new_samples = samplewithinbestsimplices(sorted_simp_scores, tri.points, n_s)\n\n new_legs = []\n for s in new_samples:\n leg = Leg(s[0:3], s[3], state0_chaser)\n new_legs.append(leg)\n\n return new_legs", "def partition(data, s, b, u, res, points, size, depth):\r\n\t# depth is just for demonstration purposes, terminating the recursion early\r\n\t\r\n\t# termination conditions\r\n\tif size > 1 and depth > 0:\r\n\r\n\t\t# variables that keep track of the scope of \"points\" for iteration purposes\r\n\t\trlen = []\r\n\t\tclen = len(points)\r\n\t\tfor i in range(clen):\r\n\t\t\trlen.append(len(points[i]))\r\n\t\t\r\n\t\t# keeps track of which point defines the maximal set\r\n\t\tmax = -10000\r\n\t\tmax_index = [0,0]\r\n\r\n\t\t# each point on the grid defines a potentially maximal set (including that point and the best \r\n\t\t# choice for higher rows) s[x][y] tracks the value of the set defined by (x, y)\r\n\t\tfor i in range(len(points)):\r\n\t\t\t# calculating s based on current row\r\n\t\t\ts[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]] = data[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]]\r\n\t\t\tfor j in range(rlen[i] - 2, -1, -1):\r\n\t\t\t\ts[points[i][j][0]][points[i][j][1]] = s[points[i][j + 1][0]][points[i][j + 1][1]] + data[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\r\n\t\t\t# if below the first row, factoring in the optimal set from above rows\r\n\t\t\tif i != 0:\r\n\t\t\t\tprev_end = points[i-1][rlen[i-1]-1]\r\n\t\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\t\tu[points[i][j][0]][points[i][j][1]] = b[prev_end[0]][np.minimum(prev_end[1], points[i][j][1])]\r\n\t\t\t\t\ts[points[i][j][0]][points[i][j][1]] += s[prev_end[0]][u[points[i][j][0]][points[i][j][1]]]\r\n\t\t\t\r\n\t\t\t# keeping track of the best sets from the new row for later use (what b and u are for)\r\n\t\t\trow_max = -10000\r\n\t\t\trow_max_index = -1\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tcurr = s[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\tif curr > row_max:\r\n\t\t\t\t\trow_max = curr\r\n\t\t\t\t\trow_max_index = points[i][j][1]\r\n\t\t\t\tb[points[i][j][0]][points[i][j][1]] = row_max_index\r\n\r\n\t\t\t# updating the global optimal set\r\n\t\t\tif row_max > max:\r\n\t\t\t\tmax = row_max\r\n\t\t\t\tmax_index[0] = i\r\n\t\t\t\tmax_index[1] = row_max_index\r\n\t\t\r\n\t\t# finding the set of points that generated the global optimum\r\n\t\tpointers = []\r\n\t\tpointers.append(max_index[1])\r\n\t\tfor i in range(max_index[0], 0, -1):\r\n\t\t\tpointers.append(u[points[i][0][0]][pointers[max_index[0]-i]])\r\n\t\tpointers = np.flip(pointers, axis=0)\r\n\t\t\r\n\t\t# finding the set of points of the upper and lower partitions defined by the optimal set\r\n\t\tupper_points = []\r\n\t\tlower_points = []\r\n\t\tup_num = 0\r\n\t\tlow_num = 0\r\n\t\tfor i in range(clen):\r\n\t\t\turow = []\r\n\t\t\tlrow = []\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tif i <= max_index[0] and points[i][j][1] >= pointers[i]:\r\n\t\t\t\t\turow.append(points[i][j])\r\n\t\t\t\t\tup_num += 
1\r\n\t\t\t\telse:\r\n\t\t\t\t\tlrow.append(points[i][j])\r\n\t\t\t\t\tlow_num += 1\r\n\t\t\tif len(urow) > 0:\r\n\t\t\t\tupper_points.append(tuple(urow))\r\n\t\t\tif len(lrow) > 0:\r\n\t\t\t\tlower_points.append(tuple(lrow))\r\n\r\n\t\t# updating the final result and prepping the new datasets to have mean 0\r\n\t\tfor i in range(len(upper_points)):\r\n\t\t\tfor j in range(len(upper_points[i])):\r\n\t\t\t\tres[upper_points[i][j][0]][upper_points[i][j][1]] += max/up_num\r\n\t\t\t\tdata[upper_points[i][j][0]][upper_points[i][j][1]] -= max/up_num\r\n\t\tfor i in range(len(lower_points)):\r\n\t\t\tfor j in range(len(lower_points[i])):\r\n\t\t\t\tres[lower_points[i][j][0]][lower_points[i][j][1]] -= max/low_num\r\n\t\t\t\tdata[lower_points[i][j][0]][lower_points[i][j][1]] += max/low_num\r\n\t\t\r\n\t\t# recursion (if the optimal set is the current one, stop since at this point \r\n\t\t# the mean of the selected elements is optimal over them)\r\n\t\tif up_num != size:\r\n\t\t\tpartition(data, s, b, u, res, upper_points, up_num, depth-1)\r\n\t\tif low_num != size:\r\n\t\t\tpartition(data, s, b, u, res, lower_points, low_num, depth-1)\r\n\telse:\r\n\t\treturn", "def create_partition(mesh,polygons,enforce_exact=False):", "def optimal_route(graph,homes,source):\n number_of_homes = len(homes)\n all_pairs_distances = dict(nx.shortest_path_length(graph, weight = 'weight'))\n all_pairs_shortest_paths = dict(nx.shortest_path(graph, weight = 'weight'))\n homes_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,homes,all_pairs_distances)\n num_clusters_to_clustering = clustering_routines.all_k_clusters(homes_subgraph,number_of_homes)\n \n cluster_list = range(1,number_of_homes+1)\n optimal_cost = np.Inf\n optimal_dropoffs = dict()\n optimal_route = []\n optimal_num_clusters = 0\n\n\n for num_clusters in cluster_list:\n home_clusters = num_clusters_to_clustering[num_clusters]\n cost, dropoffs, route = solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths)\n if cost < optimal_cost:\n optimal_cost = cost\n optimal_route = route \n optimal_dropoffs = dropoffs\n optimal_num_clusters = num_clusters\n\n return optimal_cost, optimal_dropoffs, optimal_route, optimal_num_clusters", "def eo_edges(self):\n logger.info(\"eo_edges called\")\n permutations = []\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # Build a list of the wing strings at each midge\n wing_strs = []\n\n for _, square_index, partner_index in midges_recolor_tuples_555:\n square_value = self.state[square_index]\n partner_value = self.state[partner_index]\n wing_str = square_value + partner_value\n wing_str = wing_str_map[square_value + partner_value]\n wing_strs.append(wing_str)\n\n # build a list of all possible EO permutations...an even number of edges must be high\n for num in range(4096):\n num = str(bin(num)).lstrip(\"0b\").zfill(12)\n if num.count(\"1\") % 2 == 0:\n permutations.append(list(map(int, num)))\n\n # Put all 2048 starting states in a file and point ida-via-graph\n # at the file so it can solve all of them and apply the one that is the shortest.\n lr_center_stage_states = []\n eo_outer_orbit_states = []\n eo_inner_orbit_states = []\n\n for permutation in permutations:\n must_be_uppercase = []\n must_be_lowercase = []\n self.state = original_state[:]\n\n for wing_str, uppercase in zip(wing_strs, permutation):\n if uppercase:\n must_be_uppercase.append(wing_str)\n else:\n must_be_lowercase.append(wing_str)\n\n # 
logger.info(\"%s: %s permutation %s\" % (self, index, \"\".join(map(str, permutation))))\n self.edges_flip_orientation(must_be_uppercase, must_be_lowercase)\n\n # build lists of the states that we need to find state_indexes for\n lr_center_stage_states.append(self.lt_phase3_lr_center_stage.state())\n eo_outer_orbit_states.append(self.lt_phase3_eo_outer_orbit.state())\n eo_inner_orbit_states.append(self.lt_phase3_eo_inner_orbit.state())\n\n # now we have a huge list of states to lookup, do a binary search on multiple states at once (this is drastically faster\n # than binary searching for them individually). state_index_multiple() will return a dict where the state is the key\n # and the state_index is the value.\n lr_center_stage_eo_inner_orbit_state_indexes = self.lt_phase3_lr_center_stage.state_index_multiple(\n lr_center_stage_states\n )\n eo_outer_orbit_state_indexes = self.lt_phase3_eo_outer_orbit.state_index_multiple(eo_outer_orbit_states)\n eo_inner_orbit_state_indexes = self.lt_phase3_eo_inner_orbit.state_index_multiple(eo_inner_orbit_states)\n\n # build a list of tuples of the state indexes\n pt_state_indexes = []\n for lr_center_stage_eo_inner_orbit_state, eo_outer_orbit_state, eo_inner_orbit_state in zip(\n lr_center_stage_states, eo_outer_orbit_states, eo_inner_orbit_states\n ):\n pt_state_indexes.append(\n (\n lr_center_stage_eo_inner_orbit_state_indexes[lr_center_stage_eo_inner_orbit_state],\n eo_outer_orbit_state_indexes[eo_outer_orbit_state],\n eo_inner_orbit_state_indexes[eo_inner_orbit_state],\n )\n )\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # When solve_via_c is passed pt_state_indexes (2048 lines of states in this case), it will try all 2048 of them\n # to find the state that has the shortest solution.\n self.lt_phase3.solve_via_c(pt_states=pt_state_indexes)\n\n self.print_cube_add_comment(\"edges EOed into high/low groups\", tmp_solution_len)\n self.post_eo_state = self.state[:]\n self.post_eo_solution = self.solution[:]\n\n # re-color the cube so that the edges are oriented correctly so we can\n # pair 4-edges then 8-edges. After all edge pairing is done we will uncolor\n # the cube and re-apply the solution.\n self.edges_flip_orientation(wing_strs, [])\n self.highlow_edges_print()" ]
[ "0.59374243", "0.5722452", "0.51469857", "0.51208717", "0.51167727", "0.5081394", "0.50553244", "0.50473976", "0.5028678", "0.5010286", "0.49406895", "0.49098796", "0.48995638", "0.4865513", "0.48511472", "0.48335147", "0.48096806", "0.47716156", "0.4743922", "0.47341198", "0.4722869", "0.47086594", "0.47077912", "0.4700789", "0.4698528", "0.4685798", "0.4673617", "0.4669398", "0.46493873", "0.4645071" ]
0.7721114
0
Count the number of times elem appears in the reversed iterator.
def count(self, elem): return self.iter.count(elem)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sequence_sorted_count(self, x, reverse=False):\n c = 0\n if reverse: it = reversed(self)\n else: it = iter(self)\n for v in it:\n if x == v:\n c += 1\n break\n for v in it:\n if x == v: c += 1\n else: break\n return c", "def count(self):\n\n count = 0\n x = self.begin\n\n if self.begin == self.end == None:\n return 0\n\n elif self.begin == self.end:\n return 1\n\n else:\n while x:\n count += 1\n x = x.next\n\n return count", "def count(self, element):\n count = 0\n for i in range(self._length): # Increment count when equal value is found\n if self._arr[i] == element:\n count += 1\n return count", "def count(self, elem):\n if not self.step:\n return _coconut.float(\"inf\") if elem == self.start else 0\n return int(elem in self)", "def count(self):\n node = self.head\n i = 0\n while node:\n i += 1\n node = node.next\n\n return i", "def get_count(self):\n count = 0\n temp = self.head\n while temp:\n count += 1\n temp = temp.next\n return count", "def leniter(i):\n return sum(1 for e in i)", "def __len__(self):\n if not self.head:\n return 0\n if not self.head.next:\n return 1\n N, tort, hare = 1, self.head.next, self.head.next.next\n while tort and (tort is not hare):\n N += 1\n tort = tort.next\n if hare and hare.next:\n hare = hare.next.next\n return N", "def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')", "def count(iterable):\n\treturn sum(1 for _ in iterable)", "def count(self, i):\n return sum([1 for j in self if i==j])", "def count(self, item: Any) -> int:\n curr = self._first\n count = 0\n\n while curr is not None:\n if curr.item == item:\n count += 1\n curr = curr.next\n\n return count", "def iter_count(self):\n return self._iter_count", "def __len__(self):\n i = 0\n for S in self.states():\n i += 1\n return i", "def count(self):\n return sum(1 for _ in self)", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def len(self):\n count = 0\n temp = self.head\n while temp.next!=None:\n count += 1\n temp = temp.next\n return(count)", "def length(self): # Class O(n)\r\n h = self.head\r\n size = 1\r\n while 'next' in dir(h.next):\r\n size += 1\r\n h = h.next\r\n return size", "def count(iterable):\n return sum(1 for _ in iterable)", "def cursor_nelements(cursor):\n\tcount = 0\n\tfor data in cursor:\n\t\tcount += 1\n\treturn count", "def get_number_of_inversions_naive(self, lst):\r\n # Running time: O(n ** 2)\r\n count_inv = 0\r\n \r\n for i in range(len(lst)):\r\n for j in range(i+1, len(lst)):\r\n if lst[i] > lst[j]:\r\n count_inv += 1\r\n \r\n return count_inv", "def count(seq):\n\treturn sum(1 for x in seq)", "def __count_inversions(puzzle):\n puzzleLength = len(puzzle)\n count = 0\n for i in range(puzzleLength):\n for j in range(i + 1, puzzleLength):\n if(puzzle[i] > puzzle[j]):\n count += 1\n return count", "def len(self):\n start = self.head\n count = 0\n while start:\n count+=1\n start = start.getLink()\n return count", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def index(self, elem):\n return _coconut.len(self.iter) - self.iter.index(elem) - 1", "def invserion_count(board : list) -> int:\n inv_count = 0\n board_len = len(board)\n for i in range(board_len):\n for j in 
range(i+1,board_len):\n if board[i] and board[j] and board[i] >= board[j]:\n inv_count += 1\n return inv_count", "def size(self):\n traverse = self.head\n count = 0\n while traverse.next != None:\n traverse = traverse.next\n count += 1\n return count + 1", "def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur" ]
[ "0.710874", "0.67030925", "0.65069044", "0.6468267", "0.6360811", "0.6293502", "0.62844634", "0.6255483", "0.62217087", "0.6204283", "0.61864936", "0.6095033", "0.6027348", "0.5992894", "0.5981212", "0.59621656", "0.59607416", "0.5958107", "0.59503806", "0.593788", "0.59354264", "0.59248734", "0.59003836", "0.5895588", "0.5893935", "0.5893935", "0.58929145", "0.58881074", "0.5884157", "0.58774316" ]
0.70186836
1
Find the index of elem in the reversed iterator.
def index(self, elem): return _coconut.len(self.iter) - self.iter.index(elem) - 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def r_index(sequence, element):\n\n for i, e in enumerate(reversed(sequence)):\n if element == e:\n return len(sequence) - 1 - i\n else:\n raise ValueError(\"r_index(sequence, element):\\\n element not in the sequence\")", "def index(self, elem):\n pointer = self.head\n i = 0\n while (pointer):\n if pointer.data == elem:\n return i\n pointer = pointer.next\n i += 1\n raise ValueError(\"{} is not in list\".format(elem))", "def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)", "def index(self, elem):\n ponteiro = self.inicio\n i = 0\n while(ponteiro):\n if ponteiro.dado == elem:\n return i\n ponteiro = ponteiro.prox\n i = i + 1\n raise ValueError(\"{} is not in list\".format(elem))", "def reverse_linear_search(lst, value):\n i = len(lst) - 1\n while i != -1 and lst[i] != value:\n i = i + 1\n if i == -1:\n return -1\n else:\n return i", "def find_index(arr, pred):\n for index, elem in enumerate(arr):\n if pred(elem):\n return index\n return -1", "def get_right_index(i):\n pos = i + 1\n right_pos = 2 * pos + 1\n right_index = right_pos - 1\n return right_index", "def findlastindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1)", "def search(elements_list, element):\n for index, item in enumerate(elements_list):\n if item == element:\n return index\n return -1", "def index(self, elem):\n if elem not in self:\n raise _coconut.ValueError(_coconut.repr(elem) + \" not in \" + _coconut.repr(self))\n return (elem - self.start) // self.step if self.step else 0", "def get_element_index(el, elements):\n for idx, element in enumerate(elements):\n diff = torch.sum(torch.abs(el - element))\n if diff.item() < 1e-8:\n return idx\n return None", "def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1", "def index(self, item: Any) -> int:\n index_so_far = 0\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return index_so_far\n index_so_far += 1\n curr = curr.next\n raise ValueError", "def last_index(self, item):\n return _(self.size()._ - 1 - self.reverse().index(item)._)", "def f_index(self, substring, direction=[]):\n substr = self.value(substring)\n if \"back\" in direction:\n pos = self._val.rfind(substr)\n else:\n pos = self._val.find(substr)\n\n return pos + 1", "def get_index(self, child):\n for _index, item in enumerate(self.children):\n if item == child:\n return _index\n\n return -1", "def element_index(self):\n return self._index", "def find(self, data):\n index = 0\n current = self.head\n while current:\n if current.data == data:\n return index\n index += 1\n current = current.next\n\n return -1", "def indexMatching(seq, condition):\n for i,x in enumerate(seq):\n if condition(x):\n return i\n return -1", "def search_for_nums(data):\n index = None\n for i in range(len(data)-1,0, -1): #count backwards through the loop\n if data[i] != None: #found most recent input\n print(\"index found...data: %s\" % (data[i]))\n return i\n #END IF\n #END FOR\n return index", "def linear_search(element, list_of_elements):\n for i, elem in enumerate(list_of_elements):\n if elem == element:\n return i\n return None", "def index(self, item):\n \"\"\"\n :type item: Node\n :rtype int\n \"\"\"\n curr = self.head\n idx = 0\n while curr:\n if item == curr.getData():\n break\n idx += 1\n curr = 
curr.getNext()\n return idx", "def edge_index(indexed_triangle, edge):\n for i in range(3):\n triangle_edge = indexed_triangle[(i + 1) % 3], indexed_triangle[(i + 2) % 3]\n if triangle_edge == edge:\n return i\n triangle_edge = triangle_edge[1], triangle_edge[0]\n if triangle_edge == edge:\n return i\n # Edge not found in triangle\n assert False", "def find_index(numbers, element):\n index = 0\n for item in numbers:\n if element != item:\n index += 1\n elif element == item:\n return index", "def _find_position(self, element):\n walk = self._data.first()\n while walk is not None and walk.element()._value != element:\n walk = self._data.after(walk)\n return walk", "def index(self, item):\n\t\ti = 0\t\t\n\t\tif not self.len:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\treturn i\n\t\tactual = self.prim\n\t\twhile actual and actual.dato != item:\n\t\t\tactual = actual.prox\n\t\t\ti += 1\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\treturn i", "def index(self, data):\n\n traverse = self.head\n index = 0\n while traverse.next != None:\n\n if traverse.data == data:\n return index\n traverse = traverse.next\n index += 1\n\n if traverse.data == data:\n return index", "def find_index(draw, urn):\n i = 0\n while draw >= 0:\n draw -= urn[i]\n i+=1\n return i-1", "def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index", "def index(self, pos):\n for i, n in enumerate(self):\n if i == pos: return n\n raise Exception('Index out of bounds.')" ]
[ "0.7337428", "0.7109907", "0.67639583", "0.6708626", "0.66251403", "0.6604515", "0.6553324", "0.65205306", "0.6481329", "0.6373556", "0.6335034", "0.6285708", "0.6278031", "0.6270351", "0.62547594", "0.6241182", "0.62061155", "0.6160364", "0.6146057", "0.61259276", "0.61254555", "0.61192966", "0.61157286", "0.610275", "0.61026233", "0.6048159", "0.6036537", "0.60223585", "0.5985957", "0.59512293" ]
0.7336818
1
consume(iterable, keep_last) fully exhausts iterable and return the last keep_last elements.
def consume(iterable, keep_last=0): return _coconut.collections.deque(iterable, maxlen=keep_last)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last(iterable):\n d = deque(iterable, maxlen=1)\n try:\n return d.pop()\n except IndexError:\n raise ValueError(\"Cannot return last item from empty iterable {!r}\".format(iterable))", "def last(iterable):\n it = iter(iterable)\n item = next(it)\n for item in it:\n pass\n return item", "def return_last(iter):\n for thing in iter:\n pass\n return thing", "def last(iterator):\n item = None\n for item in iterator:\n pass\n return item", "def consume(iterator):\n deque(iterator, maxlen=0)", "def last(seq):\n try:\n return seq[-1]\n except TypeError:\n old = None\n it = iter(seq)\n while True:\n try:\n old = next(it)\n except StopIteration:\n return old", "def last(iterable, *default):\n\tassert len(default) <= 1\n\titerable = iter(iterable)\n\n\ttry:\n\t\tx = next(iterable)\n\texcept StopIteration:\n\t\tif default:\n\t\t\treturn default[0]\n\t\traise\n\n\tfor x in iterable:\n\t\tpass\n\treturn x", "def tail(self, *, peek=False, ignore_no_item_found=False, only_new_items=False):\n with self._buffer.reader_lock() as reader:\n if only_new_items:\n try:\n item = reader.find_last_gt(self.last_key)\n except ValueError as e: # ValueError: No item found with key above: self.last_key\n if ignore_no_item_found:\n return None\n raise e\n except TypeError as e:\n # TypeError: '>' not supported between instances of type(key) and 'NoneType'\n if self.last_item is None: # first time reading a value from buffer\n item = reader[-1]\n else:\n raise e\n else:\n try:\n item = reader[-1]\n except IndexError as e: # IndexError: deque index out of range\n if ignore_no_item_found:\n return None\n raise e\n if not peek:\n self.last_item = item\n return item", "def getLatest(self, limit=None):\n count = len(self)\n if limit is None or limit > count:\n limit = count\n if not limit:\n return []\n return list(self[-limit:])", "def take_last(\n count: int,\n) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:\n\n def _take_last(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:\n async def subscribe_async(aobv: AsyncObserver[_TSource]) -> AsyncDisposable:\n safe_obv, auto_detach = auto_detach_observer(aobv)\n queue: List[_TSource] = []\n\n async def asend(value: _TSource) -> None:\n queue.append(value)\n if len(queue) > count:\n queue.pop(0)\n\n async def aclose() -> None:\n for item in queue:\n await safe_obv.asend(item)\n await safe_obv.aclose()\n\n obv = AsyncAnonymousObserver(asend, safe_obv.athrow, aclose)\n return await pipe(obv, source.subscribe_async, auto_detach)\n\n return AsyncAnonymousObservable(subscribe_async)\n\n return _take_last", "def tail(iterable, n):\n if n <= 0:\n return []\n return list(deque(iterable, maxlen=n))", "def pick(iterable):\n for element in iterable:\n yield element\n while True:\n yield element", "def peek_rear(self):\n if self.items:\n return self.items[-1]\n return None", "def until_last(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[:matches[-1]+1])", "def dropwhile(iterable, predicate):\n return iter(it.dropwhile(predicate, iterable))", "def take_while(coll, func):\n i = 0\n while i < len(coll) and func(coll[i]):\n i += 1\n return coll[:i]", "def butlast(mylist):\n # This returns a copy of mylist\n return mylist[:-1]", "def take(num, iterable):\n return list(islice(iterable, num))", "def get_only(seq: Iterable[T]) -> T:\n it = iter(seq)\n try:\n first_element = it.__next__()\n # we use the sentinel approach rather 
than the usual (evil) Python \"attempt can catch the\n # exception\" approach to avoid raising zillions of spurious exceptions on the expected\n # code path, which makes debugging a pain\n sentinel = object()\n second_element = next(it, sentinel)\n if second_element is sentinel:\n return first_element\n else:\n got_msg: str\n if isinstance(seq, Sized):\n got_msg = str_list_limited(seq, limit=10)\n else:\n got_msg = f\"{first_element!r}, {second_element!r}, and possibly more.\"\n raise ValueError(f\"Expected one item in sequence but got {got_msg}\")\n except StopIteration:\n raise ValueError(\"Expected one item in sequence but got none\")", "def PeekIterable(iterable):\n try:\n head_element = iterable.next()\n new_iterable = itertools.chain([head_element], iterable)\n return head_element, new_iterable\n except StopIteration:\n return None, iterable", "def consume(iterator, n=None, next=next, islice=islice, deque=deque):\n if n is not None:\n next(islice(iterator, n, n), None)\n else:\n exhaust(iterator)", "def peek(iterable, size=1):\r\n objs = []\r\n for _ in range(size):\r\n try:\r\n obj = next(iterable)\r\n except StopIteration:\r\n break\r\n objs.append(obj)\r\n return objs, itertools.chain(objs, iterable)", "def test_that_peekleft_returns_tail_from_list_of_iterables(iterable):\n from deque import Deque\n new_deque = Deque(iterable)\n assert new_deque.peekleft() == new_deque.popleft()", "def consume(\n self,\n count: int = 1,\n ) -> List[T]:\n return list(islice(self, count))", "def peek_last(self):\n if self.is_empty(): raise RuntimeError(\"Empty list\")\n return self.tail.data", "def get_last(self, limit = 1):\n if len(self.data) == 0:\n return None\n self.sort_and_reduce()\n if len(self.data) < limit:\n limit = len(self.data)\n\n return self.data[-limit:][0]", "def take_max(self):\n return self.delete_first()", "def first_every_last(iterable, first, every, last):\n did_first = False\n for item in iterable:\n if not did_first:\n first(item)\n every(item)\n if did_first:\n last(item)", "def test_deque_works_on_diff_iterables_head_is_last_val_pushed_after_pop(iterable):\n from deque import Deque\n new_deque = Deque(iterable)\n new_deque.pop()\n assert new_deque._container.head.val == iterable[-2]", "def drop(iterable, n, islice=islice):\n return islice(iterable, n, None)" ]
[ "0.65945584", "0.6441955", "0.6378709", "0.6298682", "0.62027675", "0.5920305", "0.58600926", "0.56532353", "0.5636033", "0.5536152", "0.54646903", "0.5434246", "0.5425633", "0.53895926", "0.5381128", "0.5375591", "0.5357746", "0.5334514", "0.5308047", "0.52714187", "0.5265742", "0.52291846", "0.52204406", "0.51856124", "0.51844627", "0.5172889", "0.5167413", "0.51408297", "0.51221836", "0.5112314" ]
0.7717289
0
Construct an object of the given data_type containing the given arguments.
def makedata(data_type, *args): if _coconut.hasattr(data_type, "_make") and _coconut.issubclass(data_type, _coconut.tuple): return data_type._make(args) if _coconut.issubclass(data_type, (_coconut.map, _coconut.range, _coconut.abc.Iterator)): return args if _coconut.issubclass(data_type, _coconut.str): return "".join(args) return data_type(args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, data_type=None):\n self.type = data_type", "def from_data(cls, data):\n return object.__new__(cls)", "def create(cls, data=None):\n # allow create() calls with no input\n if not data:\n data = {}\n\n return cls(**data)", "def __init__(self,\n type_id: int,\n data,\n data_type: DataType = DataType.AUTODETECT,\n length=-1):\n if type_id < 0 or 255 < type_id:\n raise ValueError('The type_id parameter must between 0 and 255 but is {val}'.format(val=type_id))\n self.type_id = type_id\n self.data_type = data_type\n self.data = data\n self.length = length", "def _create_Data(classname, typename):\n attributes = {\"typename\": typename}\n if \"complex\" in typename:\n def format_sampler(self, val):\n \"\"\"Format surrounded by [] for the sampler.\n\n 2x space (for real and complex parts).\n \"\"\"\n if isinstance(val, int):\n return \"[%s]\" % (2 * val)\n return val\n attributes[\"format_sampler\"] = format_sampler\n globals()[classname] = type(classname, (Data,), attributes)", "def build(cls, data, _type): # type: ignore[no-untyped-def]\n if isinstance(data, list):\n backing = Int32Field(cls._convert_to_int(data))\n elif isinstance(data, Int32Field):\n backing = data\n elif isinstance(data, float):\n backing = Int32Field(int(data * sensor_fixed_point_conversion))\n else:\n backing = Int32Field(data)\n as_int = int(backing.value)\n if isinstance(_type, SensorTypeField):\n _converted_type = SensorType(_type.value)\n else:\n _converted_type = _type\n return cls(backing, as_int, _converted_type)", "def __init__(self, *args):\n if len(args) == 1 and isinstance(args[0], str):\n self._data = tuple(int(i) for i in str(args[0]).split(\".\"))\n elif len(args) == 1 and isinstance(args[0], Iterable):\n self._data = tuple(int(i) for i in args[0])\n else:\n self._data = tuple(int(i) for i in args)", "def __init__(self, typt_type: Type, data: str, *args, **kwargs):\n self.typt_type = typt_type\n self.data = data\n\n super().__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n if not args:\n raise TypeError('Field definition incorrect, please provide type')\n elif not isinstance(args[0], type):\n raise TypeError('Field input not a type')\n self.data_type = args[0]\n if ((self.data_type not in self.allowed_types and\n not issubclass(self.data_type, self.allowed_types))):\n raise TypeError('Field input type %s is not allowed' % self.data_type)\n self.check_kwargs(kwargs, self.data_type)\n # attributes\n if 'auto_update' in kwargs and kwargs['auto_update']:\n self.auto_update = self.data_type.utcnow # datetime.datetime\n if 'document_class' in kwargs and kwargs['document_class']:\n self.document_class = kwargs['document_class']\n self.validator = self.generate_validator(self.data_type, **kwargs)\n self.required = kwargs['required'] if 'required' in kwargs else True\n if 'default' in kwargs:\n self.default_value = kwargs['default']\n if not callable(self.default_value):\n validation_failed = False\n try:\n self.validator(self.default_value)\n except ValidationError as e:\n new_err = ('default value \"%s\"' % kwargs['default']) + ''.join(e.args)\n validation_failed = True\n if validation_failed:\n raise TypeError(new_err)\n # check if dict/list type and wrap copy in callable\n if isinstance(self.default_value, (dict, list)):\n def default_value_wrapper():\n return copy.deepcopy(kwargs['default'])\n self.default_value = default_value_wrapper", "def _PythonToCtype(data, c_type):\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n 
c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data", "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t", "def __new__(cls,name,description,args_in,required=True,data_type=None,schema=None):\n mydict={\n \"name\":name,\n \"description\":description,\n \"in\":args_in,\n \"required\":required,\n \"schema\":schema,\n \"type\":data_type,\n }\n if args_in!=\"body\":\n mydict[\"type\"]=data_type\n return mydict", "def from_data(cls,data):\n\n new_object = cls() # Only this line needs to be updated\n new_object.data = data\n\n return new_object", "def _instantiate(clz, **data):\n\n new_obj = clz()\n setattr(new_obj, \"data\", data)\n for key, val in deepcopy(data).items():\n setattr(new_obj, key, val)\n return new_obj", "def from_data(cls, data):\n self = object.__new__(cls)\n self.required = parse_required(data)\n self.title = parse_title(data)\n self.type = parse_type(data)\n self.values = parse_values(data)\n return self", "def load(cls, data):\n return cls(**data)", "def __init__(__self__, *,\n data_type: pulumi.Input['AssetModelDataType'],\n logical_id: pulumi.Input[str],\n name: pulumi.Input[str],\n type: pulumi.Input['AssetModelPropertyTypeArgs'],\n data_type_spec: Optional[pulumi.Input['AssetModelDataTypeSpec']] = None,\n unit: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"data_type\", data_type)\n pulumi.set(__self__, \"logical_id\", logical_id)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"type\", type)\n if data_type_spec is not None:\n pulumi.set(__self__, \"data_type_spec\", data_type_spec)\n if unit is not None:\n pulumi.set(__self__, \"unit\", unit)", "def createData(self, address: ghidra.program.model.address.Address, datatype: ghidra.program.model.data.DataType) -> ghidra.program.model.listing.Data:\n ...", "def __init__(self, data, time_unit, metadata=None):\r\n # Check that sensible time units were given\r\n if time_unit not in time_unit_conversion:\r\n raise ValueError('Invalid time unit %s, must be one of %s' %\r\n (time_unit, time_unit_conversion.keys()))\r\n\r\n #: the data is an arbitrary numpy array\r\n self.data = np.asanyarray(data)\r\n self.time_unit = time_unit\r\n\r\n # Every instance carries an empty metadata dict, which we promise never\r\n # to touch. 
This reserves this name as a user area for extra\r\n # information without the danger of name clashes in the future.\r\n if metadata is None:\r\n self.metadata = {}\r\n else:\r\n self.metadata = metadata", "def __init__(self, type, data):\n # Check type is type int\n if not isinstance(type, int):\n raise TypeError(\"ext type is not type integer\")\n # Check data is type bytes\n elif sys.version_info[0] == 3 and not isinstance(data, bytes):\n raise TypeError(\"ext data is not type \\'bytes\\'\")\n elif sys.version_info[0] == 2 and not isinstance(data, str):\n raise TypeError(\"ext data is not type \\'str\\'\")\n self.type = type\n self.data = data", "def create(self, data):\n raise NotImplementedError", "def __init__(self, type_name, args):\n super().__init__()\n self.type_name = type_name\n self.args = args\n self._projection = None", "def create(self, cls, data=None):\n return cls(self, initial_data=data)", "def __init__(self, *args, **kwargs):\n nargs = len(args) + len(kwargs)\n if nargs == 0:\n raise TypeError(\"one or more arguments required (0 given)\")\n \n first_arg = args[0]\n if isinstance(first_arg, str):\n if nargs > 2 or (nargs > 1 and \"quiet\" not in kwargs):\n raise TypeError(\n \"incorrect arguments for creating Dta from file\"\n )\n self._new_from_file(*args, **kwargs)\n elif isinstance(first_arg, Dta):\n if nargs > 3:\n raise TypeError(\n \"too many arguments to create Dta from existing Dta\"\n )\n self._new_from_dta(*args, **kwargs)\n elif isinstance(first_arg, collections.Iterable):\n self._new_from_iter(*args, **kwargs)\n else:\n raise TypeError(\"Dta cannot be created from these arguments:\")", "def __new__(subtype,parent,name,typecode,dimensions,**kwds):\n if 'values' in kwds.keys():\n result=kwds.pop('values')\n else:\n shape=[]\n for d in dimensions:\n dim = parent.dimensions[d]\n\n # Adding support for netCDF3 dimension objects\n if not isinstance(dim, int):\n dim = len(dim)\n shape.append(dim)\n\n result=np.zeros(shape,typecode)\n \n result=result[...].view(subtype)\n\n result.typecode = lambda: typecode\n result.dimensions = tuple(dimensions)\n result._ncattrs = ()\n for k,v in kwds.items():\n setattr(result,k,v)\n return result", "def _create_dataclass(obj, plain_dict=False):\n if plain_dict:\n items = obj\n name = \"Obj\"\n else:\n name = obj[\"class_name\"]\n items = obj[\"data\"]\n\n cls = dataclasses.make_dataclass(name, items.keys())\n return cls(**items)", "def __init__(self, *args, name=''):\n from collections import Iterable\n if len(args) == 1:\n if isinstance(args[0], Point):\n self.data = args[0].data.copy()\n elif isinstance(args[0], Iterable):\n self.data = list(args[0])\n else:\n self.data = list(args)\n self.name = name if not name.isspace() else ''", "def _make_cpp_data(id, timestamp, instrument, exchange, data):\n return DataCpp(id, timestamp, instrument, exchange, data)", "def __init__(self, data_type, other_props=None):\n if data_type not in VALID_TYPES:\n raise SchemaParseException('%r is not a valid Avro type.' % data_type)\n\n # All properties of this schema, as a map: property name -> property value\n self._props = {}\n\n self._props['type'] = data_type\n self._type = data_type\n\n if other_props:\n self._props.update(other_props)", "def __init__(self, name, data_type, functional=False, locking=True, indexed=False, unique=False):\r\n self.name = name\r\n self.data_type = data_type\r\n self.functional = functional\r\n self.locking = locking\r\n self.indexed = indexed\r\n self.unique = unique" ]
[ "0.65806115", "0.6494723", "0.6493428", "0.6436474", "0.63050216", "0.61820936", "0.6163499", "0.61415094", "0.60745573", "0.6070686", "0.6051727", "0.60087764", "0.6002497", "0.5975732", "0.5947289", "0.58672804", "0.5859592", "0.58576393", "0.5827365", "0.5820931", "0.5812757", "0.58077604", "0.57782376", "0.57708186", "0.57500553", "0.5722772", "0.5706164", "0.5698229", "0.5673332", "0.5646855" ]
0.7436465
0
fmap(func, obj) creates a copy of obj with func applied to its contents. Override by defining obj.__fmap__(func).
def fmap(func, obj):
    if _coconut.hasattr(obj, "__fmap__"):
        return obj.__fmap__(func)
    if obj.__class__.__module__ == "numpy":
        from numpy import vectorize
        return vectorize(func)(obj)
    return _coconut_makedata(obj.__class__, *(_coconut_starmap(func, obj.items()) if _coconut.isinstance(obj, _coconut.abc.Mapping) else _coconut_map(func, obj)))
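A minimal usage sketch of the contract described above. This is not the Coconut runtime itself: simple_fmap and Box are illustrative stand-ins that show how the __fmap__ override and the default rebuild-a-copy-from-contents path are meant to behave for ordinary iterables.

# Illustrative stand-in for the fmap contract (not Coconut's runtime).
def simple_fmap(func, obj):
    if hasattr(obj, "__fmap__"):                  # a user-defined override wins
        return obj.__fmap__(func)
    return obj.__class__(func(x) for x in obj)    # rebuild a copy from mapped contents

class Box:
    def __init__(self, value):
        self.value = value
    def __fmap__(self, func):
        # Return a copy with func applied to the contents, leaving self untouched.
        return Box(func(self.value))

assert simple_fmap(lambda x: x + 1, [1, 2, 3]) == [2, 3, 4]
assert simple_fmap(str.upper, ("a", "b")) == ("A", "B")
assert simple_fmap(lambda x: x * 10, Box(4)).value == 40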
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fmap(function, descriptor):\n return MappedDescriptor(descriptor, function)", "def map(self, func):\n return _(map(func, self._))", "def map(self, function):\n return FunctionalWrapper(map(function, self.data))", "def fmap(self, func):\n @wraps(self.v)\n def state_mapper(state, func=func, runner=self):\n result, new_state = runner(state)\n return (func(result), state)\n\n return State(state_mapper)", "def imap(self, func: Callable[[T], V]) -> '_[V]':\n return _(map(func, self.array))", "def map(self, func):\n if self.is_right(): return self.right.map(func)\n if self.is_left(): return self.left.map(func)", "def map(self, function):\n pass", "def __call__(self, func, *args, **kwds):\r\n results = self.map(func, *args, **kwds)\r\n if results:\r\n return results[0]", "def methdispatch(func): \n dispatcher = singledispatch(func)\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[1].__class__)(*args, **kw)\n wrapper.register = dispatcher.register\n update_wrapper(wrapper, func)\n return wrapper", "def methdispatch(func):\n # ref:\n # https://stackoverflow.com/questions/24601722/how-can-i-use-functools-singledispatch-with-instance-methods\n dispatcher = singledispatch(func)\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[1].__class__)(*args, **kw)\n wrapper.register = dispatcher.register\n update_wrapper(wrapper, func)\n return wrapper", "def map(self, func, inplace=True):\n # only leaves have to be adapted\n new_leaves = [func(l) for l in self.leaves]\n if inplace:\n self.leaves = new_leaves\n return self\n else:\n return Structure(struct=self.struct, leaves=new_leaves)", "def pfmap(func, workers=8):\n return fmap(func)", "def transform(self, func):\n return func(self)", "def imap_c(func):\n return functools.partial(imap, func)", "def map(self, func):\n return List(map(func, self))", "def map_collection(func, collection):\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection", "def map(self, obj):\n if isinstance(obj, np.ndarray) and obj.ndim >= 2 and obj.shape[0] in (2,3):\n return fn.transformCoordinates(self, obj)\n else:\n return QtGui.QMatrix4x4.map(self, obj)", "def map(self, fn, inv_fn):\r\n\t\treturn MapProjectedList(self, [fn], [inv_fn])", "def lift(func: Callable) -> Callable:\n return lambda f: compose2(func, f)", "def apply(native_object, function, *args, **kwargs):\n result = unwrap(function(wrap(native_object), *args, **kwargs))\n return result", "def __call__(self, f):\r\n return self.apply(f, None)", "def _map_fn(self):\n raise NotImplementedError", "def applymap(self, func, *args, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.applymap)(\n self, func, *args, **kwargs\n )", "def flat_map(fn, collection):\n return chain.from_iterable(map(fn, collection))", "def map(self, f):\n self.append(Mapper(f))\n return self", "def Map(\r\n data,\r\n map_fct: Callable,\r\n info: List[Dict] = None,\r\n lazy: bool = True,\r\n workers: int = 1,\r\n buffer_len: int = 3,\r\n *arg: list,\r\n **kwargs: Dict\r\n) -> Union[MapAbstract, DataAbstract, np.ndarray, list]:\r\n\r\n if lazy:\r\n return MapAbstract(data, map_fct, *arg, info=info, **kwargs)\r\n else:\r\n return DataAbstract(\r\n MapAbstract(data, map_fct, *arg, info=info, **kwargs),\r\n workers=workers,\r\n buffer_len=buffer_len,\r\n )[:]", "def mapf( f, C 
):\n return (f(x) for x in C)", "def map(self, mapper):\n def _map(iterator):\n return mapper(next(iterator))\n return self.__class__(self, _map)", "def map_method(self, filter_func, method_name, *args, **kwds):\r\n return self.map(filter_func, self._call_extension_method,\r\n method_name, *args, **kwds)", "def singledispatchmethod(func):\n dispatcher = functools.singledispatch(func)\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[0].__class__).__func__(*args, **kw)\n wrapper.register = dispatcher.register\n functools.update_wrapper(wrapper, func)\n return wrapper" ]
[ "0.6220919", "0.5839332", "0.5682536", "0.56519437", "0.55576444", "0.5554828", "0.54123265", "0.53920573", "0.5390306", "0.53785086", "0.5342291", "0.5333462", "0.53316593", "0.532099", "0.526166", "0.52036095", "0.52029043", "0.51939046", "0.5186709", "0.51471406", "0.50727075", "0.5064578", "0.50631595", "0.5056027", "0.5044718", "0.5043015", "0.5027963", "0.4981475", "0.4974079", "0.49624532" ]
0.80145234
0
Thread loop. This is an infinite loop. The iter method calls self.sql_queue.get() which blocks if there are no values in the queue. As soon as values are placed into the queue the process will continue. If many executes happen at once it will churn through them all before calling commit() to speed things up by reducing the number of times commit is called.
def run(self):
    logging.debug("run: Thread started")
    execute_count = 0
    for token, query, values in iter(self.sql_queue.get, None):
        logging.debug("sql_queue: %s", self.sql_queue.qsize())
        if token != self.exit_token:
            logging.debug("run: %s", query)
            self.run_query(token, query, values)
            execute_count += 1
            # Let the executes build up a little before committing to disk
            # to speed things up.
            if self.sql_queue.empty() \
                    or execute_count == self.max_queue_size:
                logging.debug("run: commit")
                self.sqlite3_conn.commit()
                execute_count = 0
            pass  # exit if

        # Only exit if the queue is empty. Otherwise keep getting
        # through the queue until it's empty.
        if self.exit_set and self.sql_queue.empty():
            self.sqlite3_conn.commit()
            self.sqlite3_conn.close()
            self.thread_running = False
            return
        pass
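The loop above leans on two ideas: iter() with a None sentinel to drain a queue until shutdown, and batching commits while work keeps arriving. A standalone sketch of that queue/sentinel pattern using only the standard library; the worker function and names here are illustrative, not the real class.

# Standalone sketch of the queue/sentinel pattern (illustrative names only).
import queue
import threading

q = queue.Queue()
processed = []

def worker():
    # iter() with a sentinel keeps calling q.get() until it returns None.
    for item in iter(q.get, None):
        processed.append(item)      # stand-in for run_query()
        if q.empty():
            pass                    # stand-in for the batched commit()

t = threading.Thread(target=worker)
t.start()
for i in range(5):
    q.put(i)
q.put(None)                         # the sentinel ends the for loop in the worker
t.join()
assert processed == [0, 1, 2, 3, 4]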
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\r\n num_workers = g.num_query_queue_workers\r\n wq = WorkQueue(num_workers = num_workers)\r\n wq.start()\r\n\r\n while True:\r\n job = None\r\n #limit the total number of jobs in the WorkQueue. we don't\r\n #need to load the entire db queue right away (the db queue can\r\n #get quite large).\r\n if len(running) < 2 * num_workers:\r\n with running_lock:\r\n iden, pickled_cr = get_query()\r\n if pickled_cr is not None:\r\n if not iden in running:\r\n running.add(iden)\r\n job = make_query_job(iden, pickled_cr)\r\n wq.add(job)\r\n\r\n #if we didn't find a job, sleep before trying again\r\n if not job:\r\n time.sleep(1)", "def worker(self, queue):\n with sa.create_engine(dsn).connect() as dbcon:\n while True:\n if queue.qsize() == 0:\n sleep(1)\n if queue.qsize() == 0:\n break\n continue\n item = queue.get()\n try:\n if hash(item['title']) in self.exist_products:\n dbcon.execute(Product.update().values(**item).where(Product.c.id == self.get_id(item)))\n else:\n result = dbcon.execute(Product.insert().values(**item))\n self.exist_products[hash(item['title'])] = result.inserted_primary_key[0]\n except Exception as e:\n print(type(e), e)", "def _run(self) -> None:\n while True:\n args: MigrationArgs = self._queue.get(block=True)\n with self._lock:\n if args.collection in self._chunks:\n if args.shard_key not in self._chunks[args.collection]:\n self._split_chunk(args.collection, args.shard_key)\n self._move_chunk(args)", "def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))", "def run(self):\n # First thing, create a MySQL connection for this thread\n self.connect()\n\n # Start thread's ioloop\n while self.conn.running:\n result = None\n error = None\n cursor = None\n try:\n # Get next task from queue\n task = self.conn.queue.get(True)\n # Handle special abort command\n if task['command'] == 'abort':\n self.conn.queue.put(task)\n break\n\n # Ignore Transactions which are not this thread's\n tx_id = task.get('tx_id')\n if tx_id is not None:\n if tx_id != self.name:\n # Put task request back into queue and wait again\n self.conn.queue.put(task)\n continue\n\n # Handle transactions\n if task['command'] == '*begin-tx*':\n if self.in_tx:\n # Already attending a transaction, return request to queue\n self.conn.queue.put(task)\n continue\n else:\n # Signal this Thread will handle the Transaction!\n self.in_tx = True\n result = self.name\n elif task['command'] == '*end-tx*':\n if self.in_tx and task['tx_id'] == self.name:\n # This is our signal to stop attending this transaction\n self.in_tx = False\n else:\n # Not attending a transaction or it's not our transaction. 
Either way, ignore request\n self.conn.queue.put(task)\n continue\n else:\n # Get a DB cursor and execute query (at most 3 times!)\n retries = 3\n while retries > 0:\n try:\n cursor = self.db.cursor()\n rows_affected = cursor.execute(task['query'], task.get('args'))\n error = None\n break\n except (AttributeError, MySQLdb.OperationalError) as e:\n retries -= 1\n error = e\n cursor = None\n self.connect()\n except Exception as e:\n if cursor is not None:\n cursor.close()\n error = e\n break\n\n if error is None:\n # Determine result\n if task['command'] == 'select':\n # for a SELECT, we want the resultset\n result = list(cursor.fetchall())\n if len(result) == 0:\n result = None\n elif task['command'] == 'insert':\n # for an INSERT, we want the new ID\n result = cursor.lastrowid\n else:\n # for everything else, we'll be fine with rows_affected\n result = rows_affected\n else:\n if retries == 0:\n raise Exception('Failed 3 reconnection attempts to MySQL server: {0}'.format(e))\n except Exception as e:\n error = e\n finally:\n # Make sure we close the DB cursor!\n if cursor is not None:\n cursor.close()\n\n # Send result to the query's request-ee\n self.conn._send_result(task, result, error)\n\n # No more tasks. Close connection\n self.disconnect()", "def run(self):\n _LOGGER.info(\"Started.\")\n\n ExecutorThread.local_thread.executor_object = self\n self.__persister = _persistence.MySQLPersister()\n _persistence.PersistentMeta.init_thread(self.__persister)\n\n procedure = None\n while True:\n if procedure is None or procedure.is_complete():\n procedure = self._next_procedure(procedure)\n _LOGGER.debug(\"Executing procedure (%s).\", procedure)\n if procedure is None:\n break\n\n self.__job = self.__queue.get()\n _LOGGER.debug(\"Reading next job from queue, found %s.\",\n self.__job)\n self.__job.execute(self.__persister, self.__scheduler, self.__queue)\n self.__queue.done()\n\n _persistence.PersistentMeta.deinit_thread()", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def run(self):\n # We defer creating the Couchbase object until we are actually 'in' the\n # separate process here.\n self._connect()\n\n while True:\n next_size = None\n (i, doc, size) = self.in_queue.get()\n # We use a \"magic\" null generator to terminate the workers\n if not doc:\n # Pass the death on...\n self.out_queue.put((i, doc, size))\n break\n # Actually perform the set.\n try:\n next_size = doc.next()\n value = self.buffer[:next_size]\n self._set_with_retry('doc_' + str(i), value)\n size = next_size\n except StopIteration:\n pass\n self.out_queue.put((i, doc, size))", "def processq(self):\n\n while True:\n command = None\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if any(q):\n command = q.pop(0)\n # remember q has now changed\n if not any(q):\n self.queuefile.unlink()\n else:\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()\n\n if command:\n self.execute(command)\n else:\n break", "def _watchdog(self):\n while True:\n try:\n # Arno, 2012-07-12: apswtrace detects 7 s commits with yield 5 min, so reduce\n yield 60.0\n\n # flush changes to disk every 1 minutes\n self._database.commit()\n\n except Exception:\n # OperationalError: database is locked\n dprint(exception=True, level=\"error\")\n\n except GeneratorExit:\n if __debug__: 
dprint(\"shutdown\")\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n self._database.commit(exiting = True)\n break", "def threadWorker(self):\n while True:\n row = self.queue.get() #get a row of data\n if row is None: #ending criterium\n break\n self.similarityQuestions(row) #the actual working function\n self.queue.task_done() #inform the queue one task is done", "def _db_execute(self, cur, sql_query):\n self.ctx.dbq_count += 1\n \n try:\n a = time.time()\n query, params = self._process_query(sql_query)\n out = cur.execute(query, params)\n b = time.time()\n except:\n if self.printing:\n print >> debug, 'ERR:', str(sql_query)\n if self.ctx.transactions:\n self.ctx.transactions[-1].rollback()\n else:\n self.ctx.rollback()\n raise\n\n if self.printing:\n print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))\n return out", "def load(self):\n while True:\n sqldata = self._runsql()\n if not sqldata: # No rows to process. Return for now\n return True\n\n status = self._load_elastic(sqldata)\n if status[1]:\n self.logger.error(\"Errors occurred : %s\" % status[1])\n # TODO: Should we quit(return False) here \n # since there are errors ?\n\n # This should be the remainder and nothing left after that\n # since we didn't exceed max rows\n if len(sqldata) < self.max_rows:\n self.logger.info(\"Finished inserting up to %d\" % self.seq)\n return True", "def _fetch_loop(self, conn: LoggingDatabaseConnection) -> None:\n i = 0\n while True:\n with self._event_fetch_lock:\n event_list = self._event_fetch_list\n self._event_fetch_list = []\n\n if not event_list:\n # There are no requests waiting. If we haven't yet reached the\n # maximum iteration limit, wait for some more requests to turn up.\n # Otherwise, bail out.\n single_threaded = self.database_engine.single_threaded\n if (\n not self.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING\n or single_threaded\n or i > EVENT_QUEUE_ITERATIONS\n ):\n return\n\n self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)\n i += 1\n continue\n i = 0\n\n self._fetch_event_list(conn, event_list)", "def _execute_deferred_queries(self):\n\n assert not self.__is_connected\n\n if not self._deferred_queries:\n return\n\n with Transaction(self.__database_name) as txn:\n while True:\n try:\n query = self._deferred_queries.popleft()\n txn.session.execute(query).close()\n except IndexError:\n break", "def _run(self) -> None:\n try:\n while True:\n loop_time = self._get_time()\n loop_time_flush_interval = self._get_time(self.flush_interval.total_seconds())\n\n if loop_time >= self.flushing_interval_deadline:\n self._flush_batch()\n self.flushing_interval_deadline = loop_time + loop_time_flush_interval\n self.logger.debug('Flush interval deadline. 
Flushed batch.')\n\n try:\n interval = self.flushing_interval_deadline - loop_time\n item = self.event_queue.get(True, interval)\n\n if item is None:\n continue\n\n except queue.Empty:\n continue\n\n if item == self._SHUTDOWN_SIGNAL:\n self.logger.debug('Received shutdown signal.')\n break\n\n if item == self._FLUSH_SIGNAL:\n self.logger.debug('Received flush signal.')\n self._flush_batch()\n continue\n\n if isinstance(item, UserEvent):\n self._add_to_batch(item)\n\n except Exception as exception:\n self.logger.error(f'Uncaught exception processing buffer. Error: {exception}')\n\n finally:\n self.logger.info('Exiting processing loop. Attempting to flush pending events.')\n self._flush_batch()", "def execute(self):\n for db_name in list(self.__dbs):\n try:\n self.__dbs[db_name].execute()\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n traceback_log = ''.join(line for line in lines)\n sys.stderr.write('For DB [ {} ]\\n\\tError:\\n {}'.format(db_name, traceback_log))\n self.__dbs = {}\n self.last_execution_time = time()", "def _doing(self, data):\n curr = self.conn.cursor()\n curr.executemany(self.sql, data)\n self.conn.commit()\n curr.close()", "def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass", "def syncDBLoop(self):\n while 1:\n self.evSyncDB.wait()\n if self.bQuit == True:\n return\n \n self.evSyncDB.clear()\n self.db.syncDB()\n \n self.evAPI.set()", "def insert_many_execute(self) -> None:\n self.connection.isolation_level = None\n self.cursor.execute('BEGIN TRANSACTION')\n for i in self.__sql_buffer.split(';'):\n self.cursor.execute(i)\n self.__sql_buffer = \"\"\n self.cursor.execute('COMMIT')", "def _flush(self):\n tempbuf = self.databuffer\n self.databuffer = []\n self.database.runInteraction(self._executemany, tempbuf)", "def execute(cursor, query):\n while True:\n try:\n cursor.execute(query)\n break\n except Exception as e:\n print(\"Database query: {} {}\".format(cursor, query))\n print(\"Database retry reason: {}\".format(e))\n return cursor", "def execute(self):\n for move in self._queue:\n move.execute()", "def connection_work(\n conn: DBConn,\n conn_id: int,\n obj_list: List[DBObject],\n in_queue: multiprocessing.Queue, # type: ignore\n out_queue: multiprocessing.Queue, # type: ignore\n time_limit_sec: Optional[int],\n done_ctr: multiprocessing.Value,\n ) -> None:\n\n time.sleep(3)\n begin_ts = time.time()\n logger.info(\"[{}]: started thread\".format(conn_id))\n\n empty_cnt: int = 0\n\n while time_limit_sec is None or time.time() < begin_ts < time_limit_sec:\n try:\n txn = in_queue.get(timeout=2)\n except queue.Empty:\n empty_cnt += 1\n if empty_cnt > 3:\n break\n else:\n continue\n\n logger.info(\"[{}]: poped transaction {} (size = {})\".format(conn_id, txn.id, in_queue.qsize()))\n for hist_elem in process_txn(obj_list, conn, conn_id, txn):\n out_queue.put(hist_elem)\n logger.info(\"[{}]: finished transaction {}\".format(conn_id, txn.id))\n\n logger.info(\"[{}]: closing queue (size = {})\".format(conn_id, out_queue.qsize()))\n out_queue.close()\n with done_ctr.get_lock():\n done_ctr.value += 1\n logger.info(\"[{}]: finished thread (done ctr at {})\".format(conn_id, done_ctr.value))\n time.sleep(3)", "def run(self):\n assert self.queue is not None, \"Must specify queue or override run()\"\n\n while not self.terminated():\n qs = 
self.queue.objects.filter(status=self.queue.UNSUBMITTED,).order_by(\n \"-seq\"\n )[: django.conf.settings.DAEMONS_MAX_BATCH_SIZE]\n if not qs:\n self.sleep(django.conf.settings.DAEMONS_IDLE_SLEEP)\n continue\n\n for task_model in qs:\n try:\n self.do_task(task_model)\n task_model.status = self.queue.SUCCESS\n except AsyncProcessingIgnored:\n task_model.status = self.queue.IGNORED\n except Exception as e:\n if isinstance(e, AsyncProcessingRemoteError):\n # This is a bit messy. Do not log a trace when the\n # error is due to the remote service rejecting the request.\n # Such an error is still permanent for the task though.\n self.log.error(e)\n else:\n self.log.error('#' * 100)\n self.log.exception(f'Exception when handling task \"{task_model}\"')\n\n task_model.error = str(e)\n # if self.is_permanent_error(e):\n task_model.status = self.queue.FAILURE\n task_model.errorIsPermanent = True\n # raise\n else:\n task_model.submitTime = self.now_int()\n\n task_model.save()\n\n self.sleep(django.conf.settings.DAEMONS_BATCH_SLEEP)\n self.log.info(\"Exiting run loop.\")", "def run(self):\r\n while True:\r\n try:\r\n processor, iprot, oprot, otrans, callback = self.queue.get()\r\n if processor is None:\r\n break\r\n processor.process(iprot, oprot)\r\n callback(True, otrans.getvalue())\r\n except Exception:\r\n logging.exception(\"Exception while processing request\")\r\n callback(False, '')", "def run_async (self):\n if self.testing:\n return\n conn = Connection(self.db, self.host, self.user, self.passwd, True)\n conn.execute(self.sql)\n self.table = conn.fetch()", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)", "def _EventQueueWorker(self):\n while not self.stop:\n gevent.sleep(0)\n\n try:\n with Timer('task_process_time', False) as t:\n # Pull the next task off the queue.\n task, enqueue_times = model_provider.GetQueues().GetTask()\n\n # Retrieve the Tab Types for the NIDs so we know what handler to use.\n nid_to_type = model_provider.GetNames().GetTypes(task.keys())\n\n # Add the queued state parts to the main states.\n greenlets = [\n gevent.spawn(\n model_provider.GetStates().ReduceInto,\n nid, cid_to_sstate, nid_to_type[nid])\n for nid, cid_to_sstate in task.iteritems()]\n\n gevent.joinall(greenlets)\n\n LOG.info('Processed tasks %3dN %5dC %7.3fs' % (\n len(task), len(enqueue_times), t.Elapsed))\n\n model_provider.GetLatency().RecordForProcessed(enqueue_times)\n\n except Exception:\n LOG.error('Exception in Queue Worker loop')\n LOG.error(traceback.format_exc())\n gevent.sleep(1)" ]
[ "0.7164139", "0.6617921", "0.6452826", "0.6395609", "0.6328278", "0.62974733", "0.62549317", "0.6231184", "0.616941", "0.60640925", "0.6058464", "0.5961299", "0.588708", "0.5882663", "0.5875031", "0.5870313", "0.5865822", "0.5852583", "0.5845655", "0.5839005", "0.5820537", "0.58019185", "0.5799526", "0.5791952", "0.57594746", "0.57514244", "0.5749388", "0.57383335", "0.57342356", "0.57119244" ]
0.7875983
0
Get the query results for a specific token.
def query_results(self, token):
    delay = .001
    while True:
        if token in self.results:
            return_val = self.results[token]
            del self.results[token]
            return return_val
        # Double back on the delay to a max of 8 seconds. This prevents
        # a long lived select statement from trashing the CPU with this
        # infinite loop as it's waiting for the query results.
        logging.debug("Sleeping: %s %s", delay, token)
        time.sleep(delay)
        if delay < 8:
            delay += delay
        pass
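A stripped-down sketch of the polling with exponential back-off used above. poll_for_result and its arguments are hypothetical names; the real method reads self.results, which the worker thread fills in, and the doubling sleep keeps a long-lived SELECT from spinning the CPU while it waits.

# Hypothetical standalone version of the poll-with-back-off idea.
import time

def poll_for_result(results, token, max_delay=8.0):
    delay = 0.001
    while True:
        if token in results:
            return results.pop(token)   # fetch and clear, like query_results()
        time.sleep(delay)
        if delay < max_delay:
            delay += delay              # double the wait, capped at max_delay

results = {"abc": [("row",)]}
assert poll_for_result(results, "abc") == [("row",)]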
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listSearches(self, authenticationToken):\r\n pass", "async def _perform_get_results(self, login_token, result_token):\n data = {\"resultSetToken\": result_token, \"token\": login_token}\n return await self._perform_request(\"get-results\", data, lambda r: r.json())", "def get_query_results(QueryExecutionId=None, NextToken=None, MaxResults=None):\n pass", "def retrieve_results(self, token: str = '', measurement_id: str = ''):\n with open(self.config_file) as json_file:\n data = json.load(json_file)\n if token == '':\n token = data[self.server][self.license_key][self.user.email][\"user_token\"]\n\n if token == '':\n raise ValueError(\"No user token provided. Please log in.\")\n\n if not measurement_id or measurement_id == '':\n res = self.measurement.retrieve()\n else:\n res = self.measurement.retrieve(measurement_id=measurement_id)\n return res", "def getSearch(self, authenticationToken, guid):\r\n pass", "def listSearches(self, authenticationToken):\r\n self.send_listSearches(authenticationToken)\r\n return self.recv_listSearches()", "def getSearch(self, authenticationToken, guid):\r\n self.send_getSearch(authenticationToken, guid)\r\n return self.recv_getSearch()", "def query_results(self):\n return self.details[KEY_QUERY_RESULTS]", "def query(self):\n query_url = self.get_query_url()\n logging.info('Querying: ' + query_url)\n json_data = request.urlopen(query_url).read().decode()\n logging.debug('Retrieved the following ' + json_data)\n response = json.loads(json_data)\n\n return self.get_docs_from_response(response)", "def search(token, query):\n\n url = util.get_url() + f\"drive/root/search(q='{query}')\"\n response = util.rest(\"GET\", url, token)\n\n if response.status_code > 400:\n raise Exception(\"Error \", response.text)\n\n return json.loads(response.text), response.status_code", "def search_v1(query_tokens, inverted_index):\n return []", "def psirt_query(token):\n url = 'https://api.cisco.com/security/advisories/cvrf/latest/10'\n headers = {\n 'Accept': 'application/json',\n 'Authorization': 'Bearer ' + token,\n }\n last_10_vulns = requests.get(url, headers=headers)\n logger.info('query response code = ' + str(last_10_vulns.status_code))\n logger.debug(last_10_vulns)", "def list_recommendations_by_next_token(self, token):\n return self.list_recommendations(next_token=token)", "def get(self, token):\n args = (token, )\n row = self.db_manager.execute_sql_and_fetchone(SQL_TOKEN_GET, args)\n if row:\n token_object = convert_db_row_to_dict(row, TOKEN_MODEL_FIELDS)\n else:\n token_object = {}\n return token_object", "async def _perform_query(self, query, login_token):\n data = {\"username\": self.user, \"query\": query.lower(), \"token\": login_token}\n return await self._perform_request(\"query\", data, lambda r: r.text())", "def getResults():", "def get(self, token):\n try:\n result = self.table.get_item(Key={\"token\": token})\n return result[\"Item\"]\n except Exception as e:\n raise self.RedirectDoesNotExist(f\"token: {token}; exception: {str(e)}\")", "def token_resources(token):\n\n headers = {\n 'accept': 'application/json',\n }\n\n url = 'https://reactome.org/AnalysisService/token/%s/resources' % token\n\n try:\n response = requests.get(url=url, headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.json()\n else:\n print('Status code returned a value of %s' % response.status_code)", "def multiQuery(self, query, limit):\n try:\n results = self.sp.search(query, limit)\n resultLists = results['tracks']['items']\n 
return resultLists\n except spotipy.SpotifyException as se:\n self.authenticate()\n return self.multiQuery(query, limit)", "def search(token, query):\n format_query = query.replace(\" \", \"%20\")\n url = 'https://api.thetvdb.com/search/series?name=' + format_query\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text)\n show_list = json_data.get('data')\n for show in show_list:\n if show.get('status') == 'Continuing':\n show_id = show.get('id')\n s = create_show(token, show_id)\n return s", "def get_results(self):\n return self.results", "def get_results(self):\n return self.results", "def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)", "def get(self, url, token=None):\n return self.app.get(url,\n headers=_token_header(token))", "def _get_fresh_results(session, query_id, result_id):\n response = session.get('{}/api/queries/{}/results/{}.json'.format(REDASH_HOST, query_id, result_id))\n return response", "def get_results(self):\n\n return self.results_", "def get_results(self):\n return self._do_action_under_lock(self._get_all_results)", "def get_user_messages_by_token(token):\n session = get_session_by_token(token)\n if not session['success']:\n return session\n\n return get_user_messages_by_email(token, session['data']['user'])", "def get(self, url, token=None):\n headers = {}\n if token:\n headers = token_header(token)\n return self.app.get(url, headers=headers)", "def request(self, token):\n pass" ]
[ "0.6801665", "0.67875", "0.67327553", "0.62611675", "0.6151787", "0.6134605", "0.5989142", "0.596726", "0.5918041", "0.5917684", "0.5901937", "0.5862396", "0.5783096", "0.5776911", "0.57683307", "0.5756532", "0.5746983", "0.5745782", "0.571591", "0.57145137", "0.5698427", "0.5698427", "0.56116724", "0.5599927", "0.5571779", "0.55698764", "0.5563344", "0.55357367", "0.54741216", "0.5448918" ]
0.7163675
0
Returns the 2nd largest value from a given list.
def second_largest(values: List[int]) -> int:
    try:
        return sorted(set(values))[-2]
    except IndexError:
        raise ValueError("second_largest() needs at least two distinct values")
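A quick usage check of the behaviour described above, assuming the function (and its `from typing import List` import) is in scope; duplicates collapse through set(), so repeated maxima do not count twice.

assert second_largest([3, 1, 4, 1, 5, 9]) == 5
assert second_largest([7, 7, 2]) == 2      # duplicate maxima do not count twice
try:
    second_largest([7, 7])                 # fewer than two distinct values
except ValueError:
    pass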
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def two_largest(inlist):\n largest = second_largest = 0\n it1 = it2 = 0\n\n for i,item in enumerate(inlist):\n if item > largest:\n largest = item\n it1 = i\n elif largest > item > second_largest:\n second_largest = item\n it2 = i\n # Return the results as a tuple\n return largest, it1, second_largest, it2", "def second_largest(number_list):\n for i in range(len(number_list)):\n for j in range(len(number_list) - 1 - i):\n if number_list[j] > number_list[j+1]:\n number_list[j + 1], number_list[j] = number_list[j], number_list[j+1]\n\n return number_list[-2]", "def nth_largest2(a_list, n):\n a_list.sort()\n new_list = a_list[::-1]\n return new_list[n-1]", "def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval", "def findSecondLargest(self):\n l = []\n self.flatten(l)\n print(l)\n print(l[-2])", "def my_max(in_list):\n biggest = in_list[0]\n for l in in_list:\n if l > biggest:\n biggest = l\n return biggest", "def largest_number_at_least_twice_of_others2(nums: [int]) -> int:\n if len(nums) == 1:\n return 0\n\n max_index = nums.index(max(nums))\n max_val = nums.pop(max_index)\n next_max = max(nums)\n\n if next_max * 2 <= max_val:\n return max_index\n return -1", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def max_in_list(list):\n x=list[0] #set x be the first number in the list\n for i in range(0,len(list)):#go over the number in the list\n if x<=list[i]: #if the second one is bigger than the first\n x=list[i] #assign x to the bigger one\n else:\n continue#repeat until find the max number\n return x", "def find_greatest_number(incoming_list):\n #return_value = max(incoming_list)\n #return return_value\n\n MAGIC_LOW_NUMBER = None\n retval = MAGIC_LOW_NUMBER\n\n # 1,2,3,4,5,1\n # MAGIC_LOW_NUMBER, 1 ->STORE 1\n #1 , 2 ->STORE 2\n #2, , 3 ->STORE 3\n #3, , 4 ->STORE 4 \n #4, , 5 ->STORE 5\n #5, , 1 ->??? 
nothing \n for value in incoming_list:\n if not retval:\n retval = value\n if value > retval:\n retval = value", "def find_greatest_number(incoming_list: list):\n return max(incoming_list)", "def largest_item(list):\n pass", "def find_max(list):\n return find_value_at(list, 0)", "def find_max(ls):\n\n if len(ls) == 1:\n return ls[0]\n elif len(ls) == 2:\n return ls[0] if ls[0] > ls[1] else ls[1]\n else:\n mid = len(ls) // 2\n m1 = find_max(ls[0:mid])\n m2 = find_max(ls[mid:])\n return m1 if m1 > m2 else m2", "def largest(n,xs):\n return sorted(xs, reverse = True)[:n][::-1]", "def return_max(lst, highest=None):\n if highest is None and len(lst) > 0:\n highest = lst[0]\n if len(lst) <= 1:\n return highest\n highest = max(highest, lst[0])\n return return_max(lst[1:], highest)", "def getMax(array_list):\n m = array_list[0]\n m_index = 0\n for i,value in enumerate(array_list):\n if value > m:\n m = value\n m_index = i\n return (m_index,m)", "def nextMax(value,lista):\n for i in lista:\n if i>value:\n return i\n raise NameError('No value')", "def max(l):\n if l:\n s_list = sorted(l)\n return s_list[-1]\n else:\n raise ValueError(\"list empty\")", "def greatest_difference(num_list):", "def test_find_second_largest(self):\n secondLargestValue = sorted(self.values)[-2]\n valueFound = self.tree.findSecondLargest(self.tree.root)\n self.assertEquals(secondLargestValue, valueFound)", "def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)", "def max_val_rec(alist):\n ln = len(alist)\n mid = ln//2\n if ln > 2:\n left = max_val_rec(alist[:mid])\n right = max_val_rec(alist[mid:])\n return left if left > right else right\n else:\n return max(alist)", "def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])", "def get_max_loot(input_list):\n even = sum(input_list[::2])\n odd = sum(input_list[1::2])\n return even if even > odd else odd", "def largest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a > b, a, b)\r\n else:\r\n return max(stack(*args), axis=0)", "def recurrent_max_value_in_list(lst, max_value):\n if len(lst) == 0:\n return max_value\n elif lst[0] > max_value:\n max_value = lst[0]\n return recurrent_max_value_in_list(lst[1:], max_value)", "def maximum(some_list):\n return max(some_list)", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]" ]
[ "0.79893696", "0.7755604", "0.76443726", "0.75765264", "0.7529355", "0.75202054", "0.75019395", "0.746542", "0.7456516", "0.738916", "0.7388165", "0.733121", "0.7291496", "0.7259317", "0.7139178", "0.713446", "0.70853615", "0.7072863", "0.6986864", "0.6984842", "0.6957062", "0.6955505", "0.68840003", "0.6868519", "0.6793554", "0.6764099", "0.6757375", "0.67570126", "0.6728527", "0.67183864" ]
0.8168143
0
Try to get the path to pdb.py and return it in a list.
def GetPdbArgs(python):
    # Usually, python is /usr/bin/pythonxx and pdb is /usr/lib/pythonxx/pdb.py
    components = python.split('/')
    if len(components) >= 2:
        pdb_path = '/'.join(components[0:-2] + ['lib'] + components[-1:] + ['pdb.py'])
        if os.access(pdb_path, os.R_OK):
            return [pdb_path]
    # No pdb module found in the python path, default to -m pdb
    return ['-m', 'pdb']
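A short usage sketch, assuming the surrounding module's os import; the paths mentioned are illustrative. For /usr/bin/python2.7 the helper probes /usr/lib/python2.7/pdb.py and otherwise falls back to '-m pdb'.

import sys
pdb_args = GetPdbArgs(sys.executable)
# Either ['/usr/lib/pythonX.Y/pdb.py'] or ['-m', 'pdb'], depending on the install layout.
print(pdb_args)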
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def pdbfile_list():\n \n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def pdb_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\", \"pdb\")", "def get_scripts_already_ran(pgconn):\n\n cursor = pgconn.cursor()\n\n cursor.execute(textwrap.dedent(\"\"\"\n select script_path\n from rtapebbletest_schema_version\n \"\"\"))\n\n # Return these as a set, so lookups are a little faster.\n return {row.script_path for row in cursor}", "def get_breakpoint_files(self):\r\n return self.bpoints.values(key='filename')", "def fetchPDB(pdb_id):\n url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdb_id.split('.')[0]\n return urllib.urlopen(url).read()", "def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts", "def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)", "def fetchPDB(name, path):\n from Bio.PDB import PDBList\n pdbname = os.path.join(path,name+'.pdb')\n pdbl = PDBList()\n filename = pdbl.retrieve_pdb_file(name,pdir=path)\n os.rename(filename, pdbname)\n return", "def ReadPDB (self, pdb_path, db_path):\n\n ReadPDBFile (pdb_path, db_path)\t#", "def process_pdb(self, pdb_filename) :\n args = [self.command, pdb_filename]\n try :\n p = Popen(args, stdout=PIPE)\n (out,err) = p.communicate() \n except OSError :\n raise RuntimeError(\"Cannot communicate with STRIDE.\") \n return out", "def load_dbc_files(dbc_paths):\n import can_decoder\n from pathlib import Path\n\n db_list = []\n for dbc in dbc_paths:\n db = can_decoder.load_dbc(Path(__file__).parent / dbc)\n db_list.append(db)\n\n return db_list", "def read_confs():\n debugger = 'import pdb; pdb.set_trace()\\n'\n ignore = []\n\n home = expanduser('~')\n pdberc = home + '/.pdberc'\n\n confs = {}\n\n if not exists(pdberc):\n return {\n 'debugger': debugger,\n 'ignore': ignore,\n }\n\n with open(pdberc, 'r') as file:\n content = [line.strip() for line in file.readlines()]\n\n for line in content:\n if '=' in line and line.strip()[0] != '#':\n key, value = line.split('=')\n confs[key] = value\n\n if 'debugger' in confs:\n if confs['debugger'] == 'ipdb':\n debugger = 'import ipdb; ipdb.set_trace()\\n'\n\n if 'ignore' in confs:\n ignore = confs['ignore'].split(',')\n\n result = {\n 'debugger': debugger,\n 'ignore': ignore,\n }\n\n return result", "def load_pdblist(pdblist, addext = 0):\n\n\t#Load the pdblist, and convert to a list.\n\tlistfile = open(pdblist, 'r')\n\tpdbs = listfile.readlines()\n\t\n\tfor pdb in pdbs:\n\t\tpdbname = pdb.strip()\n\t\tif (addext):\n\t\t\tpdbname = pdb.strip() + '.pdb'\n\t\t\n\t\tcmd.load(pdbname)", "def getPdbInfo(self) -> ghidra.app.util.bin.format.pdb.PdbInfoIface:\n ...", "def get_python_args(fname, python_args, interact, debug, end_args):\n p_args = []\n if python_args is not None:\n p_args += python_args.split()\n if interact:\n p_args.append('-i')\n if debug:\n p_args.extend(['-m', 'pdb'])\n if fname is not None:\n if os.name == 'nt' and debug:\n # When calling pdb on Windows, one has to 
replace backslashes by\n # slashes to avoid confusion with escape characters (otherwise, \n # for example, '\\t' will be interpreted as a tabulation):\n p_args.append(osp.normpath(fname).replace(os.sep, '/'))\n else:\n p_args.append(fname)\n if end_args:\n p_args.extend(shell_split(end_args))\n return p_args", "def get_db_path():\n \n return(db_run.db_abs_path)", "def read_banfile():\n # matches stuff like\n # \"/GLOW/*\"\n # and extracts the stuff between the quotes\n regex = re.compile(r'^\\s*[\"](/[^\"]+)[\"]\\s*(?:$|[#])')\n bans = []\n\n try:\n with open(BAN_MAPFILE, \"r\", encoding=\"latin-1\") as filehandle:\n for line in filehandle:\n match = regex.match(line)\n if not match:\n continue\n else:\n bans.append(match.group(1))\n except EnvironmentError as err:\n if err.errno == errno.ENOENT:\n logging.getLogger(__name__).warning(\"%s not found - all mappings might fail!\", BAN_MAPFILE)\n else:\n raise\n\n return bans", "def lookup_module(filename):\r\n\r\n # stolen from pdb\r\n import os\r\n import sys\r\n\r\n if os.path.isabs(filename) and os.path.exists(filename):\r\n return filename\r\n f = os.path.join(sys.path[0], filename)\r\n if os.path.exists(f): # and self.canonic(f) == self.mainpyfile:\r\n return f\r\n root, ext = os.path.splitext(filename)\r\n if ext == '':\r\n filename = filename + '.py'\r\n if os.path.isabs(filename):\r\n return filename\r\n for dirname in sys.path:\r\n while os.path.islink(dirname):\r\n dirname = os.readlink(dirname)\r\n fullname = os.path.join(dirname, filename)\r\n if os.path.exists(fullname):\r\n return fullname\r\n return None", "def db_file():\n return abspath('vmchecker.db')", "def get_attached_database_list(self, file=False):\n if self.isMSSQL():\n return [] # pragma: no cover\n else:\n cur = self._connection.cursor()\n cur.execute(\"PRAGMA database_list;\")\n res = cur.fetchall()\n cur.close()\n res = [r for r in res if r[1] != \"temp\" and r[1] != \"main\"]\n if file:\n return [(r[1], r[2]) for r in res]\n else:\n return [r[1] for r in res]", "def _extract_system_path(self, script):\r\n\r\n DEFAULT_PATH = ['code']\r\n\r\n # Separate paths by :, like the system path.\r\n raw_path = script.get('system_path', '').split(\":\") + DEFAULT_PATH\r\n\r\n # find additional comma-separated modules search path\r\n path = []\r\n\r\n for dir in raw_path:\r\n if not dir:\r\n continue\r\n\r\n # path is an absolute path or a path relative to the data dir\r\n dir = os.path.join(self.capa_system.filestore.root_path, dir)\r\n # Check that we are within the filestore tree.\r\n reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)\r\n if \"..\" in reldir:\r\n log.warning(\"Ignoring Python directory outside of course: %r\", dir)\r\n continue\r\n\r\n abs_dir = os.path.normpath(dir)\r\n path.append(abs_dir)\r\n\r\n return path", "def open_pdb_file(dir_pdb: str, debug_mode: bool = False):\n # Get the current logger\n logger = logging.getLogger('root.utils.open_pdb')\n if debug_mode:\n logger.setLevel(10)\n try:\n # open PDB file (as an np-array):\n pdb = np.genfromtxt(dir_pdb, dtype=\"str\")\n\n except FileNotFoundError as e:\n logger.warning(\":: PDB file not found!\")\n logger.error(f\":: {e}\")\n logger.warning(\":: The process was interrupted!\")\n # If no pdb file was found we can't proceed with this node.\n return exit()\n logger.debug(\":: PDB file read complete!\")\n return pdb", "def module_path():\n from sys import path\n from os import getcwd\n from os.path import basename,exists\n from inspect import getmodulename,getfile\n from logging import 
warn\n # 'getfile' retreives the source file name name compiled into the .pyc file.\n pathname = getfile(lambda x: None)\n if exists(pathname): return pathname\n # The module might have been compiled on a different machine or in a\n # different directory.\n pathname = pathname.replace(\"\\\\\",\"/\")\n filename = basename(pathname)\n dirs = [dir for dir in [getcwd()]+path if exists(dir+\"/\"+filename)]\n if len(dirs) == 0: warn(\"pathname of file %r not found\" % filename)\n dir = dirs[0] if len(dirs) > 0 else \".\"\n pathname = dir+\"/\"+filename\n return pathname", "def find_backtrace(self):\n return [ft for ft in os.listdir(self.output_dir)\n if os.path.isfile(ft) and ft.startswith(\"Backtrace.\")]", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]", "def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]", "def get_exec_path():\n if hasattr(sys, \"frozen\"): # compiled by py2exe\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(sys.path[0]) # should be path to /fpdb" ]
[ "0.7001057", "0.6927535", "0.6694564", "0.5969401", "0.5900031", "0.58544034", "0.5816103", "0.58008647", "0.58002234", "0.57855517", "0.57504606", "0.5737899", "0.5679186", "0.5636797", "0.55898374", "0.5576825", "0.54973745", "0.53993475", "0.53957206", "0.53377414", "0.5327768", "0.53255343", "0.5325156", "0.5315229", "0.53067875", "0.5305509", "0.5305509", "0.5298336", "0.52901244", "0.52893674" ]
0.72876877
0
Print usage for the stub script.
def PrintOurUsage():
    print 'Stub script %s (auto-generated). Options:' % sys.argv[0]
    print ('--helpstub ' 'Show help for stub script.')
    print ('--debug_binary ' 'Run python under debugger specified by --debugger.')
    print ('--debugger=<debugger> ' "Debugger for --debug_binary. Default: 'gdb --args'.")
    print ('--debug_script ' 'Run wrapped script with python debugger module (pdb).')
    print ('--show_command_and_exit ' 'Print command which would be executed and exit.')
    print ('These options must appear first in the command line, all others will ' 'be passed to the wrapped script.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def usage():", "def usage():", "def print_usage():\n print(helptxt)\n sys.exit(2)", "def usage():\n pass", "def usage() :\n\n print usage.__doc__", "def usage():\n print(__doc__.strip())", "def display_usage():\n print >> sys.stderr, __doc__", "def print_usage():\r\n print(\"USAGE: python[3] pso.py [<seed>] [<filename>]\")\r\n print(\" where:\")\r\n print(\"\\t[<seed>]\\tOPTIONAL - seed for the random generator\")\r\n print(\"\\t[<filename>]\\tOPTIONAL - name for the output file\")", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def usage():\n print(\"Hello World!\")", "def usage():\n print(\"\"\"Usage:\n\\t%s storer - initialize storer machine\n\\t%s tester - initialize tester machine\n\\t%s --help - print this message\"\"\"% (sys.argv[0], sys.argv[0], sys.argv[0]))", "def show_usage():\n\n usage_screen = \"\\nUsage:\\n\" \\\n f\" {basename(argv[0])} <mock_1> [<mock_2> ...]\\n\" \\\n \"\\nOptions:\\n\" \\\n \" mock-departments Send HTTP requests to create some mock departments in the backend.\\n\" \\\n \" mock-employees Send HTTP requests to create some mock employees in the backend.\\n\" \\\n \" help Show this help page.\\n\" \\\n \"\" \\\n \" verbose Enables detailed request logging for the remaining options.\\n\"\n print(usage_screen)", "def print_usage():\r\n\tprint(\"Usage: python gddownloader.py arg\")\r\n\tprint(\"where arg can be one of the following:\")\r\n\tprint(\" github url (e.g. https://github.com/user/repo)\")\r\n\tprint(\" path to txt file containing github urls\")", "def showUsage():\n None", "def print_usage():\r\n print \"\"\r\n print \"\"\r\n print \" JoomFind v0.1\"\r\n print \"\"\r\n print \" Script made by Jasdev Singh\"\r\n print \"\"\r\n print \" This script is made only for educational and offline self-testing \"\r\n print \" purposes. The creator is not responsible or accountable for any \"\r\n print \" damage or loss caused that you perform with this script. \"\r\n print \"\"\r\n print \" Usage example:\"\r\n print '\\tpython joomfind.py -f filepath | -v'\r\n print \"\"\r\n print \" Put URL(s) to scan in a newline delimited file\"\r\n print \" URL(s) must point to homepage of the CMS \"\r\n print \"\"\r\n print \" Options:\"\r\n print \" -f filename (specify input file)\"\r\n print \" -v, --verbose (show detailed output)\"\r\n print \" --help (displays this help text)\"\r\n print \"\"\r\n return", "def print_usage(msg):\n print('Usage: ' + msg)", "def usage():\n return _usage", "def usage():\n print \"\"\n print \"Usage: python parabot.py [options] <testsuite.tsv>\"\n print \"\"\n print \"<testsuite.tsv> can be absolute or relative path + filename of a testsuite.\"\n print \"The containing folder will be used as working directory\"\n print \"\"\n print \"Options:\"\n print \"-h\\t--help\\t\\tThis screen\"\n print \"-i\\t--include\\tInclude a tag\"\n print \"-e\\t--exclude\\tExclude a tag\"\n print \"-f\\t--forceserial\\tForces serial test execution\"\n print \"-b\\t--basedir\\tSet parabots base dir\"\n print \"\"", "def print_usage(self):\n print((\"@brief Usage is not defined for command \" + self.command))", "def printhelp():", "def usage():\n program_name = sys.argv[PROGRAM_ARG_NUM]\n print(\"Usage:\")\n print(\"%s IP LOGFILE [PORT]\" % program_name)\n print(\" IP : IP address of host running the desired FTP Server.\")\n print(\" LOGFILE : Name of file containing FTP Client log details.\")\n print(\" PORT (optional) : Port used to connect to FTP Server. 
Default is\"\\\n \" 21.\")", "def printUsage():\n print 'Usage: wue2stein.py nodeFile edgeFile steinFile'", "def printCLIHelp():\n \n cmd = os.path.basename(sys.argv[0])\n print \"\"\"\n - quickCurve - \n\nPerform a liklihood analysis on Fermi LAT data. You can use the\ncommand line functions listed below or run this module from within\npython. For full documentation on this module execute 'pydoc\nquickCurve'.\n \n%s (-h|--help) ... This help text.\n \n%s (-i|--initialize) ... Generate a default config file called\n example.cfg. Edit this file and rename it <basename>.cfg for use\n in the quickLike module.\n\n%s (-a|--analyze) (-n |--basename=)<basename> ... Perform an analysis\n on <basename>. <basename> is the prefix used for this analysis.\n You must already have a configuration file if using the command\n line interface.\n\n\"\"\" %(cmd,cmd,cmd)", "def print_usage(retcode=None):\n\n print(USAGE)\n\n if retcode is not None:\n sys.exit(retcode)", "def usage():\n print 'cpp file Generator.'\n print 'Usage:'\n print ' -h: help'\n print ' -i: file'\n print ' --help: help'\n print ' --in: help'\n sys.exit(1)", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def test_usage(self):\n # Make sure the usage message is shown when no arguments\n # are given and when the -h or --help option is given.\n for options in [], ['-h'], ['--help']:\n exit_code, output = run_cli(*options)\n assert \"Usage:\" in output", "def print_usage():\n print(\"usage: MILP.py -p <parameter file> -i <payoff file> -o <output file>\")\n print(\"-p, --params\\t sets the parameter file\")\n print(\"-i, --payoff\\t sets the payoff file\")\n print(\"-o, --output\\t sets the output file. Defaults to out.csv\")\n print(\"-d, --delimiter\\t sets the delimiter of ALL files. Defaults to csv\")", "def usage(msg):\n ap.print_usage()\n print \"-\"*40\n print msg\n exit(1)" ]
[ "0.810532", "0.810532", "0.80087817", "0.7974354", "0.7823985", "0.7744431", "0.7735299", "0.760537", "0.76011324", "0.76011324", "0.7589592", "0.7553557", "0.7550795", "0.75118643", "0.7472906", "0.746817", "0.7346284", "0.7329634", "0.7301601", "0.7280952", "0.7278727", "0.7260191", "0.72328395", "0.72024983", "0.7202459", "0.7194138", "0.7190348", "0.7183997", "0.7168461", "0.7166564" ]
0.8339372
0
Run a module as a script. Locates the module's file and runs it in the current interpreter, or optionally a debugger.
def RunScriptModule(module): args = sys.argv[1:] debug_binary = False debugger = 'gdb --args' debug_script = False show_command_and_exit = False while args: if args[0] == '--helpstub': PrintOurUsage() sys.exit(0) if args[0] == '--debug_binary': debug_binary = True args = args[1:] continue if args[0] == '--debug_script': debug_script = True args = args[1:] continue if args[0] == '--show_command_and_exit': show_command_and_exit = True args = args[1:] continue matchobj = re.match('--debugger=(.+)', args[0]) if matchobj is not None: debugger = StripQuotes(matchobj.group(1)) args = args[1:] continue break # Now look for my main python source file # TODO(dborowitz): This will fail if the module was zipimported, which means # no egg depending on this script runner can be zip_safe. main_filename = module.__file__ assert os.path.exists(main_filename), ('Cannot exec() %r: file not found.' % main_filename) assert os.access(main_filename, os.R_OK), ('Cannot exec() %r: file not' ' readable.' % main_filename) args = [main_filename] + args if debug_binary: debugger_args = debugger.split() program = debugger_args[0] # If pathname is not absolute, determine full path using PATH if not os.path.isabs(program): program = FindEnv(program) python_path = sys.executable command_vec = [python_path] if debug_script: command_vec.extend(GetPdbArgs(python_path)) args = [program] + debugger_args[1:] + command_vec + args elif debug_script: args = [sys.executable] + GetPdbArgs(program) + args else: program = sys.executable args = [sys.executable] + args if show_command_and_exit: print 'program: "%s"' % program print 'args:', args sys.exit(0) try: sys.stdout.flush() os.execv(program, args) except EnvironmentError as e: if not getattr(e, 'filename', None): e.filename = program # Add info to error message raise
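A hedged usage sketch for the stub runner above; `my_tool` is a hypothetical importable module and the flag values are illustrative. With `--show_command_and_exit` the stub only prints the interpreter command it would exec, which makes it safe to try:

import sys
import my_tool  # hypothetical module whose source file should be re-run

# Flags understood by the stub come first; everything after is passed to the script.
sys.argv = ['stub', '--show_command_and_exit', 'input.txt']
RunScriptModule(my_tool)  # prints the resolved program and argument vector, then exits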
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_python_script(package=None, module=None, args=[], p_args=[]):\n assert module is not None\n assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))\n path = python_script_exists(package, module)\n run_program(sys.executable, p_args + [path] + args)", "def run_script(extension_invocation_info):\n acm.RunModuleWithParameters(__name__, acm.GetDefaultContext())", "def exec_module(self, module):\n\n if not self.filename.endswith(config.FILE_EXT) and not self.filename.endswith(\n \"__init__.py\"\n ):\n print(\"Fatal error: ExtensionLoader is asked to load a normal file.\")\n print(\"filename:\", self.filename)\n print(\"Expected extension:\", config.FILE_EXT)\n raise SystemExit\n\n name = module.__name__\n if module.__name__ == config.MAIN_MODULE_NAME:\n module.__name__ = \"__main__\"\n config.MAIN_MODULE_NAME = None\n\n with open(self.filename) as f:\n source = f.read()\n\n transforms.identify_requested_transformers(source)\n\n if config.TRANSFORMERS:\n original = source\n source = transforms.add_all_imports(source)\n source = transforms.apply_source_transformations(source)\n\n if config.DIFF and original != source:\n self.write_html_diff(name, original, source)\n\n if config.CONVERT and self.filename.endswith(config.FILE_EXT):\n print(\"############### Original source: ############\\n\")\n print(original)\n print(\"\\n############### Converted source: ############\\n\")\n print(source)\n print(\"=\" * 50, \"\\n\")\n\n source = transforms.apply_ast_transformations(source)\n exec(source, vars(module))", "def module_runner(module):\n task_queue.put(1)\n result = sys.modules[module].run()\n task_queue.get()\n store_module_result(result) # Store the result in our repo", "def exec_module(self, module):\n pass", "def start(data_file, script_file=None):\n\n # Try to start up the interpreter\n try:\n initialize(data_file)\n except blotish.BlotishError, err:\n blot_common.print_blot_error(err)\n return\n\n # Maybe run a script\n exit_flag = False\n if script_file:\n exit_flag = execute_file(script_file)\n\n # Start the interpreter unless the script called exit\n if not exit_flag:\n global interpreter\n interpreter.cmdloop()\n\n # Cleanup\n finalize()", "def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))", "def _run_script(fullname):\n name = posixpath.basename(fullname)\n if name[-3:] == '.py':\n name = name[:-3] # strip .py extension\n\n modname = [string.join(fullname.split('/')[0:-1],'/')]\n trylist = ((name, None), (name+'.py', None),\n (name, modname), (name+'.py', modname))\n\n # look for the modulate in standard locations, load it if you\n # find it, otherwise return 1\n for fname, path in trylist:\n try:\n if path:\n fp, pathname, description = imp.find_module(fname, path)\n else:\n fp, pathname, description = imp.find_module(fname)\n except ImportError:\n fp = None\n if fp:\n sys.argv[0] = pathname\n try:\n mod = imp.load_module('__main__', fp, pathname, description)\n finally:\n fp.close()\n return 1\n return 0", "def load_script_as_module(script_name):\n spec = create_script_spec(script_name)\n script = module_from_spec(spec)\n spec.loader.exec_module(script)\n\n return script", "def execute_module(self, module, *args, **opts):\n module_file = module.__file__\n if module_file.endswith('.pyc'):\n module_file = module_file[:-1]\n cmd = [self._path]\n if 'python_options' in opts:\n cmd.extend(opts['python_options'])\n del opts['python_options']\n cmd.append(module_file)\n cmd.extend(args)\n return get_cmd_output(*cmd, 
**opts)", "def run_file(filename, logfile=None, execdir=None):\n if not runpy_available: #pragma:nocover\n raise pyutilib.common.ConfigurationError(\"Cannot apply the run_file() function because runpy is not available\") \n #\n # Open logfile\n #\n if not logfile is None:\n sys.stderr.flush()\n sys.stdout.flush()\n save_stdout = sys.stdout\n save_stderr = sys.stderr\n OUTPUT=open(logfile,\"w\")\n sys.stdout=OUTPUT\n sys.stderr=OUTPUT\n #\n # Add the file directory to the system path\n #\n if '/' in filename:\n tmp= \"/\".join((filename).split(\"/\")[:-1])\n tmp_import = (filename).split(\"/\")[-1]\n sys.path.append(tmp)\n elif '\\\\' in filename:\n tmp = \"\\\\\".join((filename).split(\"\\\\\")[:-1])\n tmp_import = (filename).split(\"\\\\\")[-1]\n sys.path.append(tmp)\n else:\n tmp_import = filename\n name = \".\".join((tmp_import).split(\".\")[:-1])\n #\n # Run the module\n #\n try:\n if not execdir is None:\n tmp=os.getcwd()\n os.chdir(execdir)\n tmp_path = sys.path\n sys.path = [execdir] + sys.path\n runpy.run_module(name,None,\"__main__\")\n if not execdir is None:\n os.chdir(tmp)\n sys.path = tmp_path\n except Exception: #pragma:nocover\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr\n raise\n #\n # Close logfile\n #\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr", "def _run_script(fullname):\n name = posixpath.basename(fullname)\n if name[-3:] == '.py':\n name = name[:-3] # strip .py extension\n\n modname = [string.join(fullname.split('/')[0:-1],'/')]\n trylist = ((name, None), (name+'.py', None),\n (name, modname), (name+'.py', modname))\n\n # look for the module in standard locations, load it if you\n # find it, otherwise return 1\n for fname, path in trylist:\n try:\n if path:\n fp, pathname, description = imp.find_module(fname, path)\n else:\n fp, pathname, description = imp.find_module(fname)\n except ImportError:\n fp = None\n if fp:\n sys.argv[0] = pathname\n try:\n mod = imp.load_module('__main__', fp, pathname, description)\n finally:\n fp.close()\n return 1\n return 0", "def do_run(self, line: str):\n if self._real_module is None:\n print(\"'run' command depends on using a module. See 'use' for help.\")\n return\n\n self._real_module.run()", "def modExec(module):\n modName = module.split('_')[-1]\n if \"live\" in module:\n dn = '{0} (live)'.format(modName.upper())\n else:\n dn = '{0}'.format(modName.upper())\n\n try:\n modStart = datetime.utcnow()\n log.info(\"Running {0}\".format(dn))\n modImport = 'modules.' + module\n\n import_module(modImport)\n\n modOutput = [i for i in glob.glob(outputdir + '/*') if all(p in i for p in [modName, runID])]\n try:\n arch = [archive.add_file(os.path.basename(outfile)) for outfile in modOutput]\n except IndexError:\n pass\n\n modEnd = datetime.utcnow()\n modRuntime = modEnd - modStart\n log.debug(\"{0} finished in {1}.\".format(dn, modRuntime))\n\n except KeyboardInterrupt:\n sys.stdout.write('\\r')\n sys.stdout.flush()\n log.error(\"{0} was killed. 
\".format(module))\n\n except Exception:\n log.error(\"{0} failed: {1}\".format(module, [traceback.format_exc()]))", "def run_script(script_file: str, config_file: str, **kwargs: Any) -> None:\n # Add config path and current working directory to sys.path to correctly load the configuration\n script_filepath = Path(script_file)\n config_filepath = Path(config_file)\n sys.path.insert(0, script_filepath.resolve().parent.as_posix())\n sys.path.insert(0, config_filepath.resolve().parent.as_posix())\n sys.path.insert(0, os.getcwd())\n\n module = load_module(script_filepath)\n _check_script(module)\n\n run_fn = module.__dict__[\"run\"]\n\n # Lazy setup configuration\n config = ConfigObject(config_filepath, script_filepath=script_filepath)\n\n run_fn(config, **kwargs)", "def runScriptAtPath(path):\n \n sys.argv = [path]\n for arg in PytoClasses.Python.shared.args:\n sys.argv.append(str(arg))\n \n def run() -> None:\n os.system = PytoClasses.Python.shared.system\n directory = os.path.expanduser(os.path.dirname(path))\n sys.path.insert(0, directory)\n try:\n global __script__\n spec = importlib.util.spec_from_file_location(\"__main__\", path)\n __script__ = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(__script__)\n PytoClasses.Python.shared.values = [item for item in dir(__script__) if not item.startswith(\"__\")]\n except SystemExit:\n print(\"SystemExit\")\n except Exception as e:\n \n exc_type, exc_obj, exc_tb = sys.exc_info()\n \n extracts = traceback.extract_tb(sys.exc_info()[2])\n count = len(extracts)\n \n lineNumber = -1\n \n fileName = path\n for i, extract in enumerate(extracts):\n if extract[0] == fileName:\n lineNumber = extract[1]\n break\n count -= 1\n \n if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number\n lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1]\n \n PytoClasses.Python.shared.errorType = exc_type.__name__\n PytoClasses.Python.shared.errorReason = str(e)\n PytoClasses.EditorViewController.visible.showErrorAtLine(lineNumber)\n \n print(traceback.format_exc(limit=-count))\n \n sys.path.remove(directory)\n\n PytoClasses.ReviewHelper.shared.launches = PytoClasses.ReviewHelper.shared.launches+1\n PytoClasses.ReviewHelper.shared.requestReview()\n PytoClasses.Python.shared.isScriptRunning = False\n \n thread = threading.Thread(target=run, args=())\n \n def loop():\n while PytoClasses.Python.shared.isScriptRunning:\n time.sleep(1)\n ignoredThreads.append(thread)\n raise Exception(\"Stopped script!\")\n \n def runLoop():\n try:\n loop()\n except:\n pass\n\n\n thread.start()\n\n runLoop()\n return __script__", "def run_module(self, path):\n\n module = self.import_module(path)\n result = None\n\n if module:\n try:\n result = module.run()\n except AttributeError:\n self.error('Error Running Module: Missing run() method.')\n except Exception:\n e = sys.exc_info()[1]\n traceback = sys.exc_info()[2]\n self.warning('Exeption caught in module: {0} line: {1}'.format(\n e,\n traceback.tb_lineno))\n self.calls.append({path: result})\n state.save_hook_call(path, result)\n return result", "def run_execute_file(file_path, globals=None, locals=None):\n if globals is None:\n globals = {}\n globals.update({\n \"__file__\": file_path,\n \"__name__\": \"__main__\",\n })\n with open(file_path, 'rb') as file:\n exec(compile(file.read(), file_path, 'exec'), globals, locals)", "def main():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('code', help='Python code to 
execute')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-3', action='store_const', dest='python',\n const='python3', help='Explicitly use Python 3')\n group.add_argument('-2', action='store_const', dest='python',\n const='python2', help='Explicitly use Python 2')\n group.add_argument('-p', '--python', help='Specify python interpreter')\n args = parser.parse_args()\n if args.python is not None:\n call([args.python, __file__, args.code])\n else:\n InteractiveInterpreter(LocalsImportDict()).runsource(args.code)", "def run_file(file_path, globals_, script_dir=SCRIPT_DIR):\n fix_sys_path()\n script_name = os.path.basename(file_path)\n script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)\n script_path = os.path.join(script_dir, script_name)\n print script_path\n execfile(script_path, globals_)", "def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()", "def load_script(filename):\n path, module_name, ext = _extract_script_components(filename)\n add_search_path(path)\n return importlib.import_module(module_name)\n # return _load_module(module_name)", "def run_zxpy(filename: str, module: ast.Module) -> None:\n patch_shell_commands(module)\n exec(compile(module, filename, mode='exec'))", "def run_example_debug_cmd(example_module_name, example_argv):\n return run_example_debug(example_module_name, example_argv)", "def do_pyscript(self, arg, opts=None):\n if not arg:\n self.perror(\"pyscript command requires at least 1 argument ...\", traceback_war=False)\n self.do_help('pyscript')\n return\n\n if not USE_ARG_LIST:\n arg = shlex.split(arg, posix=POSIX_SHLEX)\n\n # Get the absolute path of the script\n script_path = os.path.expanduser(arg[0])\n\n # Save current command line arguments\n orig_args = sys.argv\n\n # Overwrite sys.argv to allow the script to take command line arguments\n sys.argv = [script_path]\n sys.argv.extend(arg[1:])\n\n # Run the script - use repr formatting to escape things which need to be escaped to prevent issues on Windows\n self.do_py(\"run({!r})\".format(script_path))\n\n # Restore command line arguments to original state\n sys.argv = orig_args", "def main(args):\n module = args.module\n\n if args.step not in STEP_OPTIONS:\n raise ValueError(\n f\"{args.step} is an unknown option. Your options are {STEP_OPTIONS}.\"\n )\n\n if module == \"structure_plan\":\n run_module_structure_plan(args)\n elif module == \"floor_plan\":\n run_module_floor_plan(args)\n elif module == \"complete_floorplan\":\n run_module_complete_floorplan(args)\n elif module == \"ground_plan\":\n run_module_ground_plan(args)\n elif module == \"text_to_gdf\":\n run_module_text_to_gdf(args)\n else:\n raise ValueError(\n f\"{module} is an unknown option. 
Your options are {MODULE_OPTIONS}.\"\n )", "def run():\n print('')\n\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(abspath)\n os.chdir('tools/' + CHANGEME_GITNAME)\n if sop.debug.lower() == 'y':\n comm.runCommand('python2 ' + program + ' -s ' + sop.ip + ' -d', 'ChangeMe')\n else:\n comm.runCommand('python2 ' + CHANGEME_GITRUN + ' -s ' + sop.ip, 'ChangeMe')\n os.chdir(dname)", "def execute_module(self):\n raise NotImplementedError", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass" ]
[ "0.6864769", "0.6780037", "0.6562564", "0.65126973", "0.63618916", "0.62197036", "0.6204552", "0.6153162", "0.6135312", "0.61225456", "0.61223215", "0.61039793", "0.60877556", "0.6078428", "0.603614", "0.5902457", "0.5856509", "0.58559155", "0.5846937", "0.58052075", "0.5713502", "0.5711306", "0.57003784", "0.5679043", "0.5636366", "0.56026405", "0.5555395", "0.5517942", "0.5512342", "0.5512342" ]
0.84551585
0
Generates a nested dict of dicts from dot-separated keys, without associated values.
def make_tree(dot_separated_keys): tree = {} for item in dot_separated_keys: inside_tree = tree for part in item.split('.'): inside_tree = inside_tree.setdefault(part, {}) return tree
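A quick usage sketch of the helper above, showing the empty nested dict it builds:

# Each dotted key becomes a chain of nested (empty) dicts; shared prefixes are merged.
tree = make_tree(['a.b.c', 'a.d', 'e'])
assert tree == {'a': {'b': {'c': {}}, 'd': {}}, 'e': {}}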
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undotted_keys(dict):\n return {k.lstrip(\".\"): v for k, v in dict.items()}", "def hdict(keys, value, sep=\".\"):\n return reduce(lambda v, k: {k: v}, reversed(keys.split(sep)), value)", "def create_recursive_dot_dict(data: Dict[str, Any], cls=DotDict) -> Union[DotDict, DotDefaultDict]:\n res = cls()\n for k, v in data.items():\n k = k.split(\".\")\n target = res\n for i in range(0, len(k)-1):\n t2 = target.get(k[i])\n if t2 is None:\n t2 = cls()\n target[k[i]] = t2\n\n assert isinstance(t2, cls), f\"Trying to overwrite key {'.'.join(k[:i+1])}\"\n target = t2\n\n assert isinstance(target, cls), f\"Trying to overwrite key {'.'.join(k)}\"\n target[k[-1]] = v\n return res", "def create_namespace_tree(dotted_names):\r\n ret = {}\r\n for dn in dotted_names:\r\n path = dn.split('.')\r\n for i in xrange(len(path)):\r\n ns = '.'.join(path[:i])\r\n itempath = '.'.join(path[:i + 1])\r\n if ns not in ret:\r\n ret[ns] = []\r\n if itempath not in ret[ns]:\r\n ret[ns].append(itempath)\r\n return ret", "def convert_dotKeyToNestedDict(self, tree, key, value):\n\n t = tree\n if \".\" in key:\n key, rest = key.split(\".\", 1)\n if key not in tree:\n t[key] = {}\n self.convert_dotKeyToNestedDict(t[key], rest, value)\n else:\n t[key] = value\n\n return t", "def dict_by_key(d, k):\n k = k.split('.')\n while len(k) != 0:\n if isinstance(d, dict):\n d = d[k[0]]\n else:\n d = d[int(k[0])]\n k = k[1:]\n return d", "def flatten_dict_string_keys(x):\n return {'/'.join(k): v for k, v in flatten_dict(unfreeze(x)).items()}", "def dottify(self, base_name):\n obj_dict = vars(self)\n dotted_dict = {}\n for k, v in obj_dict.items():\n if v is not None:\n dotted_dict[base_name + '.' + k] = v\n return dotted_dict", "def parse_dot_key(data, key):\n for key_part in key.split('.'):\n data = data.get(key_part)\n if data is None:\n break\n return data", "def set_dict_attrs(d, values):\n key = values.keys()[0]\n key_parts = key.split('.')\n if len(key_parts) > 1:\n if key_parts[:1][0] in d.keys():\n d[key_parts[:1][0]] = set_dict_attrs(d[key_parts[:1][0]],\n {'.'.join(key_parts[1:]): values.values()[0]})\n else:\n d[key_parts[:1][0]] = set_dict_attrs({}, {'.'.join(key_parts[1:]): values.values()[0]})\n else:\n d[key_parts[:1][0]] = values.values()[0]\n return d", "def replace_dots(son):\n for key, value in son.items():\n if '.' 
in key:\n new_key = key.replace('.', '_')\n if isinstance(value, dict):\n son[new_key] = replace_dots(\n son.pop(key)\n )\n else:\n son[new_key] = son.pop(key)\n elif isinstance(value, dict): # recurse into sub-docs\n son[key] = replace_dots(value)\n return son", "def _build_config(key, value, d):\n pieces = key.split('.', 1)\n if len(pieces) == 1:\n d[pieces[0]] = value.strip()\n else:\n d[pieces[0]] = _build_config(pieces[1], value, {})\n\n return d", "def test_dotwiz_plus_to_dict():\n dw = DotWizPlus(hello=[{\"Key\": \"value\", \"Another-KEY\": {\"a\": \"b\"}}],\n camelCased={r\"th@#$%is.is.!@#$%^&*()a{}\\:<?>/~`.T'e'\\\"st\": True})\n\n assert dw.to_dict() == {\n 'hello': [\n {\n 'Another-KEY': {'a': 'b'},\n 'Key': 'value',\n }\n ],\n 'camelCased': {\n 'th@#$%is.is.!@#$%^&*()a{}\\\\:<?>/~`.T\\'e\\'\\\\\"st': True\n },\n }", "def test_dotwiz_plus_to_attr_dict():\n dw = DotWizPlus(hello=[{\"Key\": \"value\", \"Another-KEY\": {\"a\": \"b\"}}],\n camelCased={r\"th@#$%is.is.!@#$%^&*()a{}\\:<?>/~`.T'e'\\\"st\": True})\n\n assert dw.to_attr_dict() == {\n 'hello': [\n {\n 'another_key': {'a': 'b'},\n 'key': 'value',\n }\n ],\n 'camel_cased': {'th_is_is_a_t_e_st': True},\n }", "def convert_dot_notation(key, val):\n split_list = key.split('.')\n if len(split_list) == 1: # no dot notation found\n return key, val\n split_list.reverse()\n newval = val\n item = None\n for item in split_list:\n if item == split_list[-1]:\n return item, newval\n newval = {item:newval}\n return item, newval", "def path_to_dict(path: str, val: Any) -> Dict:\n d = val\n for k in reversed(path.split('.')):\n d = {k: d}\n return d", "def expand_objects(record):\n new_record = copy.deepcopy(record)\n for key, value in record.items():\n parts = key.split(\".\")\n if len(parts) > 1:\n parts.reverse()\n current = {parts[0]: value}\n for part in parts[1:]:\n current = {part: current}\n del new_record[key]\n new_record = merge_dicts(new_record, current)\n\n return new_record", "def del_dict_attrs(d, key):\n key_parts = key.split('.')\n if len(key_parts) > 1:\n d[key_parts[:1][0]] = del_dict_attrs(d[key_parts[:1][0]], '.'.join(key_parts[1:]))\n else:\n del d[key_parts[:1][0]]\n return d", "def FlattenDictionary(value, keys=[]):\n result = {}\n if type(value) is dict:\n for key in value:\n result.update(FlattenDictionary(value[key], keys + [key]))\n return result\n else:\n key = '.'.join(keys)\n return {key: value}", "def keys_breadth_first(self, include_dicts=False):\n namespaces = []\n for key in self._key_order:\n if isinstance(getattr(self, key), DotDict):\n namespaces.append(key)\n if include_dicts:\n yield key\n else:\n yield key\n for a_namespace in namespaces:\n for key in self[a_namespace].keys_breadth_first(include_dicts):\n yield '%s.%s' % (a_namespace, key)", "def flatten(d: MutableMapping, sep: str = \".\", parent_key: str = \"\") -> dict:\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def serialize_dict_keys(d, prefix=\"\"):\n keys = []\n for k, v in d.items():\n fqk = \"{}{}\".format(prefix, k)\n keys.append(fqk)\n if isinstance(v, dict):\n keys.extend(serialize_dict_keys(v, prefix=\"{}.\".format(fqk)))\n\n return keys", "def unflatten(dictionary, sep=\".\"):\n unflattened_dictionary = {}\n for key, value in dictionary.items():\n parts = key.split(sep)\n sub_dictionary = unflattened_dictionary\n for part in 
parts[:-1]:\n if part not in sub_dictionary:\n sub_dictionary[part] = {}\n sub_dictionary = sub_dictionary[part]\n sub_dictionary[parts[-1]] = value\n return unflattened_dictionary", "def add_by_dot_path(dictionary: Dict, key_path: str, value: Any) -> Dict:\n return add_by_list_of_keys(dictionary, key_path.split(\".\"), value)", "def _dotted_dict_to_object(cls, dict_: dict, key: Key = None):\n\n dotted_pairs = {}\n for k, val in dict_.copy().items():\n if '.' in k:\n dotted_pairs[k] = val\n del dict_[k]\n\n class_dict = {}\n for k, val in dotted_pairs.items():\n class_, prop_key = k.split('.', 1)\n if isinstance(val, list):\n class_dict[class_] = class_dict.get(class_) or list()\n for i, each_val in enumerate(val):\n if len(class_dict[class_]) < i + 1:\n class_dict[class_].append(dict())\n class_dict[class_][i][prop_key] = each_val\n else:\n class_dict[class_] = class_dict.get(class_) or dict()\n class_dict[class_][prop_key] = val\n\n type_hints = get_type_hints(cls)\n for class_, nested_prop in class_dict.items():\n if isinstance(nested_prop, list):\n nested_prop_list = []\n for each_nested_prop in nested_prop:\n nested_prop_list.append(type_hints[class_].__args__[0](**each_nested_prop))\n dict_[class_] = nested_prop_list\n else:\n dict_[class_] = type_hints[class_](**nested_prop)\n\n filtered_dict = {k: v for k, v in dict_.items() if k in type_hints}\n obj = cls(**filtered_dict)\n if key:\n obj.key = key\n return obj", "def get_by_dot_path(dictionary: Dict, key_path: str) -> Any:\n return get_by_list_of_keys(dictionary, key_path.split(\".\"))", "def _flatten_metadata(metadata):\r\n if metadata:\r\n # After changing recursive_keypairs` output we need to keep\r\n # flattening output unchanged.\r\n # Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.')\r\n # output before: a.b:c=d\r\n # output now: a.b.c=d\r\n # So to keep the first variant just replace all dots except the first\r\n return dict((k.replace('.', ':').replace(':', '.', 1),\r\n six.text_type(v))\r\n for k, v in utils.recursive_keypairs(metadata,\r\n separator='.')\r\n if type(v) is not set)\r\n return {}", "def flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n items = dict(items)\n # remove info like PCA primitive ID\n items_not_strings = {k: v for k, v in items.items() if type(v) != str}\n return dict(items_not_strings)", "def flatten_dict(\n d, parent_key=\"\", sep=\".\", ignore_under_prefixed=True, mark_value=True\n):\n items = {}\n for k in d:\n if ignore_under_prefixed and k.startswith(\"__\"):\n continue\n v = d[k]\n if mark_value and k.startswith(\"_\") and not k.startswith(\"__\"):\n v = MarkValue(repr(v))\n\n new_key = sep.join((parent_key, k)) if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.update(\n flatten_dict(\n v, new_key, sep=sep, ignore_under_prefixed=True, mark_value=True\n )\n )\n else:\n items[new_key] = v\n\n return items", "def dict(self, keys) -> dict:\n return {k: self(k) for k in keys}" ]
[ "0.73150593", "0.666839", "0.6528275", "0.647951", "0.6416033", "0.639155", "0.6269472", "0.6193029", "0.6132605", "0.6059191", "0.5956652", "0.59102404", "0.5872739", "0.5800979", "0.57487774", "0.5689122", "0.5673279", "0.562703", "0.5615472", "0.5609791", "0.56088036", "0.5602017", "0.5595142", "0.5573848", "0.5544635", "0.55432194", "0.5530819", "0.55306786", "0.5470455", "0.5464332" ]
0.73592526
0
Sum of the factorials of the digits of a number x
def factsum(x): return sum(list(map(lambda x: factorial(x), getdigits(x))))
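A minimal self-contained sketch of the function above; `getdigits` and `factorial` are not part of the shown snippet, so plausible stand-ins are assumed here (math.factorial and a simple decimal-digit splitter):

from math import factorial

def getdigits(x):
    # assumed helper: decimal digits of x
    return [int(d) for d in str(x)]

def factsum(x):
    return sum(list(map(lambda x: factorial(x), getdigits(x))))

assert factsum(145) == 145  # 1! + 4! + 5! == 1 + 24 + 120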
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def factorial_digit_sum(n):\n sum = 0\n factStr = str(factorial(n))\n for digit in factStr:\n sum += int(digit)\n return sum", "def Sum_Numbers_x_Power_Digits(x):\n totalSum = 0 \n for i in xrange(10, 999999):\n if i == sum([int(j)**x for j in str(i)]):\n totalSum += i\n return totalSum", "def calculateFactorials():\n\n ni = []\n ni.append( 295232799039604140847618609643520000000) # 34!\n ITERATIONS = 34\n for n in range( 1, ITERATIONS,1 ) :\n ni.append(math.floor(ni[n - 1] / n))\n print( \"\\n \".join([\"xi = (xi * _x) >> PRECISION;\\n res += xi * %s;\" % hex(int(x)) for x in ni]))", "def factorial(x):\r\n res = 1\r\n for i in range (1, x+1)\r\n res *= i\r\n return res", "def factorial(x):\r\n output = 1\r\n for factor in range(2,x+1):\r\n output = output * factor\r\n return output", "def obtain_factorial(x):\n product = 1\n for ii in list(range(x)):\n product = product * (ii + 1)\n\n return(product)", "def factorial(x):\n value = 1\n for i in range(2, add(x, 1)):\n value = multiply(value, i)\n return value", "def calculate(x: int) -> int:\n\n digits = list(map(int, list(str(x))))\n return sum(list(map(lambda a: a**2, digits)))", "def fact(n):\n return float(misc.factorial(n, True))", "def calcularfactorial(n):\r\n fact = 1\r\n for i in range(1, n+1): ## El valor inicial 1 es para que no arranque desde 0 si no desde 1. El valor final es n+1 xq el valor final del range nunca esta incluido\r\n fact = fact * i ## Multiplicamos el fact por el i. I va a valer lo que devuelva el range: 1,2,3,4 etc. Vamos a multiplicar los valores fact partiendo de 1 por todos los valores a recorrer\r\n return fact", "def factorial(x):\n ans = 1\n for i in range(x, 1, -1):\n ans *= i\n return ans", "def fact(n):\n\treturn int(factorial(n, exact=True))", "def foo_6(x): ## calculate the factorial of x in a different way\n\tfacto=1\n\twhile x>=1:\n\t\tfacto=facto*x\n\t\tx=x-1\n\treturn facto", "def digit_sum(x):\n s = 0\n while x>0:\n s = s+(x%10)\n x = x//10\n\n return s", "def Factorial(x):\n # 0) SECURITY CHECK\n if not isinstance(x, int):\n raise ValueError( \"'Factorial' function only accepts integers\" )\n\n # 1) COMPUTE THE FACTORIAL\n if x == 0 or x == 1:\n return 1\n else:\n return functools.reduce(lambda x, y: x * y, range(1, x + 1))", "def digits(x):\n \n if type(x) != int: \n print(\"ERROR <- x in factorial(x) is not type int\")\n return\n \n return [int(i) for i in list(str(x))]", "def task17_factorial(num):\n result = 1\n for i in range(1, num + 1):\n result *= i\n return result", "def factorial(n):\n return reduce(mul, range(1, n), 1)", "def twenty():\r\n \r\n n = 100\r\n factorial = 1\r\n sum = 0\r\n \r\n while n > 0:\r\n factorial *= n\r\n n -= 1\r\n \r\n for c in str(factorial):\r\n sum += int(c)\r\n \r\n return sum", "def factorial(n):\n \n result = 1\n\n for i in range(1,n+1):\n result *= i\n\n return result", "def factorial(n):\n\n # the code for factorial", "def get_factorial_digits(limit):\r\n\r\n factorials = [1]\r\n\r\n digits = [1]\r\n for num in range(1, limit + 1):\r\n new_digits = []\r\n carry = 0\r\n for digit in digits:\r\n digit_product = digit * num + carry\r\n new_digits.append(int(str(digit_product)[-1]))\r\n if digit_product >= 10:\r\n carry = int(str(digit_product)[:-1])\r\n else:\r\n carry = 0\r\n if carry:\r\n new_digits.extend([int(i) for i in reversed(str(carry))])\r\n factorials.append(len(new_digits))\r\n digits = new_digits\r\n\r\n return factorials", "def factorial(n):\n result = 1\n for i in range(1, n + 1):\n result *= i\n return result", "def factorial(n):\n result 
= 1\n for x in range(2, n + 1):\n result = result * x\n\n return result", "def Factorial(n):\n\tx = 1\n\tfor i in range(1, n + 1):\n\t\tx *= i\n\treturn x", "def factorial(n: int) -> int:\n result = 1\n for i in range(1, n+1):\n result *= i\n return result", "def factorial_trailing_zero(n):\n\n count = 0\n idx = 5\n while(n/idx >= 1):\n count += math.floor(n/idx)\n idx *= 5\n\n return count", "def sumDigit():", "def sum_of_digits(n):\n return sum(int(c) for c in str(n))", "def factorial(n):\n if not n>=0:\n \traise ValueError('n must be >=0')\n if math.floor(n)!=n:\n \traise ValueError('n must be exact integer')\n if n+1==n:\n \traise OverflowError(\"n too large\")\n result=1\n factor=2\n while factor<=n:\n \tresult*=factor\n \tfactor+=1\n return result" ]
[ "0.7923695", "0.75797725", "0.75408524", "0.7531802", "0.7449112", "0.73583555", "0.7314269", "0.7309004", "0.7186043", "0.7158655", "0.71404076", "0.7107824", "0.70873946", "0.70631707", "0.7045362", "0.7043718", "0.7035779", "0.70124704", "0.6972245", "0.6952173", "0.6941045", "0.6922352", "0.69098485", "0.69054556", "0.69030523", "0.68970525", "0.68852866", "0.6874781", "0.6865312", "0.6847531" ]
0.8542544
0
Clone a `Sequential` model instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments
def _clone_sequential_model(model, input_tensors=None): if not isinstance(model, Sequential): raise ValueError('Expected `model` argument ' 'to be a `Sequential` model instance, ' 'but got:', model) def clone(layer): return layer.__class__.from_config(layer.get_config()) layers = [clone(layer) for layer in model.layers] if input_tensors is None: return Sequential(layers=layers, name=model.name) else: if len(to_list(input_tensors)) != 1: raise ValueError('To clone a `Sequential` model, we expect ' ' at most one tensor ' 'as part of `input_tensors`.') x = to_list(input_tensors)[0] if K.is_keras_tensor(x): origin_layer = x._keras_history[0] if isinstance(origin_layer, InputLayer): return Sequential(layers=[origin_layer] + layers, name=model.name) else: raise ValueError('Cannot clone a `Sequential` model on top ' 'of a tensor that comes from a Keras layer ' 'other than an `InputLayer`. ' 'Use the functional API instead.') input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name)) input_layer = input_tensor._keras_history[0] return Sequential(layers=[input_layer] + layers, name=model.name)
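An illustrative usage sketch, assuming the standalone Keras package this helper is written against; layer sizes are arbitrary. Cloning creates fresh weights, so they must be copied explicitly if the clone should match the original:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(4, input_shape=(8,), activation='relu'), Dense(1)])
clone = _clone_sequential_model(model)   # same architecture, freshly initialized weights
clone.set_weights(model.get_weights())   # copy weights over if an exact duplicate is wanted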
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone_model(model, input_tensors=None):\n if isinstance(model, Sequential):\n return _clone_sequential_model(model, input_tensors=input_tensors)\n else:\n return _clone_functional_model(model, input_tensors=input_tensors)", "def clone(self):\n return _libsbml.Model_clone(self)", "def make_cloning_model(input_shape=(66, 200, 3)):\n # Create the Sequential model\n print(\"input shape\", input_shape)\n model = Sequential()\n model.add(Lambda(lambda x: x / 128. - 1., output_shape=input_shape, input_shape=input_shape))\n add_conv_type1(model, 12, input_shape)\n add_conv_type1(model, 18)\n add_conv_type1(model, 24)\n add_conv_type2(model, 30)\n add_conv_type2(model, 30)\n model.add(Flatten(input_shape=(13, 33, 30)))\n model.add(Dense(2000, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(500, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n return model", "def clone_keras_model(target, custom_objects=None):\n new_model = model_from_json(target.to_json(),custom_objects)\n new_model.set_weights(target.get_weights())\n return new_model", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def create_original_model():\n model = Sequential()\n model.add(Embedding(max_features,\n embedding_dims,\n input_length=maxlen))\n model.add(Dropout(0.2))\n model.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Dropout(0.2))\n model.add(Activation('relu'))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model", "def copy(self):\r\n clone = NeuralNet()\r\n for layer in self.layers:\r\n clone.layers.append(layer.copy())\r\n return clone", "def _clone_functional_model(model, input_tensors=None):\n if not isinstance(model, Model):\n raise ValueError('Expected `model` argument '\n 'to be a `Model` instance, got ', model)\n if isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a functional `Model` instance, '\n 'got a `Sequential` instance instead:', model)\n\n layer_map = {} # Cache for created layers.\n tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}\n if input_tensors is None:\n # Create placeholders to build the model on top of.\n input_layers = []\n input_tensors = []\n for layer in model.input_layers:\n input_tensor = Input(batch_shape=layer.batch_input_shape,\n dtype=layer.dtype,\n sparse=layer.sparse,\n name=layer.name)\n input_tensors.append(input_tensor)\n # Cache newly created input layer.\n newly_created_input_layer = input_tensor._keras_history[0]\n layer_map[layer] = newly_created_input_layer\n for original_input_layer, cloned_input_layer in zip(model.input_layers, input_layers):\n layer_map[original_input_layer] = cloned_input_layer\n else:\n # Make sure that all input tensors come from a Keras layer.\n # If tensor comes from an input layer: cache the input layer.\n input_tensors = to_list(input_tensors)\n _input_tensors = []\n for i, x in enumerate(input_tensors):\n if not K.is_keras_tensor(x):\n name = model.input_layers[i].name\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + name)\n _input_tensors.append(input_tensor)\n # Cache newly created input layer.\n original_input_layer = x._keras_history[0]\n newly_created_input_layer = input_tensor._keras_history[0]\n 
layer_map[original_input_layer] = newly_created_input_layer\n else:\n _input_tensors.append(x)\n input_tensors = _input_tensors\n\n for x, y in zip(model.inputs, input_tensors):\n tensor_map[x] = (y, None) # tensor, mask\n\n # Iterated over every node in the reference model, in depth order.\n depth_keys = list(model._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n for depth in depth_keys:\n nodes = model._nodes_by_depth[depth]\n for node in nodes:\n # Recover the corresponding layer.\n layer = node.outbound_layer\n\n # Get or create layer.\n if layer not in layer_map:\n # Clone layer.\n new_layer = layer.__class__.from_config(layer.get_config())\n layer_map[layer] = new_layer\n layer = new_layer\n else:\n # Reuse previously cloned layer.\n layer = layer_map[layer]\n # Don't call InputLayer multiple times.\n if isinstance(layer, InputLayer):\n continue\n\n # Gather inputs to call the new layer.\n reference_input_tensors = node.input_tensors\n reference_output_tensors = node.output_tensors\n\n # If all previous input tensors are available in tensor_map,\n # then call node.inbound_layer on them.\n computed_data = [] # List of tuples (input, mask).\n for x in reference_input_tensors:\n if x in tensor_map:\n computed_data.append(tensor_map[x])\n\n if len(computed_data) == len(reference_input_tensors):\n # Call layer.\n if node.arguments:\n kwargs = node.arguments\n else:\n kwargs = {}\n if len(computed_data) == 1:\n computed_tensor, computed_mask = computed_data[0]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_mask\n output_tensors = to_list(\n layer(computed_tensor, **kwargs))\n output_masks = to_list(\n layer.compute_mask(computed_tensor,\n computed_mask))\n computed_tensors = [computed_tensor]\n computed_masks = [computed_mask]\n else:\n computed_tensors = [x[0] for x in computed_data]\n computed_masks = [x[1] for x in computed_data]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_masks\n output_tensors = to_list(\n layer(computed_tensors, **kwargs))\n output_masks = to_list(\n layer.compute_mask(computed_tensors,\n computed_masks))\n # Update tensor_map.\n for x, y, mask in zip(reference_output_tensors,\n output_tensors,\n output_masks):\n tensor_map[x] = (y, mask)\n\n # Check that we did compute the model outputs,\n # then instantiate a new model from inputs and outputs.\n output_tensors = []\n for x in model.outputs:\n assert x in tensor_map, 'Could not compute output ' + str(x)\n tensor, _ = tensor_map[x]\n output_tensors.append(tensor)\n return Model(input_tensors, output_tensors, name=model.name)", "def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model", "def clone(self, camera = None, light = None):\r\n newModel = Model(file_string = \"__clone__\", x=self.unif[0], y=self.unif[1], z=self.unif[2],\r\n rx=self.unif[3], ry=self.unif[4], rz=self.unif[5], sx=self.unif[6], sy=self.unif[7], 
sz=self.unif[8],\r\n cx=self.unif[9], cy=self.unif[10], cz=self.unif[11])\r\n newModel.buf = self.buf\r\n newModel.vGroup = self.vGroup\r\n newModel.shader = self.shader\r\n newModel.textures = self.textures\r\n return newModel", "def clone(self):\n return _libsbml.ModelHistory_clone(self)", "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone", "def sequential_model():\n model = build_models()\n seq_model = Sequential(model[0]['layers'], name=model[0]['name'])\n return seq_model", "def clone(self,\n from_model: entities.Model,\n model_name: str,\n dataset: entities.Dataset = None,\n configuration: dict = None,\n status=None,\n scope=None,\n project_id: str = None,\n labels: list = None,\n description: str = None,\n tags: list = None,\n train_filter: entities.Filters = None,\n validation_filter: entities.Filters = None,\n ) -> entities.Model:\n from_json = {\"name\": model_name,\n \"packageId\": from_model.package_id,\n \"configuration\": from_model.configuration,\n \"metadata\": from_model.metadata,\n \"outputType\": from_model.output_type,\n \"inputType\": from_model.input_type}\n if project_id is None:\n project_id = self.project.id\n from_json['projectId'] = project_id\n if dataset is not None:\n if labels is None:\n labels = list(dataset.labels_flat_dict.keys())\n from_json['datasetId'] = dataset.id\n if labels is not None:\n from_json['labels'] = labels\n # if there are new labels - pop the mapping from the original\n _ = from_json['configuration'].pop('id_to_label_map', None)\n _ = from_json['configuration'].pop('label_to_id_map', None)\n if configuration is not None:\n from_json['configuration'].update(configuration)\n if description is not None:\n from_json['description'] = description\n if tags is not None:\n from_json['tags'] = tags\n if scope is not None:\n from_json['scope'] = scope\n if status is not None:\n from_json['status'] = status\n\n metadata = self._set_model_filter(metadata=from_model.metadata,\n train_filter=train_filter,\n validation_filter=validation_filter)\n if metadata['system']:\n from_json['metadata'] = metadata\n success, response = self._client_api.gen_request(req_type='post',\n path='/ml/models/{}/clone'.format(from_model.id),\n json_req=from_json)\n if not success:\n raise exceptions.PlatformException(response)\n new_model = entities.Model.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project,\n package=from_model._package)\n\n if new_model._dataset is not None and new_model._dataset.readonly is False:\n logger.warning(\n \"Model is using an unlocked dataset {!r}. 
Make it readonly for training reproducibility\".format(\n new_model.dataset.name))\n\n return new_model", "def copy_model(self, tf_seed=0):\n\n # Assemble network_list\n target = NDN(self.network_list, ffnet_out=self.ffnet_out,\n noise_dist=self.noise_dist, tf_seed=tf_seed)\n\n target.poisson_unit_norm = self.poisson_unit_norm\n target.data_pipe_type = self.data_pipe_type\n target.batch_size = self.batch_size\n\n # Copy all the parameters\n for nn in range(self.num_networks):\n for ll in range(self.networks[nn].num_layers):\n target.networks[nn].layers[ll].weights = \\\n self.networks[nn].layers[ll ].weights.copy()\n target.networks[nn].layers[ll].biases = \\\n self.networks[nn].layers[ll].biases.copy()\n target.networks[nn].layers[ll].reg = \\\n self.networks[nn].layers[ll].reg.reg_copy()\n target.networks[nn].input_masks = deepcopy(self.networks[nn].input_masks)\n return target", "def copy(self):\n model = LBM(\n n_row_clusters=self.n_row_clusters,\n n_column_clusters=self.n_column_clusters,\n max_iter=self.max_iter,\n n_init=self.n_init,\n n_init_total_run=self.n_init_total_run,\n n_iter_early_stop=self.nb_iter_early_stop,\n rtol=self.rtol,\n atol=self.atol,\n verbosity=self.verbosity,\n use_gpu=self.use_gpu,\n gpu_index=self.gpu_index,\n )\n model._nb_rows = self._nb_rows\n model._nb_cols = self._nb_cols\n model.loglikelihood_ = self.loglikelihood_\n model._np = self._np\n model._cupyx = self._cupyx\n model.trained_successfully_ = self.trained_successfully_\n model.pi_ = copy.copy(self.pi_)\n model.alpha_1_ = copy.copy(self.alpha_1_)\n model.alpha_2_ = copy.copy(self.alpha_2_)\n model.tau_1_ = copy.copy(self.tau_1_)\n model.tau_2_ = copy.copy(self.tau_2_)\n return model", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN", "def clone(self):\n return _libsbml.ModelDefinition_clone(self)", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def clone(self):\r\n obj = CylinderModel()\r\n obj.params = copy.deepcopy(self.params)\r\n return obj", "def clone(self):\n return _libsbml.Submodel_clone(self)", "def model(self):\n i = self.keras.Input(self.s)\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def copy_model_state(model):\n model_state = deepcopy(model.state_dict())\n return model_state", "def build_model(self):\n self.model = Sequential()\n # print self.layers[0].identifier\n # print self.layers[0].parameters\n for layer in self.layers:\n # print layer.identifier\n # print layer.parameters\n self.model.add(layer.toKerasFn())\n\n\n 
# super(SequentialModelWrapper, self).compile(optimizer=self.optimizer.toKerasFn(),\n # loss=self.loss,\n # metrics=self.metrics)\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics)", "def iris():\n n = keras.models.clone_model(iris_model)\n n.compile('adam', 'sparse_categorical_crossentropy')\n return n", "def clone(self):\n return _libsbml.Input_clone(self)", "def copy(self) -> \"Pipeline\":\n model = PipelineModel(self._config.as_dict(), vocab=copy.deepcopy(self.vocab))\n config = copy.deepcopy(self._config)\n\n pipeline_copy = Pipeline(model, config)\n pipeline_copy._model.load_state_dict(self._model.state_dict())\n\n return pipeline_copy", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model" ]
[ "0.7385709", "0.66616267", "0.6578116", "0.6526493", "0.6467478", "0.62646365", "0.62586915", "0.6220581", "0.6103913", "0.60691476", "0.6056226", "0.6018872", "0.6015779", "0.6011683", "0.5961887", "0.5907166", "0.58924145", "0.5868687", "0.58646977", "0.584091", "0.58357793", "0.5659199", "0.5609348", "0.55329376", "0.5524156", "0.55141765", "0.5498603", "0.5490629", "0.54903555", "0.5490342" ]
0.7651821
0
Clone any `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments
def clone_model(model, input_tensors=None): if isinstance(model, Sequential): return _clone_sequential_model(model, input_tensors=input_tensors) else: return _clone_functional_model(model, input_tensors=input_tensors)
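A brief usage sketch, assuming standalone Keras; the dispatcher handles both Sequential and functional models, and `input_tensors` lets the clone be built on top of an existing tensor instead of a new placeholder:

from keras.models import Model
from keras.layers import Input, Dense

inp = Input(shape=(8,))
out = Dense(1)(Dense(4, activation='relu')(inp))
model = Model(inp, out)

clone = clone_model(model)                                # new placeholders, new weights
clone_on_tensor = clone_model(model, input_tensors=Input(shape=(8,)))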
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):\n return _libsbml.Model_clone(self)", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def _clone_sequential_model(model, input_tensors=None):\n if not isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a `Sequential` model instance, '\n 'but got:', model)\n\n def clone(layer):\n return layer.__class__.from_config(layer.get_config())\n\n layers = [clone(layer) for layer in model.layers]\n if input_tensors is None:\n return Sequential(layers=layers, name=model.name)\n else:\n if len(to_list(input_tensors)) != 1:\n raise ValueError('To clone a `Sequential` model, we expect '\n ' at most one tensor '\n 'as part of `input_tensors`.')\n x = to_list(input_tensors)[0]\n if K.is_keras_tensor(x):\n origin_layer = x._keras_history[0]\n if isinstance(origin_layer, InputLayer):\n return Sequential(layers=[origin_layer] + layers,\n name=model.name)\n else:\n raise ValueError('Cannot clone a `Sequential` model on top '\n 'of a tensor that comes from a Keras layer '\n 'other than an `InputLayer`. '\n 'Use the functional API instead.')\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + str(x.name))\n input_layer = input_tensor._keras_history[0]\n return Sequential(layers=[input_layer] + layers, name=model.name)", "def clone(self, camera = None, light = None):\r\n newModel = Model(file_string = \"__clone__\", x=self.unif[0], y=self.unif[1], z=self.unif[2],\r\n rx=self.unif[3], ry=self.unif[4], rz=self.unif[5], sx=self.unif[6], sy=self.unif[7], sz=self.unif[8],\r\n cx=self.unif[9], cy=self.unif[10], cz=self.unif[11])\r\n newModel.buf = self.buf\r\n newModel.vGroup = self.vGroup\r\n newModel.shader = self.shader\r\n newModel.textures = self.textures\r\n return newModel", "def clone_keras_model(target, custom_objects=None):\n new_model = model_from_json(target.to_json(),custom_objects)\n new_model.set_weights(target.get_weights())\n return new_model", "def clone(self,\n from_model: entities.Model,\n model_name: str,\n dataset: entities.Dataset = None,\n configuration: dict = None,\n status=None,\n scope=None,\n project_id: str = None,\n labels: list = None,\n description: str = None,\n tags: list = None,\n train_filter: entities.Filters = None,\n validation_filter: entities.Filters = None,\n ) -> entities.Model:\n from_json = {\"name\": model_name,\n \"packageId\": from_model.package_id,\n \"configuration\": from_model.configuration,\n \"metadata\": from_model.metadata,\n \"outputType\": from_model.output_type,\n \"inputType\": from_model.input_type}\n if project_id is None:\n project_id = self.project.id\n from_json['projectId'] = project_id\n if dataset is not None:\n if labels is None:\n labels = list(dataset.labels_flat_dict.keys())\n from_json['datasetId'] = dataset.id\n if labels is not None:\n from_json['labels'] = labels\n # if there are new labels - pop the mapping from the original\n _ = from_json['configuration'].pop('id_to_label_map', None)\n _ = from_json['configuration'].pop('label_to_id_map', None)\n if configuration is not None:\n from_json['configuration'].update(configuration)\n if description is not None:\n from_json['description'] = description\n if tags is not None:\n from_json['tags'] = tags\n if scope is not None:\n from_json['scope'] = scope\n if status is not None:\n from_json['status'] = status\n\n 
metadata = self._set_model_filter(metadata=from_model.metadata,\n train_filter=train_filter,\n validation_filter=validation_filter)\n if metadata['system']:\n from_json['metadata'] = metadata\n success, response = self._client_api.gen_request(req_type='post',\n path='/ml/models/{}/clone'.format(from_model.id),\n json_req=from_json)\n if not success:\n raise exceptions.PlatformException(response)\n new_model = entities.Model.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project,\n package=from_model._package)\n\n if new_model._dataset is not None and new_model._dataset.readonly is False:\n logger.warning(\n \"Model is using an unlocked dataset {!r}. Make it readonly for training reproducibility\".format(\n new_model.dataset.name))\n\n return new_model", "def copy(self):\r\n clone = NeuralNet()\r\n for layer in self.layers:\r\n clone.layers.append(layer.copy())\r\n return clone", "def make_cloning_model(input_shape=(66, 200, 3)):\n # Create the Sequential model\n print(\"input shape\", input_shape)\n model = Sequential()\n model.add(Lambda(lambda x: x / 128. - 1., output_shape=input_shape, input_shape=input_shape))\n add_conv_type1(model, 12, input_shape)\n add_conv_type1(model, 18)\n add_conv_type1(model, 24)\n add_conv_type2(model, 30)\n add_conv_type2(model, 30)\n model.add(Flatten(input_shape=(13, 33, 30)))\n model.add(Dense(2000, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(500, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n return model", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def clone(self):\n return _libsbml.ModelDefinition_clone(self)", "def _clone_functional_model(model, input_tensors=None):\n if not isinstance(model, Model):\n raise ValueError('Expected `model` argument '\n 'to be a `Model` instance, got ', model)\n if isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a functional `Model` instance, '\n 'got a `Sequential` instance instead:', model)\n\n layer_map = {} # Cache for created layers.\n tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}\n if input_tensors is None:\n # Create placeholders to build the model on top of.\n input_layers = []\n input_tensors = []\n for layer in model.input_layers:\n input_tensor = Input(batch_shape=layer.batch_input_shape,\n dtype=layer.dtype,\n sparse=layer.sparse,\n name=layer.name)\n input_tensors.append(input_tensor)\n # Cache newly created input layer.\n newly_created_input_layer = input_tensor._keras_history[0]\n layer_map[layer] = newly_created_input_layer\n for original_input_layer, cloned_input_layer in zip(model.input_layers, input_layers):\n layer_map[original_input_layer] = cloned_input_layer\n else:\n # Make sure that all input tensors come from a Keras layer.\n # If tensor comes from an input layer: cache the input 
layer.\n input_tensors = to_list(input_tensors)\n _input_tensors = []\n for i, x in enumerate(input_tensors):\n if not K.is_keras_tensor(x):\n name = model.input_layers[i].name\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + name)\n _input_tensors.append(input_tensor)\n # Cache newly created input layer.\n original_input_layer = x._keras_history[0]\n newly_created_input_layer = input_tensor._keras_history[0]\n layer_map[original_input_layer] = newly_created_input_layer\n else:\n _input_tensors.append(x)\n input_tensors = _input_tensors\n\n for x, y in zip(model.inputs, input_tensors):\n tensor_map[x] = (y, None) # tensor, mask\n\n # Iterated over every node in the reference model, in depth order.\n depth_keys = list(model._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n for depth in depth_keys:\n nodes = model._nodes_by_depth[depth]\n for node in nodes:\n # Recover the corresponding layer.\n layer = node.outbound_layer\n\n # Get or create layer.\n if layer not in layer_map:\n # Clone layer.\n new_layer = layer.__class__.from_config(layer.get_config())\n layer_map[layer] = new_layer\n layer = new_layer\n else:\n # Reuse previously cloned layer.\n layer = layer_map[layer]\n # Don't call InputLayer multiple times.\n if isinstance(layer, InputLayer):\n continue\n\n # Gather inputs to call the new layer.\n reference_input_tensors = node.input_tensors\n reference_output_tensors = node.output_tensors\n\n # If all previous input tensors are available in tensor_map,\n # then call node.inbound_layer on them.\n computed_data = [] # List of tuples (input, mask).\n for x in reference_input_tensors:\n if x in tensor_map:\n computed_data.append(tensor_map[x])\n\n if len(computed_data) == len(reference_input_tensors):\n # Call layer.\n if node.arguments:\n kwargs = node.arguments\n else:\n kwargs = {}\n if len(computed_data) == 1:\n computed_tensor, computed_mask = computed_data[0]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_mask\n output_tensors = to_list(\n layer(computed_tensor, **kwargs))\n output_masks = to_list(\n layer.compute_mask(computed_tensor,\n computed_mask))\n computed_tensors = [computed_tensor]\n computed_masks = [computed_mask]\n else:\n computed_tensors = [x[0] for x in computed_data]\n computed_masks = [x[1] for x in computed_data]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_masks\n output_tensors = to_list(\n layer(computed_tensors, **kwargs))\n output_masks = to_list(\n layer.compute_mask(computed_tensors,\n computed_masks))\n # Update tensor_map.\n for x, y, mask in zip(reference_output_tensors,\n output_tensors,\n output_masks):\n tensor_map[x] = (y, mask)\n\n # Check that we did compute the model outputs,\n # then instantiate a new model from inputs and outputs.\n output_tensors = []\n for x in model.outputs:\n assert x in tensor_map, 'Could not compute output ' + str(x)\n tensor, _ = tensor_map[x]\n output_tensors.append(tensor)\n return Model(input_tensors, output_tensors, name=model.name)", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def clone(self):\r\n obj = CylinderModel()\r\n obj.params = copy.deepcopy(self.params)\r\n return obj", "def clone(self):\n return _libsbml.ModelHistory_clone(self)", "def copy(self):\n model = LBM(\n n_row_clusters=self.n_row_clusters,\n 
n_column_clusters=self.n_column_clusters,\n max_iter=self.max_iter,\n n_init=self.n_init,\n n_init_total_run=self.n_init_total_run,\n n_iter_early_stop=self.nb_iter_early_stop,\n rtol=self.rtol,\n atol=self.atol,\n verbosity=self.verbosity,\n use_gpu=self.use_gpu,\n gpu_index=self.gpu_index,\n )\n model._nb_rows = self._nb_rows\n model._nb_cols = self._nb_cols\n model.loglikelihood_ = self.loglikelihood_\n model._np = self._np\n model._cupyx = self._cupyx\n model.trained_successfully_ = self.trained_successfully_\n model.pi_ = copy.copy(self.pi_)\n model.alpha_1_ = copy.copy(self.alpha_1_)\n model.alpha_2_ = copy.copy(self.alpha_2_)\n model.tau_1_ = copy.copy(self.tau_1_)\n model.tau_2_ = copy.copy(self.tau_2_)\n return model", "def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone", "def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model", "def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN", "def clone(self):\n return _libsbml.Submodel_clone(self)", "def copy_model(self, tf_seed=0):\n\n # Assemble network_list\n target = NDN(self.network_list, ffnet_out=self.ffnet_out,\n noise_dist=self.noise_dist, tf_seed=tf_seed)\n\n target.poisson_unit_norm = self.poisson_unit_norm\n target.data_pipe_type = self.data_pipe_type\n target.batch_size = self.batch_size\n\n # Copy all the parameters\n for nn in range(self.num_networks):\n for ll in range(self.networks[nn].num_layers):\n target.networks[nn].layers[ll].weights = \\\n self.networks[nn].layers[ll ].weights.copy()\n target.networks[nn].layers[ll].biases = \\\n self.networks[nn].layers[ll].biases.copy()\n target.networks[nn].layers[ll].reg = \\\n self.networks[nn].layers[ll].reg.reg_copy()\n target.networks[nn].input_masks = deepcopy(self.networks[nn].input_masks)\n return target", "def clone(self):\n return _libsbml.Input_clone(self)", "def _try_clone_model(model):\n try:\n return copy.deepcopy(model)\n except Exception:\n warnings.warn(\n \"Failed to clone model. 
Model state might be mutated during verification.\"\n )\n return model", "def clone(self, **kwargs):\n return attr.evolve(self, **kwargs)", "def clone(self):\n return _libsbml.FbcModelPlugin_clone(self)", "def clone(self):\n return _libsbml.ListOfInputs_clone(self)", "def clone(self):\n return _libsbml.ListOfSubmodels_clone(self)", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def clone(self, *args, **kwargs):\n return self.copy().reset(*args, **kwargs)", "def clone(self):\n return _libsbml.MultiModelPlugin_clone(self)" ]
[ "0.7209398", "0.70578766", "0.6805522", "0.6779844", "0.6744178", "0.6651548", "0.66036665", "0.64693105", "0.6441572", "0.64124787", "0.6412031", "0.64043045", "0.63693136", "0.636181", "0.6343172", "0.6320473", "0.6242863", "0.6219034", "0.61284405", "0.6126675", "0.60146576", "0.5943569", "0.59153146", "0.5906954", "0.5825736", "0.58104175", "0.5756897", "0.5731018", "0.5728004", "0.5720808" ]
0.7679161
0
Initialize the joystick components
def init(self): pygame.init() pygame.joystick.init() self.controller = pygame.joystick.Joystick(0) self.controller.init() self.x=0 self.y=0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.isMoving = 0#0 is stop, 1 is moving forward, -1 is moving backward\n self.isRoutating = False\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)", "def __init__(self, setup=False):\n\n # Initilise pygame and joystick\n pygame.init()\n pygame.joystick.init()\n\n # Number of joysticks available\n js_count = pygame.joystick.get_count()\n \n try:\n # Return first joystick object if available, False if not\n self.js = pygame.joystick.Joystick(0)\n self.js.init()\n\n # Setup mode for finding control indices\n if setup == True:\n print \"In setup mode\"\n\n self.num_buttons = self.js.get_numbuttons()\n self.num_axes = self.js.get_numaxes()\n self.num_hats = self.js.get_numhats()\n\n print \"No. buttons: {}\".format(self.num_buttons)\n print \"No. axes: {}\".format(self.num_axes)\n print \"No. hats: {}\".format(self.num_hats)\n\n # Assign controls from joystick name\n if self.js.get_name() == \"PG-9037\" and setup == False:\n print \"Controller detected: PG-9037\"\n self.button_list, self.axis_list, self.hat_list = self.gamepad_default()\n elif setup == False:\n print \"Unfamiliar controller: Using defaults\"\n self.button_list, self.axis_list, self.hat_list = self.gamepad_default()\n\n except Exception, error:\n print \"No controllers detected\"", "def __init__(self, name='demo'):\n init()\n joystick.init()\n for i in range(joystick.get_count()):\n joystick.Joystick(i).init()\n\n State.game = util.load_cfg(name)\n State.clock = Clock(10, State.game['frame_rate'])\n State.window = display.set_mode(State.game['screen_size'])\n\n self._last_joystick_action = None\n self.create_screens()", "def __init__(self, joystick_ID):\n self.isReady = False\n self._jsID = joystick_ID\n pygame.init()\n pygame.joystick.init()\n n = pygame.joystick.get_count()\n if joystick_ID >= 0 and joystick_ID < n:\n # Joystick with that ID was found, initialize it\n self._JS = pygame.joystick.Joystick(joystick_ID)\n self._JS.init()\n\n # Create controller elements\n self.StickL = Stick(self._JS, [AXS_LX, AXS_LY])\n self.StickR = Stick(self._JS, [AXS_RX, AXS_RY])\n\n self.BtnL = Button(self._JS, BTN_LB_ID)\n self.BtnR = Button(self._JS, BTN_RB_ID)\n self.BtnBack = Button(self._JS, BTN_BACK_ID)\n self.BtnStart = Button(self._JS, BTN_START_ID)\n self.BtnA = Button(self._JS, BTN_A_ID)\n self.BtnB = Button(self._JS, BTN_B_ID)\n self.BtnX = Button(self._JS, BTN_X_ID)\n self.BtnY = Button(self._JS, BTN_Y_ID)\n self.BtnStickL = Button(self._JS, BTN_STICK_L_ID)\n self.BtnStickR = Button(self._JS, BTN_STICK_R_ID)\n\n self.HatL = Hat(self._JS, 0)\n self.isReady = True", "def joy_init():\n\n pygame.init();\n pygame.joystick.init();\n if pygame.joystick.get_count() == 0:\n raise Exception(\"joy_init: No joysticks connected\");\n joystick = pygame.joystick.Joystick(0)\n joystick.init()\n \n control.tare()\n \n return joystick", "def init(self):\n\n pygame.init()\n pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"Gears 4 Geeks\")\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.ser = serial.Serial('COM4', 9600)\n\n #ADAFRUIT_IO_KEY = 'd1a1bd3737714fa488e0364c775a4b4d' ##This 
will only be good until the end of the competition\n #self.aio = Client(ADAFRUIT_IO_KEY)", "def __init__(self, joystick):\n\t\tself.js = joystick", "def pyga_joysetup(self):\n jcount=0\n if PYG:\n self.dbgprint(\"pygame starts\")\n jcount=PYG.joystick.get_count()\n if jcount > 0:\n for x in range(jcount):\n j = PYG.joystick.Joystick(x)\n j.init()\n self.dbgprint(\">>>Enabled joystick: %s\" % j.get_name())\n taskMgr.add(self.pyga_joytask, 'tsk_pygajoy')\n else:\n self.dbgprint(\"No Joysticks to Initialize!\")\n\n return jcount", "def init_pygame(self):\n # Startup the pygame system\n pygame.init()\n # Create our window\n self.screen = pygame.display.set_mode((Settings.width, Settings.height))\n # Set the title that will display at the top of the window.\n pygame.display.set_caption(self.title)\n # Create the clock\n self.clock = pygame.time.Clock()\n self.last_checked_time = pygame.time.get_ticks()\n # Startup the joystick system\n pygame.joystick.init()\n # For each joystick we find, initialize the stick\n for i in range(pygame.joystick.get_count()):\n pygame.joystick.Joystick(i).init()\n # Set the repeat delay for key presses\n pygame.key.set_repeat(Settings.key_repeat)\n # Create statistics font\n self.statistics_font = pygame.font.Font(None,30)", "def __init__(self, s_width, s_height, setup):\n pygame.init()\n pygame.font.init()\n\n self.arcade = False\n fullscreen = False\n for opt in setup:\n if opt == Setup.Arcade:\n self.arcade = True\n elif opt == Setup.Fullscreen:\n fullscreen = True\n \n self.joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]\n for j in self.joysticks:\n j.init()\n\n self.display = Display((s_width, s_height), fullscreen)\n self.clock = pygame.time.Clock()\n self.FPS = 60\n\n self.ui = UI(self. display)\n if self.arcade:\n if len(self.joysticks) == 0: \n print(\"=================== plug in the controller ===================\") \n exit(1)\n self.ui.enable_arcade_mode()\n \n self.selected_speed = \"speed Medium\"\n self.game_manager = GameManager(self.display, self.ui, GameMode.EatToGrow, GameState.Menu)", "def __init__(self):\n\n # The Microsoft XBox 360 Wired controller has 11 buttons and 8 axes.\n # Buttons can be 0 (not pressed) or 1 (pressed)\n # Axes are floats and range between -1 and 1. Note that for LT and RT, their \"not pressed\" value is 1 and for the others it is 0. Cross keys only have values -1, 0, and 1. The others have be any value in between -1 and 1.\n num_buttons = 11\n num_axes = 8\n self.inputs = [0 for i in range(num_buttons + num_axes)]\n self.inputs[JoyInput.LT] = self.inputs[JoyInput.RT] = 1\n\n # Dictionary of saved inputs. 
If an input is not currently saved, you must set it to None.\n # For example, the LS_Y (\"left stick Y\") axis may be saved in self.saved[JoyInput.LS_Y]\n self.saved = {\n JoyInput.LS_Y: None,\n Joystick.RS_ANGLE: None,\n }\n\n # Field variables\n self.depth_state = None # stores the depth state\n self.depth_last_received = 0 # how long since the last depth state callback\n self.depth_pwm_input = 0 # tracks pwm given to depth thrusters\n\n # ROS Subscribers\n rospy.Subscriber(\"/joy\", Joy, self.joy_callback)\n rospy.Subscriber(Topic.YAW_STATE, Float64, self.yaw_state_callback)\n rospy.Subscriber(Topic.DEPTH_STATE, Float64, self.depth_state_callback)\n rospy.Subscriber(Topic.YAW_SETPOINT, Float64, self.yaw_setpoint_callback)\n rospy.Subscriber(Topic.DEPTH_SETPOINT, Int16, self.depth_setpoint_callback)\n\n # ROS Publishers\n # self.topics is a dictionary of dictionaries.\n # 'publisher' contains the rospy.Publisher()\n # 'msg' contains the Int16(), Float64(), or Bool() related to the publisher\n # Use self.publish() rather than using self.topics directly.\n self.topics = {\n Topic.YAW_PWM: {'publisher':rospy.Publisher(Topic.YAW_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PWM_FEEDBACK: {'publisher':rospy.Publisher(Topic.YAW_PWM_FEEDBACK, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PID: {'publisher':rospy.Publisher(Topic.YAW_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.YAW_SETPOINT: {'publisher':rospy.Publisher(Topic.YAW_SETPOINT, Float64, queue_size=10), 'msg':Float64()},\n\n Topic.DEPTH_PWM: {'publisher':rospy.Publisher(Topic.DEPTH_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.DEPTH_PID: {'publisher':rospy.Publisher(Topic.DEPTH_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.DEPTH_SETPOINT: {'publisher':rospy.Publisher(Topic.DEPTH_SETPOINT, Int16, queue_size=10), 'msg':Int16()},\n }", "def init():\n\n global leftDriverStick\n global rightDriverStick\n global goGamePad\n\n try:\n leftDriverStick = T16000M(0)\n except:\n print('OI: Error - Could not instantiate Left Driver Stick on USB port 0!!!')\n\n try:\n rightDriverStick = T16000M(1)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 0!!!')\n\n try:\n goGamePad = Joystick(2)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 2!!!')\n\n\n # ----------------------------------------------------------\n # Driver Controls\n # ----------------------------------------------------------\n #global resetYawBtn\n #resetYawBtn = JoystickButton(rightDriverStick, config.btnResetYawAngleIndex)\n #resetYawBtn.whenPressed(NavxResetYawAngle())\n\n global btnDriveSlow\n btnDriveSlow = JoystickButton(leftDriverStick, config.btnDriveSlow)\n \n global btnEnableLightSensor\n btnEnableLightSensor = JoystickButton(leftDriverStick, config.btnEnableLightSensorIndex)\n\n global btnExtendAll\n btnExtendAll = JoystickButton(rightDriverStick, config.btnExtendAllIndex)\n btnExtendAll.whenPressed(ExtendAll())\n\n global btnRetract\n btnRetract = JoystickButton(rightDriverStick, config.btnRetractAllIndex)\n btnRetract.whenPressed(RetractAll())\n\n global btnExtendFront\n btnExtendFront = JoystickButton(rightDriverStick, config.btnExtendFrontIndex)\n btnExtendFront.whenPressed(ExtendFront())\n\n global btnExtendBack\n btnExtendBack = JoystickButton(rightDriverStick, config.btnExtendBackIndex)\n btnExtendBack.whenPressed(ExtendBack())\n\n global btnRetractFront\n btnRetractFront = JoystickButton(rightDriverStick, config.btnRetractFrontIndex)\n 
btnRetractFront.whenPressed(RetractFront())\n\n global btnCargoGrabTog\n btnCargoGrabTog = JoystickButton(goGamePad, config.btnHatchGrabTogIndex)\n btnCargoGrabTog.whenPressed(ExtendBack())\n \n \"\"\"\n global btnResetEncoders\n btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n btnResetEncoders.whenPressed(TankDriveResetEncoders())\n \"\"\"\n\n \"\"\"\n global axisElevator\n axisElevator = JoystickAxis(goGamePad, config.axisElevatorIndex)\n axisElevator. #??? idk how to configure joystick axis\n \"\"\"\n\n \"\"\"\n global btnRampTog\n btnRampTog = JoystickButton(goGamePad, config.btnRampTogIndex)\n btnRampTog.whenPressed(ExtendFront())\n \"\"\"\n #global btnResetEncoders\n #btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n #btnResetEncoders.whenPressed(TankDriveResetEncoders())\n\n # These variable names are inconsistent, need to be fixed!!!!\n #global btnRampExtendTog\n #btnRampExtendTog = JoystickButton(goGamePad, config.btnRampExtendTogIndex)\n #btnRampExtendTog.whenPressed(RampExtend())\n\n #global btnRampRetractTog\n #btnRampRetractTog = JoystickButton(goGamePad, config.btnRampRetractTogIndex)\n #btnRampRetractTog.whenPressed(RampRetract())", "def __init__(self):\n super().__init__()\n\n self._registry = {}\n el = gremlin.event_handler.EventListener()\n el.joystick_event.connect(self._joystick_cb)", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def on_update(self, delta_time):\n #pressed = self.window.joys[0].on_joybutton_press \n #print(pressed) # <bound method Joystick.on_joybutton_press of <pyglet.input.base.Joystick object at 0x7f5169264d90>>\n\n #print(type(pressed)) # <class 'method'>\n\n joy_dico = self.window.joys[0]\n\n\n\n btns = joy_dico.buttons\n print(btns)\n #print(type(btns)) # list\n\n print(\">>>>\")\n\n\n print(joy_dico.button_controls) # [Button(raw_name=BTN_A), Button(raw_name=BTN_B), Button(raw_name=BTN_X), Button(raw_name=BTN_Y), Button(raw_name=BTN_TL), Button(raw_name=BTN_TR), Button(raw_name=BTN_SELECT), Button(raw_name=BTN_START), Button(raw_name=BTN_MODE), Button(raw_name=BTN_THUMBL), Button(raw_name=BTN_THUMBR)]\n\n print(joy_dico.button_controls[0].__dict__)\n\n print(\"_______*******\")\n #print(joy_dico.button_controls.BTN_A)\n\n joy_dico = self.window.joys[0]\n\n BTN_A = joy_dico.button_controls[0]\n BTN_B = joy_dico.button_controls[1]\n BTN_X = joy_dico.button_controls[2]\n BTN_Y = joy_dico.button_controls[3]\n BTN_TL = joy_dico.button_controls[4]\n BTN_TR = joy_dico.button_controls[5]\n BTN_SELECT = joy_dico.button_controls[6]\n BTN_START = joy_dico.button_controls[7]\n BTN_MODE = joy_dico.button_controls[8]\n BTN_THUMBL = joy_dico.button_controls[9]\n BTN_THUMBR = joy_dico.button_controls[10]\n\n\n print(f\"\\n BTN_A ----> {BTN_A}\")\n\n\n BTN_list = [BTN_A,BTN_B,BTN_X,BTN_Y, BTN_TL, BTN_TR, BTN_SELECT, BTN_START, BTN_MODE, BTN_THUMBL, BTN_THUMBR]\n\n BTN_fn_list = [self.joy_A, self.joy_B, self.joy_X, self.joy_Y]\n\n for BTN in BTN_list:\n if BTN._value == 1:\n print(f\"=====> >=====> ====> {BTN.raw_name}\")\n\n idx = BTN_list.index(BTN)\n\n BTN_fn_list[idx]()", "def init(self, address, port):\n \n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n 
self.event_dict = {}\n\n # Create a TCP/IP socket\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (address, port)\n print('connecting to {} port {}'.format(address, port))\n self.sock.connect(server_address)\n self.axis_data = {i:0 for i in range(7)}\n self.verbose = True", "def __init__(self):\n self.inches_moved = 0\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.running = True\n self.ir_sensor = ev3.InfraredSensor()\n self.color_sensor = ev3.ColorSensor()\n assert self.color_sensor\n assert self.ir_sensor\n assert self.touch_sensor\n self.arm_motor.position = 0\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n\n self.right_motor_encoder = self.right_motor.position\n self.left_motor_encoder = self.left_motor.position", "def __init__(self, robot):\n\n #initialise the stick and the smart dashboard (in case we need stuff for auton):\n self.stick = wpilib.Joystick(0)\n self.smart_dashboard = NetworkTable.getTable(\"SmartDashboard\")\n\n #Main stick buttons.\n #-----------------------------------------------------------------------\n trigger = JoystickButton(self.stick, 1)\n thumb = JoystickButton(self.stick, 2)\n three = JoystickButton(self.stick, 3)\n four = JoystickButton(self.stick, 4)\n five = JoystickButton(self.stick, 5)\n six = JoystickButton(self.stick, 6)\n seven = JoystickButton(self.stick, 7)\n eight = JoystickButton(self.stick, 8)\n nine = JoystickButton(self.stick, 9)\n ten = JoystickButton(self.stick, 10)\n eleven = JoystickButton(self.stick, 11)\n twelve = JoystickButton(self.stick, 12)\n\n #Hat switch POV stuff.\n #-----------------------------------------------------------------------\n pov_north = POVButton(self.stick, 0)\n pov_northeast = POVButton(self.stick, 45)\n pov_east = POVButton(self.stick, 90)\n pov_southeast = POVButton(self.stick, 135)\n pov_south = POVButton(self.stick, 180)\n pov_southwest = POVButton(self.stick, 225)\n pov_west = POVButton(self.stick, 270)\n pov_northwest = POVButton(self.stick, 315)\n\n pov_south.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kBack))\n pov_north.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kForward))\n pov_east.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kRight))\n pov_west.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kLeft))", "def inp():\n return joystick", "def input(self):\n\n self.vx, self.vy = 0, 0\n\n game = self.game\n\n x_axis = game.get_axis(0)\n if abs(x_axis) < JOYSTICK_THRESHOLD:\n x_axis = 0\n y_axis = game.get_axis(1)\n if abs(y_axis) < JOYSTICK_THRESHOLD:\n y_axis = 0\n\n if game.get_vbutton_down('left'):\n x_axis = -1\n elif game.get_vbutton_down('right'):\n x_axis = 1\n if game.get_vbutton_down('up'):\n y_axis = -1\n elif game.get_vbutton_down('down'):\n y_axis = 1\n elif game.get_vbutton_down('top_left'):\n x_axis = -1\n y_axis = -1\n elif game.get_vbutton_down('top_right'):\n x_axis = 1\n y_axis = -1\n elif game.get_vbutton_down('bottom_left'):\n x_axis = -1\n y_axis = 1\n elif game.get_vbutton_down('bottom_right'):\n x_axis = 1\n y_axis = 1\n\n # Check for collisions\n if self.get_obstacles(self.spd * x_axis, 0):\n x_axis = 0\n if self.get_obstacles(0, self.spd * y_axis):\n y_axis = 0\n\n self.vx 
= self.spd * x_axis\n self.vy = self.spd * y_axis\n\n if y_axis != 0:\n self.last_movey = y_axis\n self.last_movex = 0\n elif x_axis != 0:\n self.last_movex = x_axis\n self.last_movey = 0\n\n # diagonals\n if self.vx != 0 and self.vy != 0:\n self.vx *= 0.707\n self.vy *= 0.707\n\n if game.get_vbutton_jp('drop') or game.get_joystick_jp(J_BUTTONS['X']):\n self.drop_item()\n elif game.get_vbutton_jp('pickup') or game.get_joystick_jp(J_BUTTONS['A']):\n self.pickup_items()\n\n return self.is_moving()", "def antenny_init_components(self):\n if self.antenny_config is None:\n print(\"Please load a config before initializing components\")\n if not self.antenny_config.check():\n print(\"Config {} is not valid, failed to initialize\".format(self.antenny_config.get_name()))\n print(\"If you believe this is an error, or you have modified the base components of the antenny board, \"\n \"please check Config class as well as the default configs for more details.\")\n\n self.imu_init()\n self.pwm_controller_init()\n self.elevation_servo_init()\n self.azimuth_servo_init()\n self.screen_init()\n self.gps_init()\n self.telemetry_init()\n self.platform_init()", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.sensor_manager = SensorManager.instance()\n\n self.pwm = Adafruit_PCA9685.PCA9685(address=0x40, busnum=1) # create PCA9685-object at I2C-port\n self.pwm.set_pwm_freq(50)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(20, GPIO.OUT)\n GPIO.setup(21, GPIO.OUT)\n GPIO.setup(26, GPIO.OUT)\n self.driven_distance = 0", "def joy_callback(self, joy):\n self.inputs[JoyInput.A] = joy.buttons[0]\n self.inputs[JoyInput.B] = joy.buttons[1]\n self.inputs[JoyInput.X] = joy.buttons[2]\n self.inputs[JoyInput.Y] = joy.buttons[3]\n self.inputs[JoyInput.LB] = joy.buttons[4]\n self.inputs[JoyInput.RB] = joy.buttons[5]\n self.inputs[JoyInput.BACK] = joy.buttons[6]\n self.inputs[JoyInput.START] = joy.buttons[7]\n self.inputs[JoyInput.POWER] = joy.buttons[8]\n self.inputs[JoyInput.LS] = joy.buttons[9]\n self.inputs[JoyInput.RS] = joy.buttons[10]\n\n self.inputs[JoyInput.LS_X] = joy.axes[0]\n self.inputs[JoyInput.LS_Y] = joy.axes[1]\n self.inputs[JoyInput.LT] = joy.axes[2]\n self.inputs[JoyInput.RS_X] = joy.axes[3]\n self.inputs[JoyInput.RS_Y] = joy.axes[4]\n self.inputs[JoyInput.RT] = joy.axes[5]\n self.inputs[JoyInput.CROSS_X] = joy.axes[6]\n self.inputs[JoyInput.CROSS_Y] = joy.axes[7]", "def __init__(self):\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.MAX_SPEED = 900\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "def on_init(self):\n self.controller = gameController.Controller()", "def map_joystick(joystick):\n left = baxter_interface.Limb('left')\n right = baxter_interface.Limb('right')\n grip_left = baxter_interface.Gripper('left')\n grip_right = baxter_interface.Gripper('right')\n lcmd = {}\n rcmd = {}\n\n #available joints\n lj = left.joint_names()\n rj = right.joint_names()\n\n #abbreviations\n jhi = lambda s: joystick.stick_value(s) > 0\n jlo = lambda s: joystick.stick_value(s) < 0\n bdn = joystick.button_down\n 
bup = joystick.button_up\n\n def print_help(bindings_list):\n print(\"press any keyboard key to quit.\")\n for bindings in bindings_list:\n for (test, cmd, doc) in bindings:\n if callable(doc):\n doc = doc()\n print(\"%s: %s\" % (str(test[1][0]), doc))\n\n bindings_list = []\n bindings = (\n ((bdn, ['rightTrigger']), (grip_left.close, []), \"left gripper close\"),\n ((bup, ['rightTrigger']), (grip_left.open, []), \"left gripper open\"),\n ((bdn, ['leftTrigger']), (grip_right.close, []), \"right gripper close\"),\n ((bup, ['leftTrigger']), (grip_right.open, []), \"right gripper open\"),\n ((jlo, ['leftStickHorz']), (set_j, [rcmd, right, rj, 0, 0.1]), lambda i=0:\"right inc \"+rj[i]),\n ((jhi, ['leftStickHorz']), (set_j, [rcmd, right, rj, 0, -0.1]), lambda i=0:\"right dec \"+rj[i]),\n ((jlo, ['rightStickHorz']), (set_j, [lcmd, left, lj, 0, 0.1]), lambda i=0:\"left inc \"+lj[i]),\n ((jhi, ['rightStickHorz']), (set_j, [lcmd, left, lj, 0, -0.1]), lambda i=0:\"left dec \"+lj[i]),\n ((jlo, ['leftStickVert']), (set_j, [rcmd, right, rj, 1, 0.1]), lambda i=1:\"right inc \"+rj[i]),\n ((jhi, ['leftStickVert']), (set_j, [rcmd, right, rj, 1, -0.1]), lambda i=1:\"right dec \"+rj[i]),\n ((jlo, ['rightStickVert']), (set_j, [lcmd, left, lj, 1, 0.1]), lambda i=1:\"left inc \"+lj[i]),\n ((jhi, ['rightStickVert']), (set_j, [lcmd, left, lj, 1, -0.1]), lambda i=1:\"left dec \"+lj[i]),\n ((bdn, ['rightBumper']), (rotate, [lj]), \"left: cycle joint\"),\n ((bdn, ['leftBumper']), (rotate, [rj]), \"right: cycle joint\"),\n ((bdn, ['btnRight']), (grip_left.calibrate, []), \"left calibrate\"),\n ((bdn, ['btnLeft']), (grip_right.calibrate, []), \"right calibrate\"),\n ((bdn, ['function1']), (print_help, [bindings_list]), \"help\"),\n ((bdn, ['function2']), (print_help, [bindings_list]), \"help\"),\n )\n bindings_list.append(bindings)\n\n rate = rospy.Rate(100)\n print_help(bindings_list)\n print(\"press any key to stop. 
\")\n while not rospy.is_shutdown():\n c = iodevices.getch()\n if c:\n if c == '?':\n print_help(bindings_list)\n else:\n return True\n for (test, cmd, doc) in bindings:\n if test[0](*test[1]):\n cmd[0](*cmd[1])\n if callable(doc):\n print(doc())\n else:\n print(doc)\n if len(lcmd):\n left.set_joint_positions(lcmd)\n lcmd.clear()\n if len(rcmd):\n right.set_joint_positions(rcmd)\n rcmd.clear()\n rate.sleep()\n return False", "def joystickController(self):\n return self.__joystickController", "def create_device(self, layout):\n events = {ecodes.EV_ABS: [], ecodes.EV_KEY: [],\n ecodes.EV_REL: []}\n\n # Joystick device\n if layout.axes or layout.buttons or layout.hats:\n self.joystick_dev = next_joystick_device()\n\n for name in layout.axes:\n params = layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)\n if not absInfoUsesValue:\n params = params[1:]\n events[ecodes.EV_ABS].append((name, params))\n\n for name in layout.hats:\n params = (0, -1, 1, 0, 0)\n if not absInfoUsesValue:\n params = params[1:]\n events[ecodes.EV_ABS].append((name, params))\n\n for name in layout.buttons:\n events[ecodes.EV_KEY].append(name)\n\n if layout.mouse:\n self.mouse_pos = {}\n self.mouse_rel = {}\n self.mouse_analog_sensitivity = float(\n layout.mouse_options.get(\"MOUSE_SENSITIVITY\",\n DEFAULT_MOUSE_SENSITIVTY)\n )\n self.mouse_analog_deadzone = int(\n layout.mouse_options.get(\"MOUSE_DEADZONE\",\n DEFAULT_MOUSE_DEADZONE)\n )\n self.scroll_repeat_delay = float(\n layout.mouse_options.get(\"MOUSE_SCROLL_REPEAT_DELAY\",\n DEFAULT_SCROLL_REPEAT_DELAY)\n )\n self.scroll_delay = float(\n layout.mouse_options.get(\"MOUSE_SCROLL_DELAY\",\n DEFAULT_SCROLL_DELAY)\n )\n\n for name in layout.mouse:\n if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):\n if ecodes.REL_WHEEL not in events[ecodes.EV_REL]:\n # This ensures that scroll wheel events can work\n events[ecodes.EV_REL].append(ecodes.REL_WHEEL)\n else:\n events[ecodes.EV_REL].append(name)\n self.mouse_rel[name] = 0.0\n\n self.device = UInput(name=layout.name, events=events,\n bustype=layout.bustype, vendor=layout.vendor,\n product=layout.product, version=layout.version)\n self.layout = layout", "def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()", "def listen(self):\n\n if not self.key_data:\n self.key_data = {}\n for i in range(1024):\n self.key_data[i] = False\n\n if not self.axis_data:\n self.axis_data = {}\n for i in range(self.controller.get_numaxes()):\n self.axis_data[i] = 0.0\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n debug_toggle = True\n print_state_toggle = True\n\n # These parameters define how frequesnt speed setting sent over serial to arduino\n speed_threshold = 10.0 # sets update threshold\n speed_step = 1 # sets acceleration\n speed_delay = 0.01 # delay per 1 step in sec\n\n mode_switch = \"j\" # control mode: k - keyboard, j - joystick\n\n # Parameters for keyboard control mode\n speed = 0.0\n speed_current = 0\n direction = \"r\" # r - release, f - forward, b - backward\n direction_current = \"r\"\n\n # Parameters for joystick control mode\n speed_l = 0\n speed_r = 0\n prev_speed_l = 0\n prev_speed_r = 0\n prev_btn = False\n\n while True:\n prev = self.axis_data\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n self.key_data[event.key] = True\n elif 
event.type == pygame.KEYUP:\n self.key_data[event.key] = False\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n # check for exit command\n if self.button_data[9] or self.key_data[pygame.QUIT] or self.key_data[pygame.K_ESCAPE]:\n pygame.quit()\n break\n\n # toggle debug\n if self.key_data[pygame.K_d]:\n if debug_toggle:\n print(\"Toggle debug\")\n self.ser.write(b'd')\n debug_toggle = False\n else:\n debug_toggle = True\n\n # print out motors status\n if self.key_data[pygame.K_p]:\n if print_state_toggle:\n self.ser.write(b'p')\n if self.ser.in_waiting:\n print (self.ser.readline())\n print_state_toggle = False\n else:\n print_state_toggle = True\n\n if self.key_data[pygame.K_1] and mode_switch != \"k\":\n mode_switch = \"k\"\n\n if self.key_data[pygame.K_2] and mode_switch != \"j\":\n print(\"Joystick mode: ON\")\n mode_switch = \"j\"\n\n if mode_switch == \"k\": # keyboard control mode\n # accelearte forward\n if self.key_data[pygame.K_a] and direction != \"r\":\n if speed < 255.0:\n speed = speed + speed_step\n sleep(speed_delay)\n # accelerate backward\n if self.key_data[pygame.K_z] and direction != \"r\":\n if speed > 0.0:\n speed = speed - speed_step\n sleep(speed_delay)\n\n if self.key_data[pygame.K_UP] and direction != \"f\":\n direction = \"f\"\n if self.key_data[pygame.K_DOWN] and direction != \"b\":\n direction = \"b\"\n if self.key_data[pygame.K_UP] == False and direction == \"f\":\n direction = \"r\"\n if self.key_data[pygame.K_DOWN] == False and direction == \"b\":\n direction = \"r\"\n\n if math.fabs(speed - speed_current) > speed_threshold or direction != direction_current:\n # print(\"{0}, {1}, {2}, {3}\".format(speed, speed_current, direction, direction_current))\n direction_current = direction\n if direction == \"r\":\n speed = 0.0\n speed_current = int(speed)\n str_r = \"sr\" + direction_current + str(speed_current) + \"e\"\n str_l = \"sl\" + direction_current + str(speed_current) + \"e\"\n print(str_l)\n print(str_r)\n self.ser.write(str_r.encode())\n self.ser.write(str_l.encode())\n\n if(self.key_data[pygame.K_LEFT]):\n str_rf = \"srf\" + str(speed_current) + \"e\"\n self.ser.write(str_rf.encode())\n str_lf = \"slf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_lf.encode())\n elif(self.key_data[pygame.K_RIGHT]):\n str_rb = \"srf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_rb.encode())\n str_lb = \"slf\" + str(speed_current) + \"e\"\n self.ser.write(str_lb.encode())\n\n if (self.key_data[pygame.K_UP] == False and self.key_data[pygame.K_DOWN] == False) and (self.key_data[pygame.K_a] == False and self.key_data[pygame.K_z] == False):\n speed = 0\n speed_current = speed\n direction = \"r\"\n direction_current = direction\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n if mode_switch == \"j\": # joystick control mode\n if self.ser.in_waiting:\n data = str(self.ser.readline().strip())\n data = data[2 :len(data)-1]\n print(data)\n #self.aio.send('Team Hacky Slackers', data)\n\n prev_speed_l = speed_l\n prev_speed_r = speed_r\n speed_threshold = 1\n\n #simplified linear mapping for controller\n speed_l = int((self.axis_data[0]*(-50)) + 90)\n speed_r = int(math.fabs(self.axis_data[3]*255))\n #print(self.axis_data)\n #print(\"curr_l: {0}, perv_l: {1}, 
curr_r:{2}, perv_r:{3}\".format(speed_l, prev_speed_l, speed_r,prev_speed_r))\n\n if self.axis_data[0] < -0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lf = \"slf\" + str(speed_l) + \"e\"\n self.ser.write(str_lf.encode())\n elif self.axis_data[0] > 0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lb = \"slb\" + str(speed_l) + \"e\"\n self.ser.write(str_lb.encode())\n\n\n if self.axis_data[3] < -0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rf = \"srf\" + str(speed_r) + \"e\"\n self.ser.write(str_rf.encode())\n elif self.axis_data[3] > 0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rb = \"srb\" + str(speed_r) + \"e\"\n self.ser.write(str_rb.encode())\n\n if ( self.axis_data[0] >= -0.05 and self.axis_data[0] <= 0.05 ) and ( self.axis_data[3] >= -0.05 and self.axis_data[3] <= 0.05 ):\n speed_l = 90\n speed_r = 0\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n #Logic to call RFID scan only once per click of R1 button\n # if(prev_btn != self.button_data[5]):\n # prev_btn = self.button_data[5]\n # if self.button_data[5] :\n # print(\"Scanning for RFID Card\")\n # self.ser.write(\"i\".encode())\n\n # clear()\n # pprint.pprint(self.button_data)\n # pprint.pprint(self.axis_data)\n # pprint.pprint(self.hat_data)" ]
[ "0.77946836", "0.7709619", "0.7708428", "0.7690953", "0.76627296", "0.7585769", "0.7510696", "0.7043306", "0.69623667", "0.68985695", "0.6756267", "0.67009485", "0.65528977", "0.6516639", "0.64917547", "0.63566685", "0.63212687", "0.6281358", "0.61667794", "0.612322", "0.61198604", "0.61082023", "0.6049388", "0.60411876", "0.59494656", "0.5938758", "0.59214735", "0.5913836", "0.59133005", "0.59058243" ]
0.82271236
0
Shift the colormap by dragging the cursor left or right. Stretch the colormap by dragging the cursor up or down.
def ms_contrast(self, viewer, event, data_x, data_y, msg=True): if not self.cancmap: return False event.accept() msg = self.settings.get('msg_contrast', msg) x, y = self.get_win_xy(viewer) if event.state == 'move': self._tweak_colormap(viewer, x, y, 'preview') elif event.state == 'down': self._start_x, self._start_y = x, y if msg: self.onscreen_message( "Shift and stretch colormap (drag mouse)", delay=1.0) else: self.onscreen_message(None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift_cmap(cmap, start=0., locpoint=0.5, stop=1.0, name='centered'):\r\n\r\n # declare a colour + transparency dictionary\r\n cdict={'red':[], 'green':[], 'blue':[], 'alpha':[]}\r\n\r\n # regular index to compute the colors\r\n RegInd = np.linspace(start, stop, cmap.N)\r\n\r\n # shifted index to match what the data should be centered on\r\n ShiftInd = np.hstack([np.linspace(0., locpoint, int(cmap.N / 2),\r\n endpoint=False),\r\n np.linspace(locpoint, 1., int(cmap.N / 2))])\r\n\r\n # associate the regular cmap's colours with the newly shifted cmap colour\r\n for RI, SI in zip(RegInd, ShiftInd):\r\n\r\n # get standard indexation of red, green, blue, alpha\r\n r, g, b, a = cmap(RI)\r\n\r\n cdict['red'].append((SI, r, r))\r\n cdict['green'].append((SI, g, g))\r\n cdict['blue'].append((SI, b, b))\r\n cdict['alpha'].append((SI, a, a))\r\n\r\n return LinearSegmentedColormap(name, cdict)", "def _on_colormap_change(self, event=None):\n with self.layer.events.colormap.blocker():\n self.colormap_combobox.setCurrentIndex(\n self.colormap_combobox.findData(self.layer.colormap)\n )", "def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name=\"shiftedcmap\"):\n cdict = {\"red\": [], \"green\": [], \"blue\": [], \"alpha\": []}\n\n # regular index to compute the colors\n reg_index = np.linspace(start, stop, 257)\n\n # shifted index to match the data\n shift_index = np.hstack(\n [\n np.linspace(0.0, midpoint, 128, endpoint=False),\n np.linspace(midpoint, 1.0, 129, endpoint=True),\n ]\n )\n\n for ri, si in zip(reg_index, shift_index):\n r, g, b, a = cmap(ri)\n\n cdict[\"red\"].append((si, r, r))\n cdict[\"green\"].append((si, g, g))\n cdict[\"blue\"].append((si, b, b))\n cdict[\"alpha\"].append((si, a, a))\n\n newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)\n\n return newcmap", "def _move_cursors_to_pos(self):\n for axis in range(3):\n x, y = self._vox[list(self._xy_idx[axis])]\n self._images['cursor_v'][axis].set_xdata([x, x])\n self._images['cursor_h'][axis].set_ydata([y, y])\n self._zoom(0) # doesn't actually zoom just resets view to center\n self._update_images(draw=True)\n self._update_moved()", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def changeColor(self):\n self.layer.new_colormap()", "def shifted_color_map(cmap, start=0, midpoint=0.5, stop=1.0,\n name='shiftedcmap', data=None):\n if data is not None:\n midpoint = midpoint_to_shift_color_to_zero(data)\n\n cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}\n\n # regular index to compute the colors\n reg_index = np.linspace(start, stop, 257)\n\n # shifted index to match the data\n shift_index = np.hstack([\n np.linspace(0.0, midpoint, 128, endpoint=False),\n np.linspace(midpoint, 1.0, 129, endpoint=True)])\n\n for ri, si in zip(reg_index, shift_index):\n r, g, b, a = cmap(ri)\n cdict['red'].append((si, r, r))\n cdict['green'].append((si, g, g))\n cdict['blue'].append((si, b, b))\n cdict['alpha'].append((si, a, a))\n\n newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)\n plt.register_cmap(cmap=newcmap)\n\n return newcmap", "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def 
set_zooming_mouse(self):\n # Zooming: right button mouse\n self.set('RightClickMove', 'Zoom',\n param_getter=lambda p: (p[\"mouse_position_diff\"][0]*2.5,\n p[\"mouse_press_position\"][0],\n p[\"mouse_position_diff\"][1]*2.5,\n p[\"mouse_press_position\"][1]))", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def enableZoomOut(self):\n self.zoomOutID = self.canvas.mpl_connect('button_press_event', self.onZoomOut)\n self.master.config(cursor = \"cross\")", "def mouseReleaseEvent(self, event):\n width = self.frameGeometry().width()\n height = self.frameGeometry().height()\n cursor = QtGui.QCursor()\n new_pos = self.mapFromGlobal(cursor.pos())\n x = new_pos.x()\n y = new_pos.y()\n self.__selector_y = y/float(height) # normalized value of the y position\n \tself.__selector_x = x/float(width) #normalised value of the x position\n self.updatePixelColor()\n self.repaint()", "def mouseMoveEvent(self, e):\n if e.pos().y() == self.offset:\n return\n adder = (self.offset - e.y())\n self.deltacount += adder\n #adder *= self.accelerator\n adder *= (abs(adder) * 0.01)\n #self._state[0] = max(self._min[0], min(self._max[0], self._state[0] + adder))\n QtGui.qApp.emit( QtCore.SIGNAL(\"deltaChanged\"), self, adder)\n #self._param.update()\n QtGui.QCursor.setPos(self.origo)", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def shift(x, row_ind, col_ind, row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='constant', cval=0.):\n h, w = x.shape[row_axis], x.shape[col_axis]\n tx = row_ind - (h / 2)\n ty = col_ind - (w / 2) \n translation_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n\n transform_matrix = translation_matrix # no need to do offset\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n \n return x", "def mouseMoveEvent(self, ev):\n shift = ev.modifiers() & QtCore.Qt.ShiftModifier\n ctrl = ev.modifiers() & QtCore.Qt.ControlModifier\n if shift:\n y = ev.pos().y()\n if not hasattr(self, '_prev_zoom_pos') or not self._prev_zoom_pos:\n self._prev_zoom_pos = y\n return\n dy = y - self._prev_zoom_pos\n def delta():\n return -dy * 5\n ev.delta = delta\n self._prev_zoom_pos = y\n self.wheelEvent(ev)\n elif ctrl:\n pos = ev.pos().x(), ev.pos().y()\n if not hasattr(self, '_prev_pan_pos') or not self._prev_pan_pos:\n self._prev_pan_pos = pos\n return\n dx = pos[0] - self._prev_pan_pos[0]\n dy = pos[1] - self._prev_pan_pos[1]\n self.pan(dx, dy, 0, relative=True)\n self._prev_pan_pos = pos\n else:\n super(PlotObject, self).mouseMoveEvent(ev)", "def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except 
ValueError:\n return", "def set_zoombox_keyboard(self):\n # Idem but with CTRL + left button mouse \n self.set('LeftClickMove', 'ZoomBox',\n key_modifier='Control',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def set_cmap_cb(self, w, index):\n old_cmap_name = self._cmap_name\n name = cmap.get_names()[index]\n self.cmap_name = name\n self.pipeline.push(StageAction(self,\n dict(cmap_name=old_cmap_name),\n dict(cmap_name=self._cmap_name),\n descr=\"rgbmap / change cmap\"))\n\n self.pipeline.run_from(self)", "def mousePressEvent(self, event):\n self.dragging = True\n self.moved = False\n self.parent.setCursor(QtCore.Qt.ClosedHandCursor)", "def _updateColormapImage(self, *args, **kwargs):\n if self._colormapImage is not None:\n self._colormapImage = None\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def keypress(self, event):\n stepsize = 0.15 # pixels, i.e. 0.05 arcsec\n if event.key == 'right':\n if self.ccd == 2:\n self.shift_crpix1 -= stepsize\n else:\n self.shift_crpix2 += stepsize\n elif event.key == 'left':\n if self.ccd == 2:\n self.shift_crpix1 += stepsize\n else:\n self.shift_crpix2 -= stepsize\n elif event.key == ']':\n if self.ccd == 2:\n self.shift_crpix1 -= 5*stepsize\n else:\n self.shift_crpix2 += 5*stepsize\n elif event.key == '[':\n if self.ccd == 2:\n self.shift_crpix1 += 5*stepsize\n else:\n self.shift_crpix2 -= 5*stepsize\n elif event.key == 'up':\n if self.ccd == 2:\n self.shift_crpix2 += stepsize\n else:\n self.shift_crpix1 += stepsize\n elif event.key == 'down':\n if self.ccd == 2:\n self.shift_crpix2 -= stepsize\n else:\n self.shift_crpix1 -= stepsize\n elif event.key == \"'\":\n if self.ccd == 2:\n self.shift_crpix2 += 5*stepsize\n else:\n self.shift_crpix1 += 5*stepsize\n elif event.key == '/':\n if self.ccd == 2:\n self.shift_crpix2 -= 5*stepsize\n else:\n self.shift_crpix1 -= 5*stepsize\n elif event.key == '1':\n self.shift_cd1_1 += 1e-8\n elif event.key == '2':\n self.shift_cd1_1 -= 1e-8\n elif event.key == '3':\n self.shift_cd1_2 += 1e-8\n elif event.key == '4':\n self.shift_cd1_2 -= 1e-8\n elif event.key == '5':\n self.shift_cd2_1 += 1e-8\n elif event.key == '6':\n self.shift_cd2_1 -= 1e-8\n elif event.key == '7':\n self.shift_cd2_2 += 2e-8\n elif event.key == '8':\n self.shift_cd2_2 -= 2e-8\n elif event.key == 'r': # Reset\n self.shift_crpix1 = 0.0\n self.shift_crpix2 = 0.0\n self.shift_cd1_1 = 0.0\n self.shift_cd1_2 = 0.0\n self.shift_cd2_1 = 0.0\n self.shift_cd2_2 = 0.0\n elif event.key == 'o': # OK, field does not need fix\n self.mark_done()\n elif event.key == 'w': # Write WCS\n self.write() # Save the results to a csv file\n self.mark_done()\n # Now update the plot for the slight change in WCS parameters\n self.update()", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def mouse_middle_down(self):\n pass", "def _hotswap(self, color: 'Color') -> 'Color':\n\n self._space, self._coords = self.CS_MAP[color.space()], color[:]\n return self", "def normalize_cmap(self):\n vmax, 
vmin = np.max(self.values), np.min(self.values)\n self.midpoint = 1 - vmax/(vmax + abs(vmin))\n if self.midpoint > 0.5:\n self.start, self.stop = 0, 0.5 + (1-self.midpoint)\n else:\n self.start, self.stop = 0.5 - self.midpoint, 1", "def change_layer_with_keys(self, event):\n if event.key in (pg.K_w, pg.K_UP):\n index = (LAYERS.index(self.map_state.layer)-1)%len(LAYERS)\n self.layer_select.buttons[index].press()\n elif event.key in (pg.K_s, pg.K_DOWN):\n index = (LAYERS.index(self.map_state.layer)+1)%len(LAYERS)\n self.layer_select.buttons[index].press()", "def c_map(val):\n return int(remap(val, -1, 1, 0, 255))", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour, mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (\n maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart" ]
[ "0.5790858", "0.57863927", "0.5722508", "0.56208885", "0.5615264", "0.56151456", "0.54330593", "0.54213333", "0.52664816", "0.52410126", "0.5228845", "0.5213258", "0.520547", "0.5198562", "0.5171602", "0.5124414", "0.5097131", "0.5093487", "0.5091165", "0.5089418", "0.50846124", "0.50455993", "0.5041792", "0.50262266", "0.5013308", "0.501157", "0.50101125", "0.4981155", "0.49806538", "0.49777427" ]
0.58629936
0
An interactive way to restore the colormap contrast settings after a warp operation.
def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True): if not self.cancmap: return False event.accept() if event.state == 'down': self.restore_contrast(viewer, msg=msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ms_contrast(self, viewer, event, data_x, data_y, msg=True):\n if not self.cancmap:\n return False\n event.accept()\n msg = self.settings.get('msg_contrast', msg)\n\n x, y = self.get_win_xy(viewer)\n\n if event.state == 'move':\n self._tweak_colormap(viewer, x, y, 'preview')\n\n elif event.state == 'down':\n self._start_x, self._start_y = x, y\n if msg:\n self.onscreen_message(\n \"Shift and stretch colormap (drag mouse)\", delay=1.0)\n else:\n self.onscreen_message(None)", "def changeColor(self):\n self.layer.new_colormap()", "def change_contrast(self, b):\n self.fft_plot.set_clim(0, self.contrast_slider.value * self.v_range)\n clear_output()\n display(self.fig)", "def set_contrast(value):\n command([0x21, 0x14, value, 0x20, 0x0c])", "def _on_colormap_change(self, event=None):\n with self.layer.events.colormap.blocker():\n self.colormap_combobox.setCurrentIndex(\n self.colormap_combobox.findData(self.layer.colormap)\n )", "def set_contrast(level):\n send_command(0x81)\n send_command(level)", "def SetColorMap(self, colormap, contrast=None, bias=None):\n extra = ''\n if contrast is not None:\n extra += '%f,' % contrast\n if bias is not None:\n extra += '%f,' % bias\n fmt = dict(wid=self.wid,cmap=colormap, extra=extra, suffix=self.suffix)\n command = \"JS9.SetColormap('{cmap}', {extra} {{display:'{wid}{suffix}'}});\".format(**fmt)\n get_ipython().run_cell_magic('javascript', '', command)", "def test_colormap():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(colors=['k', 'w', 'r'],\n controls=[0.0, 0.1, 1.0]),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_kwr.png\")", "def update_colormap(self, to_overlay=None, **kwargs):\n if self._n_overlay >= 1:\n overlay = self._n_overlay - 1 if to_overlay is None else to_overlay\n # Define the colormap data :\n data_lim = self._data_lim[overlay]\n col = np.linspace(data_lim[0], data_lim[1], LUT_LEN)\n self._text2d_data[overlay, ...] 
= Colormap(**kwargs).to_rgba(col)\n self._text2d.set_data(self._text2d_data)\n self.update()", "def test_colormap_discrete():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(colors=['r', 'g', 'b'],\n interpolation='zero'),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_rgb.png\")", "def image_local_enhance_contrast(image: np.ndarray):\n \n #  Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n image = filters.rank.enhance_contrast(image, morphology.disk(2))\n\n #  Resize the iamge back to a shape of (2304, )\n return image_as_array(image)", "def _updateColormapImage(self, *args, **kwargs):\n if self._colormapImage is not None:\n self._colormapImage = None\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def set_cmap_cb(self, w, index):\n old_cmap_name = self._cmap_name\n name = cmap.get_names()[index]\n self.cmap_name = name\n self.pipeline.push(StageAction(self,\n dict(cmap_name=old_cmap_name),\n dict(cmap_name=self._cmap_name),\n descr=\"rgbmap / change cmap\"))\n\n self.pipeline.run_from(self)", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def test_colormap_coolwarm():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap='coolwarm', clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_coolwarm.png\")", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour, mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (\n maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def setColorMode(mode='full'):\n mdict = {'low':'NONE','full':'FULL'}\n dislin.clrmod(mdict[mode])", "def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0", "def restoreRenderSettings():\n bpy.context.scene.render.engine = cache.values[\"engine\"]\n bpy.context.scene.render.film_transparent = cache.values[\"transparent\"]\n\n bpy.context.scene.render.filepath = cache.values[\"filepath\"]\n bpy.context.scene.render.image_settings.file_format = cache.values[\"format\"]\n bpy.context.scene.render.image_settings.color_mode = cache.values[\"mode\"]\n bpy.context.scene.render.image_settings.color_depth = cache.values[\"depth\"]\n\n bpy.context.scene.render.resolution_x = cache.values[\"resolutionX\"]\n bpy.context.scene.render.resolution_y = cache.values[\"resolutionY\"]\n 
bpy.context.scene.render.resolution_percentage = cache.values[\"percentage\"]\n bpy.context.scene.render.pixel_aspect_x = cache.values[\"aspectX\"]\n bpy.context.scene.render.pixel_aspect_y = cache.values[\"aspectY\"]\n\n if cache.values[\"world\"]:\n bpy.context.scene.world = cache.values[\"world\"]", "def reset_active_settings(self):\n self.compute = yacman.YacAttMap()\n return True", "def unflip_colors(self):\n self.colors[self.bondA] = self.colA\n self.colors[self.bondB] = self.colB\n self.set_bcol(self.bondA)\n self.set_bcol(self.bondB)\n return", "def enhanceContrast(image, mask, target_path, name, save=False):\n \n\n \n # Contrast stretching\n p2, p98 = np.percentile(image, (2, 98))\n image_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))\n \n # Equalization\n image_eq = exposure.equalize_hist(image)\n \n # Adaptive Equalization\n image_adapteq = exposure.equalize_adapthist(image, clip_limit=0.03)\n \n # Display results\n fig = plt.figure(figsize=(19, 13))\n axes = np.zeros((2, 4), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 4, 1)\n for i in range(1, 4):\n axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 4):\n axes[1, i] = fig.add_subplot(2, 4, 5+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image, mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('Low contrast image')\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_rescale, mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('Contrast stretching')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_eq, mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('Histogram equalization')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_adapteq,mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('Adaptive equalization')\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n return image_adapteq", "def resetOpacity(self):\n opa = (0,)\n for i in range(1,256):\n opa += (i,)\n if self._displayPjt:\n self._displayPjt.setOpacityPalette(opa)\n if self._displayUsr:\n self._displayUsr.setOpacityPalette(opa)\n if self._displayVtk:\n self._displayVtk.setOpacityPalette(opa)", "def _adjust_contrast_img(self, results, factor=1.0):\n for key in results.get('img_fields', ['image']):\n img = results[key]\n results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)", "def apply_colormap_on_image(org_im, activation, colormap_name):\n # Get colormap\n color_map = mpl_color_map.get_cmap(colormap_name)\n no_trans_heatmap = color_map(activation)\n # Change alpha channel in colormap to make sure original image is displayed\n heatmap = copy.copy(no_trans_heatmap)\n heatmap[:, :, 3] = 0.4\n heatmap = PILImage.fromarray((heatmap*255).astype(np.uint8))\n no_trans_heatmap = PILImage.fromarray((no_trans_heatmap*255).astype(np.uint8))\n\n # Apply heatmap on iamge\n heatmap_on_image = PILImage.new(\"RGBA\", org_im.size)\n heatmap_on_image = PILImage.alpha_composite(heatmap_on_image, org_im.convert('RGBA'))\n heatmap_on_image = PILImage.alpha_composite(heatmap_on_image, heatmap)\n return no_trans_heatmap, heatmap_on_image", "def contrastSide(trim = True, exclOverlap = False, exclY = True):\n\t\n\tfig 
= plt.figure()\n\ttitle = \"Effect of contrast - exclOverlap = %s - exclY = %s\" % (exclOverlap, exclY)\n\tplt.suptitle(title)\n\t\n\tfor sacc in [\"1\", \"2\", \"3\"]:\n\t\t\n\t\tcolList = [\"#ef2929\", \"#3465a4\",\"#73d216\", \"#f57900\"]\n\t\tplt.subplot(1,3, int(sacc))\n\t\tplt.title(\"sacc = %s\"% (sacc))\n\t\t\n\t\t# Exp 1:\n\t\texp = \"004A\"\n\t\tdm1 = getDM.getDM(exp = exp, driftCorr = True, onlyControl = False)\n\n\t\t# This is the same for corrected landing positions (the saccade\n\t\t# doesn't change; only the reference point does)\n\t\tdm1 = dm1.select(\"endX%sNorm != ''\" % sacc, verbose = False)\n\t\tdm1 = dm1.select(\"endX%sNorm > -.5\" % sacc, verbose = False)\n\t\tdm1 = dm1.select(\"endX%sNorm < .5\" % sacc, verbose = False)\n\t\t\n\t\tif exclY:\n\t\t\tdm1 = dm1.select(\"endY%sNorm != ''\" % sacc)\n\t\t\tdm1 = dm1.select(\"endY%sNorm > -.5\" % sacc)\n\t\t\tdm1 = dm1.select(\"endY%sNorm < .5\" % sacc)\n\t\t\t\n\t\tfor dv in [\"endX%sNorm\" % sacc, \"endX%sCorrNorm\" % sacc]:\n\t\t\t\n\t\t\t#If wanted, trim the data\n\t\t\tif trim:\n\t\t\t\t_dm1 = dm1.selectByStdDev(keys = [\"contrast_side\", \"file\"], dv = dv)\n\n\t\t\t# For experiment 1 there are not enough third fixations anyway,\n\t\t\t# not even when not filtering on-object on the y-axis.\n\t\t\tif exp == \"004A\" and sacc == \"3\":\n\t\t\t\t\n\t\t\t\tcolList = [\"#ef2929\", \"#3465a4\"]\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# Get pivot matrix:\n\t\t\tpm = PivotMatrix(_dm1, [\"contrast_side\"], [\"file\"], dv=dv, colsWithin=True)#, xLabels = [\"left\", \"control\", \"right\"])\n\t\t\tcol = colList.pop()\n\t\t\tpm.plot(fig = fig, nLvl1 = 1, colors = [col])\n\n\t\t# Experiment 2 and 3:\n\t\tdv = \"endX%sNorm\" % sacc\n\t\t\n\t\tfor exp in [\"004B\", \"004C\"]:\n\t\t\tif exclY and exp == \"004B\" and sacc == \"3\":\n\t\t\t\tcolList = [\"#ef2929\"]\n\t\t\t \n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif exp == \"004C\" and exclOverlap:\n\t\t\t\tdm = dm.select(\"gap == 'zero'\")\n\t\t\t\n\t\t\tprint \"EXP = \", exp\n\t\t\t\n\t\t\tdm = getDM.getDM(exp = exp, driftCorr = True, onlyControl = False)\n\t\t\t\n\t\t\t# This is the same for corrected landing positions (the saccade\n\t\t\t# doesn't change; only the reference point does)\n\t\t\tdm = dm.select(\"endX%sNorm != ''\" % sacc, verbose = False)\n\t\t\tdm = dm.select(\"endX%sNorm > -.5\" % sacc, verbose = False)\n\t\t\tdm = dm.select(\"endX%sNorm < .5\" % sacc, verbose = False)\n\t\t\t\n\t\t\tif exclY:\n\t\t\t\tdm = dm.select(\"endY%sNorm != ''\" % sacc)\n\t\t\t\tdm = dm.select(\"endY%sNorm > -.5\" % sacc)\n\t\t\t\tdm = dm.select(\"endY%sNorm < .5\" % sacc)\n\n\t\t\t\n\t\t\t#If wanted, trim the data\n\t\t\tif trim:\n\t\t\t\t_dm = dm.selectByStdDev(keys = [\"contrast_side\", \"file\"], dv = dv)\n\t\t\t# Get pivot matrix:\n\t\t\tpm = PivotMatrix(_dm, [\"contrast_side\"], [\"file\"], dv=dv, colsWithin=True)\n\t\t\tcol = colList.pop()\n\t\t\tpm.plot(fig = fig, nLvl1 = 1, colors = [col])\n\t\t\n\t\t# Modify plot:\n\t\tplt.ylim(-.2, .2)\n\t\t\n\t\tplt.legend([\"Exp1 (abs)\", \"Exp1 (corr)\", \"Exp2 (abs)\", \"Exp2 (sim)\"])\n\t\tif sacc == \"3\":\n\t\t\tplt.legend([\"Exp2 (abs)\", \"Exp2 (sim)\"])\n\t\t\tif exclY:\n\t\t\t\tplt.legend([\"Exp2 (sim)\"])\n\t\t\n\t\tplt.axhline(0, color = \"#888a85\", linestyle = \"--\", linewidth = 2)\n\t\n\tplt.savefig(\"%s.png\" % title)", "def popcolor():\r\n _pycolor.setcolor(_colorstack.pop())", "def AutoContrast(img: Image, _: float) -> Image:\n return PIL.ImageOps.autocontrast(img)", "def extreme_contrast(picture: Image) -> Image:\r\n \r\n contrast_pic = 
copy(picture)\r\n for x, y, (r, g, b) in contrast_pic:\r\n if r <= 127:\r\n r = 0\r\n if r >= 128 and r <= 255:\r\n r = 255\r\n if g <= 127:\r\n g = 0\r\n if g >= 128 and g <= 255:\r\n g = 255 \r\n if b <= 127:\r\n b = 0\r\n if b >= 128 and b <= 255:\r\n b = 255 \r\n contrast_color = create_color(r, g, b)\r\n set_color(contrast_pic, x, y, contrast_color)\r\n return contrast_pic", "def reset(self):\n # must NOT reset color map here, otherwise we loose provided configs by user,\n # which are more important in this case for result images vs whatever the model task specified\n self.class_names = None\n self._map = None" ]
[ "0.63572615", "0.6100809", "0.5673598", "0.5571644", "0.55231154", "0.551467", "0.547287", "0.5454797", "0.5436428", "0.53191686", "0.52949184", "0.52944916", "0.52695817", "0.5267348", "0.5224302", "0.52170444", "0.5212828", "0.52027863", "0.5189108", "0.5177404", "0.5158237", "0.51526034", "0.5122462", "0.511686", "0.51103014", "0.5106642", "0.5104638", "0.50994325", "0.5090994", "0.50710434" ]
0.64222914
0
This decorator is meant to decorate management commands. Any exceptions raised in the command's handle method will be logged and reraised.
def log_exceptions(cls): class NewClass(cls): def handle(self, *args, **options): try: super().handle(args, options) except Exception: logger.exception("Management command '{}' failed. Traceback follows: ".format(sys.argv[1])) raise return NewClass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command(login_required=True):\n def decorate(f):\n def wrapper(self, *args):\n try:\n return f(self, *args)\n except ApiError as e:\n log_exception(e)\n raise BackendException('dpbx api error \"%s\"' % (e,))\n except Exception as e:\n log_exception(e)\n log.Error('dpbx code error \"%s\"' % (e,), log.ErrorCode.backend_code_error)\n raise\n\n wrapper.__doc__ = f.__doc__\n return wrapper\n return decorate", "def command(wrapped=None, synonyms=(), helphint=\"\", hidden=False,\n chat_only=False, muc_only=False):\n def decorator(fn):\n fn.is_command = True\n fn.synonyms = synonyms\n fn.helphint = helphint\n fn.hidden = hidden\n fn.chat_only = chat_only\n fn.muc_only = muc_only\n return fn\n if wrapped is None:\n return decorator\n else:\n return decorator(wrapped)", "def command(func):\n\n @wraps(func)\n def decorated(args, **kwargs):\n try:\n func(args, **kwargs)\n\n return True\n\n except KeyboardInterrupt:\n pass\n\n except TeapotError as ex:\n LOGGER.error(ex.msg, *ex.msg_args)\n\n if args.debug:\n LOGGER.debug('\\nTraceback is:\\n' + ''.join(traceback.format_tb(sys.exc_info()[2])))\n\n except Exception as ex:\n if args.debug:\n LOGGER.exception(ex)\n else:\n LOGGER.error(str(ex))\n\n LOGGER.error('teapot execution failed.')\n\n return False\n\n return decorated", "def command_(self, name):\n def decorator(func):\n func.__name__ = name\n return self.command(func)\n return decorator", "def _command(self, *cmd, handler=None):", "def command(self, *args, **kwargs):\n help_group = kwargs.pop(\"group\", None)\n decorator = super(GroupedGroup, self).command(*args, **kwargs)\n\n def wrapper(f):\n cmd = decorator(f)\n cmd.help_group = help_group\n return cmd\n\n return wrapper", "def _handle_exceptions(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except Exception as err:\n logger.exception(\n f\"{type(self).__name__}.{f.__name__}(*{args!r}, **{kwargs!r}) failed\"\n )\n content = self.message.content\n self.reply(f\"Oops, the {content} command encountered a problem: {err!r}\")\n\n wrapper._handle_exceptions = True\n return wrapper", "def __new__(cls, *args, **kwargs):\n obj = super(BaseCommandWithLogger, cls).__new__(cls, *args, **kwargs)\n _initial_handle = obj.handle\n\n def handle(self, *args, **kwargs):\n try:\n result = _initial_handle(*args, **kwargs)\n try:\n return json.dumps(result)\n except TypeError:\n return str(result)\n except Exception as exc:\n # If the exception was raised from a command running on the\n # Django Q cluster, we need to raise it again to mark the task\n # as failed and record the exception in the task result.\n #if hasattr(self, 'scheduling_info'):\n raise\n\n obj.handle = types.MethodType(handle, obj)\n return obj", "def create_from_management_command(\n cls,\n *,\n command_cls: Type[BaseCommand],\n args: Tuple[Any, ...],\n kwargs: Mapping[str, Any],\n ) -> BaseContext:\n\n return cast(\n BaseContext,\n cls.objects.create(\n context_type=\"management-command\",\n context={\n # Get the name of the command in the same way Django does in\n # the call_command utility.\n \"command\": command_cls.__module__.split(\".\")[-1],\n # Include args and kwargs in the request log. This handles\n # most common argument types like file etc. 
For anything not\n # handled by default the user must provide a function that\n # converts the value to a JSON encodeable value.\n \"args\": args,\n \"kwargs\": kwargs,\n },\n ),\n )", "def command_handling(args, log=COMMAND_LOG):\n # Create the Command object\n command = Command(args, None)\n\n # Resume calls are not logged\n if not command.resume:\n u.sys_log_message(command.command.replace('\\\\', '\\\\\\\\'), log_file=log)\n\n return command", "def track_command(func):\n\n def wrapped(*args, **kwargs):\n\n if not _telemetry_enabled():\n # When Telemetry is disabled, call the function immediately and return.\n return func(*args, **kwargs)\n\n telemetry = Telemetry()\n\n exception = None\n return_value = None\n exit_reason = \"success\"\n exit_code = 0\n\n duration_fn = _timer()\n try:\n\n # Execute the function and capture return value. This is returned back by the wrapper\n # First argument of all commands should be the Context\n return_value = func(*args, **kwargs)\n\n except UserException as ex:\n # Capture exception information and re-raise it later so we can first send metrics.\n exception = ex\n exit_code = ex.exit_code\n exit_reason = type(ex).__name__\n\n except Exception as ex:\n exception = ex\n # Standard Unix practice to return exit code 255 on fatal/unhandled exit.\n exit_code = 255\n exit_reason = type(ex).__name__\n\n ctx = Context.get_current_context()\n telemetry.emit(\"commandRun\", {\n # Metric about command's general environment\n \"awsProfileProvided\": bool(ctx.profile),\n \"debugFlagProvided\": bool(ctx.debug),\n \"region\": ctx.region or \"\",\n \"commandName\": ctx.command_path, # Full command path. ex: sam local start-api\n\n # Metric about command's execution characteristics\n \"duration\": duration_fn(),\n \"exitReason\": exit_reason,\n \"exitCode\": exit_code\n })\n\n if exception:\n raise exception # pylint: disable=raising-bad-type\n\n return return_value\n\n return wrapped", "def group(self, *args, **kwargs):\n def decorator(f):\n cmd = group( *args, **kwargs )( f )\n self.add_command(cmd)\n return cmd\n return decorator", "def standard_error_handler(error_function):\n\n async def wrapper(cls, ctx, error):\n\n extra = f\"\\n\\nSee the help message for more information.\"\n\n # This prevents any commands with local handlers being handled here\n if hasattr(ctx.command, \"on_error\"):\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. 
We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n\n ignored = (commands.CommandNotFound,)\n\n # Anything in ignored will return and prevent anything happening.\n if any([isinstance(error, i) for i in ignored]):\n return\n\n if isinstance(error, DisabledCommand):\n await pretty_print(\n ctx, \"This command is disabled!\", title=\"Error\", color=ERROR_COLOR\n )\n\n elif isinstance(error, MemberNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, RoleNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, NoPrivateMessage):\n await pretty_print(\n ctx,\n \"This command cannot be run in a private message.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, PrivateMessageOnly):\n try:\n await ctx.message.delete()\n extra += \"\\nYour message has been deleted\"\n except:\n print(\"Could not delete message\")\n await pretty_print(\n ctx,\n \"This command should be run in a Private Message only!\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRole):\n await pretty_print(\n ctx, str(error) + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, IllegalRole):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, CheckFailure):\n await pretty_print(\n ctx,\n \"Could not run command, do you have sufficient permissions in this channel?\"\n + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, BadArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Could not run command, is it formatted properly?\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRequiredArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx, \"Missing required arguments\", title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, BadUnionArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Invalid argument\",\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, WalletNotVerified):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, InvalidCoin):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, RequestError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n elif isinstance(error, FatalError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n await error_function(cls, ctx, error)\n\n return wrapper", "async def admin(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid Command\")", "def handler(self, command, args=[]):\n ###\n # command parsing and handling logic to be implemented by child\n ###\n if not command and not hasattr(self, 'handle_'):\n return f'Service {str(self.__class__.__name__)}: {self.__doc__ or \"\"}'\n methodname = 'handle_{}'.format(command or '')\n logger.info('method name: {}'.format(methodname))\n logger.info('args: {}'.format(args))\n method = 
self.__getattribute__(methodname)\n return method(args)", "def add_cmd_handler(self, cmd, func):\n len_args = len(inspect.getargspec(func)[0])\n def add_meta(f):\n def decorator(*args, **kwargs):\n f(*args, **kwargs)\n decorator.bytes_needed = len_args - 1 # exclude self\n decorator.__name__ = f.__name__\n return decorator\n func = add_meta(func)\n self._command_handlers[cmd] = func", "def self_decorator(self, func):\n # TODO: Any other ways to pass variables to handlers?\n def command_func(update, context, *args, **kwargs):\n return func(self, update, context, *args, **kwargs)\n return command_func", "def command(func: 'function') -> 'function':\n func._decorators = (Bot.command,)\n return func", "def command(\n self,\n name: str,\n aliases: list[str] | None = None,\n *,\n subtype: str | None = None,\n short_help: str | None = None,\n help: str | None = None,\n use_shlex: bool = True,\n ) -> DECORATOR_TYPE:\n\n def decorator(target: DECORATOR_ARGS_TYPE) -> Handler:\n handler = get_handler(target)\n\n self.apps.append(\n App(\n \"message\",\n subtype,\n handler,\n name=name,\n aliases=aliases,\n short_help=short_help,\n help=help,\n is_command=True,\n use_shlex=use_shlex,\n ),\n )\n\n return handler\n\n return decorator", "def command(\n self,\n handler: Handler = None,\n *,\n name: str = None,\n aliases: Sequence[str] = (),\n help_text: str = None,\n ) -> CommandProxy:\n\n def inner(func: Handler) -> CommandProxy:\n kwargs = {\"aliases\": aliases}\n\n help_text_ = help_text or func.__doc__\n if help_text_:\n kwargs[\"help\"] = help_text_.strip()\n\n name_ = name or func.__name__\n if asyncio.iscoroutinefunction(func):\n proxy = AsyncCommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n else:\n proxy = CommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n\n self._add_handler(proxy, name_, aliases)\n\n return proxy\n\n return inner(handler) if handler else inner", "async def on_command_error(self, ctx: Context, e: errors.CommandError) -> None:\n command = ctx.command\n\n if hasattr(e, \"handled\"):\n log.trace(f\"Command {command} had its error already handled locally; ignoring.\")\n return\n\n debug_message = (\n f\"Command {command} invoked by {ctx.message.author} with error \"\n f\"{e.__class__.__name__}: {e}\"\n )\n\n if isinstance(e, errors.CommandNotFound) and not getattr(ctx, \"invoked_from_error_handler\", False):\n if await self.try_silence(ctx):\n return\n if await self.try_run_fixed_codeblock(ctx):\n return\n await self.try_get_tag(ctx) # Try to look for a tag with the command's name\n elif isinstance(e, errors.UserInputError):\n log.debug(debug_message)\n await self.handle_user_input_error(ctx, e)\n elif isinstance(e, errors.CheckFailure):\n log.debug(debug_message)\n await self.handle_check_failure(ctx, e)\n elif isinstance(e, errors.CommandOnCooldown | errors.MaxConcurrencyReached):\n log.debug(debug_message)\n await ctx.send(e)\n elif isinstance(e, errors.CommandInvokeError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n elif isinstance(e.original, LockedResourceError):\n await ctx.send(f\"{e.original} Please wait for it to finish and try again later.\")\n elif isinstance(e.original, InvalidInfractedUserError):\n await ctx.send(f\"Cannot infract that user. 
{e.original.reason}\")\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.ConversionError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.DisabledCommand):\n log.debug(debug_message)\n else:\n # ExtensionError\n await self.handle_unexpected_error(ctx, e)", "async def on_command_error(self, ctx, error):\n\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n\n if hasattr(ctx.command, 'on_command_error'):\n return\n\n # This prevents any cogs with an overwritten cog_command_error being handled here.\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n return\n\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(f'Command pas trouvé')\n return\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{ctx.command} has been disabled.')\n return\n\n if isinstance(error,commands.errors.PrivateMessageOnly):\n await ctx.message.delete()\n channel = await ctx.message.author.create_dm()\n await channel.send(f'{ctx.command} ne peut être exécuté que en message privé !!')\n return\n # For this error example we check to see where it came from...\n if isinstance(error, commands.BadArgument):\n await ctx.send('Mauvais arguments passés')\n return\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('Il manque des arguments à la commande')\n return\n # All other Errors not returned come here. And we can just print the default TraceBack.\n logger.error(f'Ignoring exception in command {ctx.command} : {type(error)} {error} {error.__traceback__}')", "def command(*args, **kwargs):\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n new_func = CommandParent(func, **kwargs)\r\n _HANDLER.commands[new_func.name] = new_func\r\n return new_func\r\n return decorator", "def _handle(self, *args, **options):\n return super()._handle(*args, **options)", "def wrapper(callback):\n self.commands[name] = SlashCommand(callback, name, description, options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)", "def additional_command(self):\n pass", "async def on_command_error(\n self, ctx: commands.Context, error: commands.CommandError\n ):\n log.debug(\"The Error Handler was invoked to handle an error\")\n\n trace = \"\".join(\n traceback.format_exception(type(error), error, error.__traceback__)\n )\n trace = trace.strip()\n\n if hasattr(ctx.command, \"on_error\"):\n log.debug(\"Invoked, but will not override command's own error handler\")\n return\n\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n log.debug(\"Invoked, but will not override cog's own error handler\")\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. 
We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n ignored = (commands.CommandNotFound,)\n\n if isinstance(error, ignored):\n log.debug(f\"Ignored exception {type(error)} - {error}\")\n return\n\n # Check for specific exceptions to be handled\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f\"{ctx.command} has been disabled.\")\n\n elif isinstance(error, commands.CommandOnCooldown):\n try:\n await ctx.send(str(error))\n except discord.HTTPException:\n pass\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n await ctx.author.send(\n f\"{ctx.command} can not be used in Private Messages.\"\n )\n except discord.HTTPException:\n pass\n\n elif isinstance(error, commands.errors.CheckFailure):\n log.debug(f\"A command was called, but a check failed. Trace: \\n{trace}\")\n\n elif isinstance(error, commands.MissingRequiredArgument):\n log.debug(f\"A command was missing a required argument. Trace: \\n{trace}\")\n try:\n await ctx.send(\"```\\nUsage:\\n\" + ctx.command.help + \"```\")\n except discord.HTTPException:\n pass\n\n elif isinstance(error, merrors.MiltonInputError):\n # Send feedback to user\n try:\n await ctx.send(error.msg)\n except discord.HTTPException:\n pass\n\n else:\n # All other Errors not returned come here.\n # Skip the prompt line\n if \"CommandInterface\" in self.bot.cogs:\n print(\"\")\n\n log.error(f\"Ignoring exception in command {ctx.command}:\\n\" f\"{trace}\")\n\n # Re-print the handle for the CLI cog\n if \"CommandInterface\" in self.bot.cogs:\n print(\">> \", end=\"\")", "def command(self):\n raise NotImplementedError", "def command(self, *commands):\n def decorator(function):\n for command in commands:\n self.functions[command] = function\n return function\n return decorator", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration" ]
[ "0.5904763", "0.578845", "0.5735835", "0.56567407", "0.56270766", "0.5600731", "0.5581101", "0.5554357", "0.5539104", "0.55316645", "0.5529846", "0.55071104", "0.5505431", "0.5505338", "0.5483254", "0.54173476", "0.54166645", "0.53439975", "0.53280574", "0.5321739", "0.52679396", "0.52521676", "0.52418435", "0.5196691", "0.5196499", "0.517249", "0.5151756", "0.51446766", "0.5133573", "0.5130136" ]
0.62160754
0
Creates a list of random minibatches from (X, Y)
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0): np.random.seed(seed) # To make your "random" minibatches the same as ours m = X.shape[1] # number of training examples mini_batches = [] # Step 1: Shuffle (X, Y) permutation = list(np.random.permutation(m)) shuffled_X = X[:, permutation] shuffled_Y = Y[:, permutation].reshape((1,m)) # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case. num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning for k in range(0, num_complete_minibatches): mini_batch_X = shuffled_X[:, k*mini_batch_size:(k+1)*mini_batch_size] mini_batch_Y = shuffled_Y[:, k*mini_batch_size:(k+1)*mini_batch_size] mini_batch = (mini_batch_X, mini_batch_Y) mini_batches.append(mini_batch) # Handling the end case (last mini-batch < mini_batch_size) if m % mini_batch_size != 0: mini_batch_X = shuffled_X[:, num_complete_minibatches*mini_batch_size:] mini_batch_Y = shuffled_X[:, num_complete_minibatches*mini_batch_size:] mini_batch = (mini_batch_X, mini_batch_Y) mini_batches.append(mini_batch) return mini_batches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_mini_batches(X, Y, mini_batch_size = 64):\n\n\n\tm = X.shape[1]\n\tmini_batches = []\n\n\t#Shuffling around the data randomly according to the 'permutation' list\n\tpermutation = list(np.random.permutation(m))\n\tshuffled_X = X[:, permutation]\n\tshuffled_Y = Y[:, permutation].reshape((1,m))\n\n\tcomplete_minibatch_number = int(math.floor(m/mini_batch_size))\n\n\tfor k in xrange(complete_minibatch_number):\n\n\t\tmini_batch_X = shuffled_X[:, k*mini_batch_size : (k+1)*mini_batch_size]\n\t\tmini_batch_Y = shuffled_Y[:, k*mini_batch_size : (k+1)*mini_batch_size]\n\n\t\tmini_batch = (mini_batch_X, mini_batch_Y)\n\t\tmini_batches.append(mini_batch)\n\n\tif m%mini_batch_size != 0:\n\t\tmini_batch_X = shuffled_X[:, (k+1)*mini_batch_size : m]\n\t\tmini_batch_Y = shuffled_Y[:, (k+1)*mini_batch_size : m]\n\n\t\tmini_batch = (mini_batch_X, mini_batch_Y)\n\t\tmini_batches.append(mini_batch)\n\n\treturn mini_batches", "def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \n np.random.seed(seed) # To make your \"random\" minibatches the same as ours\n m = X.shape[1] # number of training examples\n mini_batches = []\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((1,m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = int(math.floor(m/mini_batch_size)) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:,k * mini_batch_size:(k + 1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:,k * mini_batch_size:(k + 1) * mini_batch_size]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n ### START CODE HERE ### (approx. 2 lines)\n end = m - mini_batch_size * math.floor(m / mini_batch_size)\n mini_batch_X = shuffled_X[:,num_complete_minibatches * mini_batch_size:]\n mini_batch_Y = shuffled_Y[:,num_complete_minibatches * mini_batch_size:]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def random_mini_batches(X, Y, mini_batch_size = 64):\n \n m = X.shape[1] # number of training examples\n mini_batches = []\n\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). 
Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) \n # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def random_mini_batches(X, Y, mini_batch_size = 64):\n \n m = X.shape[0] # number of training examples.\n mini_batches = []\n \n # Step 1: Shuffle (X, Y).\n permutation = list(np.random.permutation(m))\n shuffled_X = X[permutation,:,:,:]\n shuffled_Y = Y[permutation,:]\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in partitionning.\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]\n mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size).\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]\n mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def generate_mines(self, number):\n mine_locations = []\n available_places = [[j, i]\n for i in xrange(0, self.x) for j in xrange(0, self.y)]\n while number > 0:\n # the chosen coordinate for a mine is appended into the list and is\n # removed from the list of choices to prevent duplicates.\n choice = random.choice(available_places)\n available_places.remove(choice)\n mine_locations.append(choice)\n number -= 1\n return mine_locations", "def iterate_minibatches(X_,y_,batchsize_,shuffle_=True):\n\tindx = [i for i in range(len(X_))]\n\trandom.shuffle(indx)\n\tindxgenerator = (i for i in indx)\n\tdel indx\n\twhile True:\n\t\tbatch_indx = list(islice(indxgenerator, batchsize_))\n\t\tif len(batch_indx)==0:\n\t\t\tbreak;\n\t\telse:\n\t\t\tyield X_[batch_indx,:], y_[batch_indx]", "def mini_batches(X, Y, mini_batch_size = 64):\n m = X.shape[1]\n mini_batches = []\n\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((10,m))\n\n num_complete_minibatches = math.floor(m/mini_batch_size)\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:,k * mini_batch_size:(k + 1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:,k * mini_batch_size:(k + 1) * mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n if m % mini_batch_size != 0:\n end = m - mini_batch_size * math.floor(m / mini_batch_size)\n mini_batch_X = shuffled_X[:,num_complete_minibatches * mini_batch_size:]\n mini_batch_Y = shuffled_Y[:,num_complete_minibatches * mini_batch_size:]\n 
mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n return mini_batches", "def sample_trajectories(self):\n minibatch = []\n for i in range(self.num_avg_gradient):\n trajectory = self.replay_buffer[random.randint(0, len(self.replay_buffer) - 1)]\n trajectory = trajectory[random.randint(0, len(trajectory) - 1):]\n minibatch.append(trajectory)\n return minibatch", "def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \n m = X.shape[0] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n \n # Step 1: Shuffle (X, Y)\n #permutation = list(np.random.permutation(m))\n permutation = list(torch.randperm(m))\n shuffled_X = X[permutation,:,:,:]\n shuffled_Y = Y[permutation,:]\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = int(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]\n mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]\n mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def random_mini_batches(X, Y, mini_batch_size = 32, seed = 0):\n \n m = X.shape[1] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine", "def get_mines(self):\n\t\treturn ((x, y) for x in range(self.width)\n\t\t for y in range(self.height) if self.mines[x][y])", "def random_mini_batches(X, Y, mini_batch_size = 4, seed = 0):\n \n m = X.shape[0] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[permutation,:,:,:]\n shuffled_Y = Y[permutation,:]\n\n # Step 2: Partition (shuffled_X, shuffled_Y). 
Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]\n mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]\n mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def minibatch(x_train, y_train, batch_size, train_epochs):\n epoch = 0\n start = 0\n key = random.PRNGKey(0)\n\n while epoch < train_epochs:\n end = start + batch_size\n\n if end > x_train.shape[0]:\n key, split = random.split(key)\n permutation = random.permutation(split,\n np.arange(x_train.shape[0], dtype=np.int64))\n x_train = x_train[permutation]\n y_train = y_train[permutation]\n epoch += 1\n start = 0\n continue\n\n yield x_train[start:end], y_train[start:end]\n start = start + batch_size", "def get_minibatches(data, minibatch_size, shuffle=True):\r\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\r\n data_size = len(data[0]) if list_data else len(data)\r\n indices = np.arange(data_size)\r\n if shuffle:\r\n np.random.shuffle(indices)\r\n for minibatch_start in np.arange(0, data_size, minibatch_size):\r\n minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]\r\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\r\n else minibatch(data, minibatch_indices)", "def _get_bootstrap_sample(x, y, num_reps):\r\n combined = array(list(x) + list(y))\r\n total_obs = len(combined)\r\n num_x = len(x)\r\n for i in range(num_reps):\r\n # sampling with replacement\r\n indices = randint(0, total_obs, total_obs)\r\n sampled = combined.take(indices)\r\n # split into the two populations\r\n sampled_x = sampled[:num_x]\r\n sampled_y = sampled[num_x:]\r\n yield sampled_x, sampled_y", "def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations", "def generateMines(num_rows, num_cols, num_mines):\n arr = np.random.permutation(num_rows * num_cols)\n return arr[:num_mines]", "def get_minibatches(data, minibatch_size, shuffle=True):\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\n data_size = len(data[0]) if list_data else len(data)\n indices = np.arange(data_size)\n if shuffle:\n np.random.shuffle(indices)\n for minibatch_start in np.arange(0, data_size, minibatch_size):\n minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\n else minibatch(data, minibatch_indices)", "def generate_random_data(size, x_min=X_MIN, x_max=X_MAX, y_min=Y_MIN, y_max=Y_MAX):\n result = []\n for _i in range(size):\n result.append((randint(x_min, x_max), randint(y_min, y_max)))\n\n return result", "def random_mini_batches(X, mini_batch_size = 64, seed = 0):\n \n X = X.T\n \n m = X.shape[1] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n \n # Step 1: Shuffle (X, Y)\n permutation = 
list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n \n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch = (mini_batch_X.T)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch = (mini_batch_X.T)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def generate_small_hypermoons(m_rel: int, max_r: float, min_r: float) -> List[Tuple[List[float], float]]:\n c_big, r_big = generate_small_hypersphere(m_rel, max_r, min_r)\n\n c_small = c_big + ((np.random.rand(m_rel) * (0.4 * r_big)) * random.choice([1, -1]))\n r_small = 0.95 * r_big\n\n return [(c_big, r_big), (c_small, r_small)]", "def create_minibatch(self):\r\n if self.experience_batch.shape[0] <= self.minibatch_size:\r\n self.minibatch = self.experience_batch\r\n\r\n else:\r\n ind = np.random.randint(self.experience_batch.shape[0], size=self.minibatch_size) # same sample can be in the minibatch multiple times --> problem for algorithm ?\r\n self.minibatch = self.experience_batch[ind]", "def supervised_random_mini_batches(X, Y, mini_batch_size, seed):\n\n np.random.seed(seed)\n m = X.shape[0] #number of examples in set\n n_classes = Y.shape[1]\n mini_batches=[]\n\n permutation = list(np.random.permutation(m))\n \n shuffled_X = X[permutation,:]\n shuffled_Y = Y[permutation,:]\n #partition of (shuffled_X, shuffled_Y) except the last mini_batch\n \n num_complete_mini_batches = math.floor(m/mini_batch_size)\n for k in range(num_complete_mini_batches):\n mini_batch_X = shuffled_X[k*mini_batch_size:(k+1)*mini_batch_size,:]\n mini_batch_Y = shuffled_Y[k*mini_batch_size:(k+1)*mini_batch_size,:]\n \n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # handling the case of last mini_batch < mini_batch_size \n if m % mini_batch_size !=0:\n \n mini_batch_X = shuffled_X[mini_batch_size*num_complete_mini_batches:m,:]\n mini_batch_Y = shuffled_Y[mini_batch_size*num_complete_mini_batches:m,:]\n \n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def test_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def generate_batches(x, y, x_placeholder, y_placeholder, batch_size=20, seed=None):\n\n # Sanitize inputs\n assert(isinstance(batch_size, int)), \"generate_batches: batch size must be an integer.\"\n assert(batch_size > 0), \"generate_batches: batch size must be greater than zero.\"\n\n assert(seed is None or isinstance(seed, int)), \"generate_batches: seed must be an integer or `None`\"\n\n assert seed is None or (0 <= seed <= 2 ** 32 - 1)\n\n assert(y.shape[0] == x.shape[0]), \"Not exactly one label per datapoint!\"\n\n n_examples = x.shape[0]\n\n if seed is None:\n seed = np.random.randint(1, 100000)\n\n rng = np.random.RandomState()\n rng.seed(seed)\n\n # Check if we have enough data points to form a minibatch\n # otherwise set the batchsize equal to the number of input points\n 
initial_batch_size = batch_size\n # print(batch_size)\n batch_size = min(initial_batch_size, n_examples)\n # print(batch_size)\n\n if initial_batch_size != batch_size:\n logging.error(\"Not enough datapoints to form a minibatch. \"\n \"Batchsize was set to %s\", batch_size)\n\n while True:\n # `np.random.randint` is end-exclusive => for n_examples == batch_size, start == 0 holds\n start = rng.randint(0, (n_examples - batch_size + 1))\n\n minibatch_x = x[start:start + batch_size]\n minibatch_y = y[start:start + batch_size, None]\n\n feed_dict = {\n x_placeholder: minibatch_x,\n y_placeholder: minibatch_y.reshape(-1, 1)\n }\n yield feed_dict", "def sample(self):\n # For each row: round(random .* (max - min) + min, 0)\n random_array = prng.np_random.rand(self.num_discrete_space)\n return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]", "def random_positions(mini, maxi):\n x_cord = (maxi - mini)*np.random.random(SIZE) + mini\n y_cord = (maxi - mini)*np.random.random(SIZE) + mini\n return np.column_stack([x_cord, y_cord])", "def get_random_point(self):\n\t\tx = np.random.uniform(self.xmin, self.xmax)\n\t\ty = np.random.uniform(self.ymin, self.ymax)\n\t\treturn [x, y, 0.0]", "def process_minibatch(\n self,\n x: torch.Tensor,\n y: torch.Tensor,\n train_context: TrainContext,\n ) -> List[torch.Tensor]:\n output = train_context.model(x)\n if isinstance(output, torch.Tensor):\n return [output]\n return output" ]
[ "0.65385675", "0.6229042", "0.6218775", "0.62119395", "0.62032425", "0.61461645", "0.6137679", "0.60998034", "0.6097758", "0.60959524", "0.60723096", "0.60708946", "0.6014316", "0.5990937", "0.5940285", "0.5884059", "0.5871586", "0.5844728", "0.58423215", "0.5820612", "0.5779203", "0.5763866", "0.5759305", "0.5740081", "0.5703614", "0.5698038", "0.568929", "0.5668056", "0.5660645", "0.5634374" ]
0.6306558
1
Tests that Predictor instances are not serializable.
def test_serialization(): # Class is serializable. ray.put(DummyPredictor) # Instance is not serializable. predictor = DummyPredictor() with pytest.raises(PredictorNotSerializableException): ray.put(predictor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def test_valid_serialization_unfit_model(self):\n instance = GammaUnivariate()\n result = GammaUnivariate.from_dict(instance.to_dict())\n assert instance.to_dict() == result.to_dict()", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def test__pickle_unpickle(self):\n pass", "def test_serialize_object(self):\n test_obj = self.TestObject(prop1='x', prop2=1234)\n\n with self.assertRaises(TypeError):\n serialize(test_obj)", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_init_prediction_data(raw_data):\n prediction_data = PredictionData(**raw_data)\n assert prediction_data", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def test_predict_transient_smoke_new(self):\n self.check_predict_transient_smoke()", "def test_instmap_picklable(self):\n instmap = FakeAthens().defaults().instruction_schedule_map\n\n ser_obj = pickle.dumps(instmap)\n deser_instmap = pickle.loads(ser_obj)\n\n self.assertEqual(instmap, deser_instmap)", "def test_predict_transient_smoke_old(self):\n self.check_predict_transient_smoke()", "def is_serializable(instance_or_class: Any) -> bool:\n return hasattr(instance_or_class, SERDE_SCOPE)", "def test_deserialize_missing_data(self):\n data = {\"product_id\": 1}\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def assertSerializeDeserialize(self, obj, version=None):\n tested_versions = [version] if version is not None else Version.supported\n for version in tested_versions:\n constructor = obj.__class__.from_json\n json_obj = obj.to_json(version)\n clone = constructor(json_obj)\n\n self.assertEqual(obj.__class__, clone.__class__)\n\n if isinstance(obj, dict):\n orig_dict = obj\n clone_dict = clone\n else:\n orig_dict = obj.__dict__\n clone_dict = clone.__dict__\n\n self.assertEqual(orig_dict, clone_dict)", "def test_no_fit_predict() -> None:\n mapie = MapieClassifier(estimator=DummyClassifier())\n with pytest.raises(NotFittedError, match=r\".*not fitted.*\"):\n mapie.predict(X_toy)", "def IgnorePersistedDecision(self) -> bool:", "def pickle_fake_estimator():\n estimator = FakeEstimator()\n data = FakeEstimatorData()\n\n estimator.fit(*data.train_data)\n estimator.dump(filepath=\"src/tests/fixtures/fake_estimator.pkl\")", "def test_fit_without_saving(self) -> type(None):\n X, y = get_dataset_for_regression()\n rgr = StackingRegressor(keep_meta_X=False)\n rgr.fit(X, y)\n self.assertFalse(hasattr(rgr, 'meta_X_'))\n self.assertTrue(hasattr(rgr, 'meta_estimator_'))", "def test_data_object_vaporise(self):\n pass", "def test_serialize_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n data = recommendation.serialize()\n self.assertNotEqual(data, None)\n self.assertIn(\"product_id\", data)\n self.assertEqual(data[\"product_id\"], recommendation.product_id)\n self.assertIn(\"recommendation_product_id\", data)\n self.assertEqual(data[\"recommendation_product_id\"], recommendation.recommendation_product_id)\n self.assertIn(\"relationship\", data)\n self.assertEqual(data[\"relationship\"], recommendation.relationship.name)", "def 
test_pickle(self):\n origin = np.random.randn(3)\n normal = np.random.randn(3)\n up_vector = np.random.randn(3)\n plane = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n \n p2 = pickle.loads(pickle.dumps(plane))\n np.testing.assert_almost_equal(plane.origin, p2.origin)\n np.testing.assert_almost_equal(plane.normal, p2.normal)\n np.testing.assert_almost_equal(plane.basis_u, p2.basis_u)\n np.testing.assert_almost_equal(plane.basis_v, p2.basis_v)", "def test_default(self):\n self.assertEqual(self.model.frozen(), False)", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def test_save_load(self):\n features = np.array([[0, 0], [0.1, 0.1], [0.4, 0.4], [1, 1]])\n labels = np.array([0, 0.1, 0.4, 1])\n num_inputs = 2\n qnn = TwoLayerQNN(\n num_inputs,\n feature_map=ZZFeatureMap(num_inputs),\n ansatz=RealAmplitudes(num_inputs),\n observable=PauliSumOp.from_list([(\"Z\" * num_inputs, 1)]),\n quantum_instance=self.qasm_quantum_instance,\n )\n regressor = NeuralNetworkRegressor(qnn, optimizer=COBYLA())\n regressor.fit(features, labels)\n\n # predicted labels from the newly trained model\n test_features = np.array([[0.5, 0.5]])\n original_predicts = regressor.predict(test_features)\n\n # save/load, change the quantum instance and check if predicted values are the same\n with tempfile.TemporaryDirectory() as dir_name:\n file_name = os.path.join(dir_name, \"regressor.model\")\n regressor.save(file_name)\n\n regressor_load = NeuralNetworkRegressor.load(file_name)\n loaded_model_predicts = regressor_load.predict(test_features)\n\n np.testing.assert_array_almost_equal(original_predicts, loaded_model_predicts)\n\n # test loading warning\n class FakeModel(SerializableModelMixin):\n \"\"\"Fake model class for test purposes.\"\"\"\n\n pass\n\n with self.assertRaises(TypeError):\n FakeModel.load(file_name)", "def test_invalid_prefit_estimator(estimator: ClassifierMixin) -> None:\n mapie = MapieClassifier(estimator=estimator, cv=\"prefit\")\n with pytest.raises(NotFittedError):\n mapie.fit(X_toy, y_toy)", "def test_no_fit_predict() -> None:\n mapie = MapieRegressor(estimator=DummyRegressor())\n with pytest.raises(NotFittedError, match=r\".*not fitted.*\"):\n mapie.predict(X_toy)", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def testPredict_AsIterableFalse(self):\n sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=20)\n feature_columns = [\n tf.contrib.layers.embedding_column(sparse_column, dimension=1)\n ]\n\n classifier = tf.contrib.learn.DNNClassifier(\n n_classes=3,\n feature_columns=feature_columns,\n hidden_units=[3, 3],\n config=tf.contrib.learn.RunConfig(tf_random_seed=3))\n\n classifier.fit(input_fn=_input_fn, steps=100)\n\n scores = classifier.evaluate(input_fn=_input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n self.assertLess(scores['loss'], 0.3)\n predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)\n self.assertListEqual(list(predictions), [1, 0, 0])\n predictions = classifier.predict_proba(input_fn=_input_fn,\n as_iterable=False)\n self.assertAllClose(\n predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.1)", "def test_valid_serialization_fit_model(self):\n instance = GammaUnivariate()\n instance.fit(np.array([1, 2, 3, 2, 1]))\n result = 
GammaUnivariate.from_dict(instance.to_dict())\n assert instance.to_dict() == result.to_dict()", "def test_not_loaded(person):\n with pytest.raises(KeyError):\n person.load(-1)\n\n assert person.loaded is False" ]
[ "0.58206624", "0.58120334", "0.5762754", "0.57272524", "0.56799954", "0.56695384", "0.56055886", "0.5602577", "0.5546996", "0.5540443", "0.5539814", "0.55207306", "0.54866004", "0.5472597", "0.5461503", "0.5412684", "0.53859967", "0.537395", "0.535977", "0.53534746", "0.53437835", "0.5326557", "0.53215855", "0.53127384", "0.5302242", "0.5302011", "0.52975065", "0.5284721", "0.5273268", "0.5271901" ]
0.7470269
0
Adds player calendar to team_cal and returns filled teams
def match_with_player(self, name, player_cal): updated_team_cal = self.team_cal.copy() filled_team_keys = [] for loc in player_cal.stack().index: current_player_count = self.team_cal.at[loc] if self.price_cal.at[loc] <= player_cal.at[loc]: if current_player_count < self.team_size * 2: updated_team_cal.at[loc] += 1 self.team_dict[f'{loc[1]}-{loc[0]}'].append(name) if current_player_count == self.team_size * 2 - 1: filled_team_keys.append(f'{loc[1]}-{loc[0]}') else: continue # team is filled self.team_cal = updated_team_cal return filled_team_keys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_teams():", "def _create_teams(self):\n\t\tself.teamsDict = {}\n\t\tself.teamNamesList = []\n\t\tfor team in range(self.numberOfTeams):\n\t\t\tname = 'TEAM_'+str(team+1)\n\t\t\tself.teamNamesList.append(name)\n\t\t\tself.teamsDict[name] = app.game.team.Team(sport_type=self.gameData['sportType'])", "def create_teams(a_season):\n\n team_a = Team(name=\"Team A\", season=a_season)\n team_a.save()\n for p in create_players(7, 0):\n team_a.players.add(p)\n team_a.save()\n team_b = Team(name=\"Team B\", season=a_season)\n team_b.save()\n for p in create_players(7, 16):\n team_b.players.add(p)\n team_b.save()", "def get_player_games(self, year, use_local=True):", "def add_league_teams(league_diction, team_count, host, root, password):\r\n teams_diction = scrape_teams(league_diction, team_count)\r\n create_teams(host, root, password, dict_to_read=teams_diction)\r\n\r\n return teams_diction", "def get_player_stats_from_game(team, year, week):", "def add_players_on_floor(self):\n for period in self.Periods:\n # set current players to be period starters\n current_players = period.Starters.copy()\n for pbp_event in period.Events:\n if pbp_event.is_substitution():\n coming_in = pbp_event.player2_id\n going_out = pbp_event.player_id\n team_id = pbp_event.team_id\n current_players[team_id] = [coming_in if player == going_out else player for player in current_players[team_id]]\n pbp_event.current_players = current_players.copy()", "def get_teams_and_schedule():\n start_time = timedelta(hours=19)\n time_to_add = timedelta(minutes=15)\n teams = session.query(Team).all()\n\n for team in teams:\n team.time = str(start_time)\n start_time += time_to_add\n yield team", "def collect_teams(year: int = 2005) -> None:\n\n\twith open('../resources/config.json') as config_file, open('../resources/secrets.json') as secrets_file:\n\t\tconfig_json = json.load(config_file)\n\t\tsecrets_json = json.load(secrets_file)\n\n\t\turl = '/'.join(['http:', '', config_json['base_url'], config_json['fbs_teams_endpoint']])\n\t\tapi_key = secrets_json['api_key']\n\n\theaders = {'Authorization': api_key}\n\tparams = {'year': year}\n\n\tresponse = requests.get(url, headers = headers, params = params).json()\n\n\t# dict of one array for json dump\n\tteam_names = {'teamNames': list(map(lambda r: r['school'], response))}\n\n\twith open('../resources/teams.json', 'w') as teams_file:\n\t\tjson.dump(team_names, teams_file)", "def getAllTeams(self):\n return []", "def add_all_teams_and_players_in_league(league_dict, con, host, root, password):\r\n with con.cursor() as cur:\r\n cur.execute(\"\"\"SELECT MAX(id) FROM teams\"\"\")\r\n team_counter = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM players\"\"\")\r\n player_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM injuries\"\"\")\r\n injury_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM player_season\"\"\")\r\n player_season_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM player_team\"\"\")\r\n player_team_count = cur.fetchall()[0][0]\r\n\r\n teams_dict = add_league_teams(league_dict, team_counter, host, root, password)\r\n\r\n add_teams_players(teams_dict, player_count, injury_count, player_season_count,\r\n player_team_count, host, root, password)", "def get_contracted_players(self, team):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's contracted players\n doc = self.get_html_document(team, 'contracts')\n\n # returning empty list if 
no system page could be found\n if doc is None:\n return players\n\n # collecting player names and links to capfriendly pages for different\n # player groups\n cf_links = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/@href\")\n cf_names = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/text()\")\n\n for lnk, name in zip(cf_links, cf_names):\n # retrieving capfriendly id from player page link\n cf_id = lnk.split(\"/\")[-1]\n # trying to find player in database\n plr = Player.find_by_capfriendly_id(cf_id)\n # trying to find player using suggestions\n if plr is None:\n last_name, first_name = name.split(\", \")\n suggested_players = self.get_suggested_players(\n last_name, first_name)\n for suggested_player in suggested_players:\n (\n sugg_plr_id, sugg_pos,\n sugg_last_name, sugg_first_name, _\n ) = (\n suggested_player\n )\n if (last_name, first_name) == (\n sugg_last_name, sugg_first_name):\n plr = Player.find_by_id(sugg_plr_id)\n if plr is None:\n plr = self.create_player(\n sugg_plr_id, last_name, first_name, sugg_pos)\n\n if plr is None:\n print(\"Unable to find player with name %s\" % name)\n else:\n players.append(plr)\n\n return players", "def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)", "def collect_teams(year):\n\n team_list = Team.objects.filter(year=year).order_by('location')\n teams = []\n for t in team_list:\n team = {\n 'id': t.abbreviation,\n 'team': t,\n }\n teams.append(team)\n return teams", "def load_premiership_teams():\n # list of PremTeams to add\n team_list = [\n {'name': 'Arsenal', 'code': 'ARS', 'is_prem': True},\n {'name': 'Aston Villa', 'code': 'AVL', 'is_prem': True},\n {'name': 'Brighton and Hove Albion', 'code': 'BTN', 'is_prem': True},\n {'name': 'Brentford', 'code': 'BRE', 'is_prem': True},\n {'name': 'Bournemouth', 'code': 'BOU', 'is_prem': False},\n {'name': 'Burnley', 'code': 'BUR', 'is_prem': True},\n {'name': 'Cardiff City', 'code': 'CAR', 'is_prem': False},\n {'name': 'Chelsea', 'code': 'CHE', 'is_prem': True},\n {'name': 'Crystal Palace', 'code': 'CRY', 'is_prem': True},\n {'name': 'Everton', 'code': 'EVE', 'is_prem': True},\n {'name': 'Fulham', 'code': 'FUL', 'is_prem': False},\n {'name': 'Hull', 'code': 'HUL', 'is_prem': False},\n {'name': 'Huddersfield Town', 'code': 'HUD', 'is_prem': False},\n {'name': 'Leeds United', 'code': 'LEE', 'is_prem': True},\n {'name': 'Leicester City', 'code': 'LEI', 'is_prem': True},\n {'name': 'Liverpool', 'code': 'LIV', 'is_prem': True},\n {'name': 'Manchester City', 'code': 'MCY', 'is_prem': True},\n {'name': 'Manchester United', 'code': 'MUN', 'is_prem': True},\n {'name': 'Middlesbrough', 'code': 'MID', 'is_prem': False},\n {'name': 'Newcastle United', 'code': 'NEW', 'is_prem': True},\n {'name': 'Norwich City', 'code': 'NOR', 'is_prem': True},\n {'name': 'Queens 
Park Rangers', 'code': 'QPR', 'is_prem': False},\n {'name': 'Sheffield United', 'code': 'SHF', 'is_prem': False},\n {'name': 'Southampton', 'code': 'SOT', 'is_prem': True},\n {'name': 'Stoke City', 'code': 'STO', 'is_prem': False},\n {'name': 'Sunderland', 'code': 'SUN', 'is_prem': False},\n {'name': 'Swansea City', 'code': 'SWA', 'is_prem': False},\n {'name': 'Tottenham Hotspur', 'code': 'TOT', 'is_prem': True},\n {'name': 'Watford', 'code': 'WAT', 'is_prem': True},\n {'name': 'West Bromwich Albion', 'code': 'WBA', 'is_prem': False},\n {'name': 'West Ham United', 'code': 'WHM', 'is_prem': True},\n {'name': 'Wolverhampton Wanderers', 'code': 'WLV', 'is_prem': True},\n ]\n\n for team in team_list:\n print(PremTeam.objects.update_or_create(\n name=team['name'],\n code=team['code'],\n defaults={'is_prem': team['is_prem']}\n ))\n # print(pt, created)", "def add_teams_players(teams_dictionary, player_counter, injury_counter, player_season_counter,\r\n player_team_counter, host, root, password):\r\n players, injuries, players_seasons, players_teams = \\\r\n scrape_players(teams_dictionary, player_counter, injury_counter, player_season_counter,\r\n player_team_counter)\r\n if players:\r\n create_players(host, root, password, dict_to_read=players)\r\n create_injuries(host, root, password, dict_to_read=injuries)\r\n create_players_by_team(host, root, password, dict_to_read=players_teams)\r\n create_players_by_season(host, root, password, dict_to_read=players_seasons)", "def fa_finder(league_no, team_name):\n ros_proj_b_list = BatterProjection.objects.all()\n ros_proj_p_list = PitcherProjection.objects.all()\n player_comp = {}\n pitching_fa_list = yahoo_players(league_no, \"P\")\n batting_fa_list = yahoo_players(LEAGUE_NO, \"B\")\n avail_pitching_fas = rate_avail_players(pitching_fa_list, ros_proj_p_list)\n yahoo_team = get_single_yahoo_team(league_no, team_name)\n team_pitching_values = rate_team(yahoo_team, ros_proj_p_list)\n avail_batting_fas = rate_avail_players(batting_fa_list, ros_proj_b_list)\n team_batting_values = rate_team(yahoo_team, ros_proj_b_list)\n\n player_comp['Team Name'] = yahoo_team['team_name']\n player_comp['Pitching FAs'] = avail_pitching_fas\n player_comp['Pitching Team'] = team_pitching_values\n player_comp['Batting FAs'] = avail_batting_fas\n player_comp['Batting Team'] = team_batting_values\n\n return player_comp", "def teams(teamid):\n team_summary = team.TeamSummary(teamid)\n team_summary_info = team_summary.info()\n team_season_ranks = team_summary.season_ranks()\n\n team_common_roster = team.TeamCommonRoster(teamid)\n roster = team_common_roster.roster()\n coaches = team_common_roster.coaches()\n\n season = team_summary_info[0][\"SEASON_YEAR\"]\n\n team_game_log = team.TeamGameLogs(teamid,\n season=season)\n team_games = team_game_log.info()\n\n playoffs_teamgamelogs = team.TeamGameLogs(teamid,\n season=season,\n season_type=\"Playoffs\")\n playoffs_team_games = playoffs_teamgamelogs.info()\n\n team_season = team.TeamSeasons(teamid)\n team_season_info = team_season.info()\n\n for i in team_season_info:\n if (i[\"YEAR\"] == season):\n current_season_info = i\n\n return render_template(\"teams.html\",\n title=team_summary_info[0][\"TEAM_CITY\"] + \" \" + team_summary_info[0][\"TEAM_NAME\"],\n teamid=teamid,\n team_summary_info=team_summary_info,\n team_season_ranks=team_season_ranks,\n season=season,\n team_games=team_games,\n playoffs_team_games=playoffs_team_games,\n team_season=team_season_info,\n roster=roster,\n coaches=coaches,\n 
current_season_info=current_season_info,\n team_img=TEAM_ID_DATA)", "def _team_init(self):\r\n\t\tfor team_type, team_info in self._teams.items():\r\n\t\t\tteam_info.team_type = team_type\r\n\t\t\tteam_info.maze_pos_finder = \\\r\n\t\t\t\tself._maze_manager.get_finder_by_name(team_type.__str__())", "def enter_game_played(self, players_names, winners_names, game, date, group):\n try:\n game_played = GamePlayed()\n game_played.game = Game.objects.get(name__exact=game)\n game_played.date = date\n game_played.group = group\n game_played.save()\n\n for player in players_names:\n game_played.players.add(Player.objects.get(user__first_name__exact=player))\n for winner in winners_names:\n game_played.winners.add(Player.objects.get(user__first_name__exact=winner))\n except:\n print(\"Error entering game\", game)\n pass", "def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in 
TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)", "def instantiate_team_and_player_data(self, game_summary):\n self.HomeTeamId = str(game_summary['g']['hls']['tid'])\n self.VisitorTeamId = str(game_summary['g']['vls']['tid'])\n self.HomeTeamAbbreviation = str(game_summary['g']['hls']['ta'])\n self.VisitorTeamAbbreviation = str(game_summary['g']['vls']['ta'])\n self.GameDate = game_summary['g']['gdte']\n\n self.Players = {\n self.HomeTeamId: {\n str(player['pid']): player['fn'] + ' ' + player['ln']\n for player in game_summary['g']['hls']['pstsg'] if player['totsec'] > 0 # only keep track of stats for players who played\n },\n self.VisitorTeamId: {\n str(player['pid']): player['fn'] + ' ' + player['ln']\n for player in game_summary['g']['vls']['pstsg'] if player['totsec'] > 0 # only keep track of stats for players who played\n },\n }\n if self.GameId in PLAYERS_MISSING_FROM_BOXSCORE.keys():\n for team_id in PLAYERS_MISSING_FROM_BOXSCORE[self.GameId].keys():\n for player_id in PLAYERS_MISSING_FROM_BOXSCORE[self.GameId][team_id].keys():\n self.Players[team_id][player_id] = PLAYERS_MISSING_FROM_BOXSCORE[self.GameId][team_id][player_id]", "async def add_to_team(self, player : Player, team):\r\n if player in self.remaining:\r\n self.teams[team].append(player)\r\n self.remaining.remove(player)\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"{} has been drafted to team {}\".format(get_member_name(player,lower=False), \":a:\" if team == \"A\" else \":b:\"))\r\n else:\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",description=\"Sorry, {} is already drafted\".format(get_member_name(player)))", "def get_teams():\n teams = []\n for teamId in range(1, 68):\n t = requests.get(TEAMS_URL.format(teamId)).json()\n team_list = t.get('teams')\n if team_list is None or len(team_list) == 0:\n continue\n teams.append(Team.from_json(team_list[0]))\n return teams", "def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def __init__(self, manager):\n self.player = manager\n manager.career.seasons.append(self)\n self.team = manager.team\n self.league = self.team.league\n self.year = self.team.cosmos.year", "def season_series(game_id, pref_team, other_team, last_season=False):\n\n # Init empty dictionaries and lists\n games_against = list()\n pref_toi = dict()\n pref_goals = dict()\n pref_assists = dict()\n pref_points = dict()\n pref_record = {\"wins\": 0, \"losses\": 0, \"ot\": 0}\n roster_player = True\n\n # If this is the first game of the season, we can 
set the 'last_season' flag to enable the\n # season series function to check last year's season series between the two teams.\n if not last_season:\n season_start = str(game_id)[0:4]\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={yesterday:%Y-%m-%d}\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n else:\n season_start = int(str(game_id)[0:4]) - 1\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={season_end}-06-01\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n\n schedule = api.nhl_api(schedule_url).json()\n dates = schedule[\"dates\"]\n\n # Loop through scheduled to get previously played games against\n for date in dates:\n game = date[\"games\"][0]\n game_type = game[\"gameType\"]\n game_id = game[\"gamePk\"]\n game_team_home = game[\"teams\"][\"home\"][\"team\"][\"name\"]\n game_team_away = game[\"teams\"][\"away\"][\"team\"][\"name\"]\n teams = [game_team_away, game_team_home]\n game_status = game[\"status\"][\"abstractGameState\"]\n if game_type == \"R\" and game_status == \"Final\" and other_team.team_name in teams:\n game_feed = f\"/game/{game_id}/feed/live\"\n games_against.append(game_feed)\n\n # If the two teams haven't played yet, just exit this function\n if not games_against:\n return None, None, None\n\n # Loop through newly created games_against list to get each stats\n for feed in games_against:\n game = api.nhl_api(feed).json()\n game_data = game[\"gameData\"]\n home_team_name = game_data[\"teams\"][\"home\"][\"name\"]\n pref_homeaway = \"home\" if home_team_name == pref_team.team_name else \"away\"\n other_homeaway = \"away\" if home_team_name == pref_team.team_name else \"home\"\n\n # Get season series\n end_period = game[\"liveData\"][\"linescore\"][\"currentPeriod\"]\n extra_time = True if end_period > 3 else False\n pref_score = game[\"liveData\"][\"linescore\"][\"teams\"][pref_homeaway][\"goals\"]\n other_score = game[\"liveData\"][\"linescore\"][\"teams\"][other_homeaway][\"goals\"]\n if pref_score > other_score:\n pref_record[\"wins\"] += 1\n elif other_score > pref_score and extra_time:\n pref_record[\"ot\"] += 1\n else:\n pref_record[\"losses\"] += 1\n\n season_series_str = f\"Series: {pref_record['wins']}-\" f\"{pref_record['losses']}-{pref_record['ot']}\"\n\n # Get stats leaders\n # pref_teamstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"teamStats\"]\n pref_playerstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"players\"]\n for id, player in pref_playerstats.items():\n try:\n # Calculate TOI\n player_toi_str = player[\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n player_toi_minutes = int(player_toi_str.split(\":\")[0])\n player_toi_seconds = int(player_toi_str.split(\":\")[1])\n player_toi = (player_toi_minutes * 60) + player_toi_seconds\n pref_toi[id] = pref_toi.get(id, 0) 
+ player_toi\n\n # Point Totals\n player_goal_str = player[\"stats\"][\"skaterStats\"][\"goals\"]\n pref_goals[id] = pref_goals.get(id, 0) + int(player_goal_str)\n player_assist_str = player[\"stats\"][\"skaterStats\"][\"assists\"]\n pref_assists[id] = pref_assists.get(id, 0) + int(player_assist_str)\n player_points = int(player_goal_str) + int(player_assist_str)\n pref_points[id] = pref_points.get(id, 0) + int(player_points)\n\n except KeyError:\n pass\n\n # Calculate Stats Leaders\n sorted_toi = sorted(pref_toi.values(), reverse=True)\n leader_toi = sorted_toi[0]\n\n sorted_points = sorted(pref_points.values(), reverse=True)\n leader_points = sorted_points[0]\n\n # Get TOI leader\n for id in pref_toi.keys():\n if pref_toi[id] == leader_toi:\n player_name = roster.player_attr_by_id(pref_team.roster, id, \"fullName\")\n if player_name is None:\n roster_player = False\n player_id_only = id.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n leader_toi_avg = leader_toi / len(games_against)\n m, s = divmod(leader_toi_avg, 60)\n toi_m = int(m)\n toi_s = int(s)\n toi_s = \"0{}\".format(toi_s) if toi_s < 10 else toi_s\n toi_avg = \"{}:{}\".format(toi_m, toi_s)\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n toi_leader_str = \"TOI Leader: {} with {} / game.\".format(player_short_name, toi_avg)\n\n # Handle tied points leaders\n point_leaders = list()\n for id in pref_points.keys():\n if pref_points[id] == leader_points:\n point_leaders.append(id)\n\n if leader_points == 0:\n points_leader_str = \"Points Leader: None (all players have 0 points).\"\n\n elif len(point_leaders) == 1:\n leader = point_leaders[0]\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n # If the player is no longer on the team, get their information (change string here?)\n if player_name is None:\n roster_player = False\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n if not roster_player:\n points_leader_str = (\n f\"Points Leader: {player_name} with {leader_points} points \"\n f\"({player_goals}G {player_assists}A) \"\n )\n else:\n points_leader_str = \"Points Leader: {} with {} ({}G {}A).\".format(\n player_name, leader_points, player_goals, player_assists\n )\n\n elif len(point_leaders) > 3:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n point_leaders_with_attrs.append(player_short_name)\n\n point_leaders_joined = \", \".join(point_leaders_with_attrs[0:3])\n leftover_leaders = len(point_leaders) - 3\n points_leader_str = (\n f\"Points Leaders: {point_leaders_joined} & {leftover_leaders} others ({leader_points} each).\"\n )\n\n else:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n player_str = f\"{player_short_name} ({player_goals}G {player_assists}A)\"\n point_leaders_with_attrs.append(player_str)\n\n point_leaders_joined = (\n f\", \".join(point_leaders_with_attrs[:-1]) + f\" & {point_leaders_with_attrs[-1]}\"\n )\n points_leader_str = \"Points Leaders: {} with {} each.\".format(point_leaders_joined, leader_points)\n\n return season_series_str, points_leader_str, toi_leader_str", "def add_team_player():\n if request.form['add_template'] == 'Add Team':\n return render_template('addteam.html')\n elif request.form['add_template'] == 'Add Player':\n teams = get_team()\n return render_template('addplayer.html', teams=teams)\n else:\n return getAllPlayers()" ]
[ "0.6536402", "0.59650755", "0.5887284", "0.58515286", "0.5829555", "0.579289", "0.5745127", "0.57173425", "0.57036144", "0.56878537", "0.56028706", "0.55922323", "0.5529244", "0.5521643", "0.54728687", "0.54638255", "0.54560107", "0.5452628", "0.5441508", "0.5441441", "0.5440784", "0.5407121", "0.5395928", "0.53905064", "0.5351544", "0.534404", "0.534404", "0.5337229", "0.5323738", "0.5321518" ]
0.6215539
1
Sends message to RabbitMQ exchange
def send_message(msg, exchange, key=None): print(msg) connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq')) channel = connection.channel() exchange_type = 'direct' if exchange == 'other' else 'topic' channel.exchange_declare(exchange=exchange, exchange_type=exchange_type) if key is not None and exchange == 'logs': routing_key = f'scheduler.{key}' else: routing_key = '' channel.basic_publish(exchange=exchange, routing_key=routing_key, body=msg) connection.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_rabbit_message (params ):\n print \"sending message to rabbitmq exchange\"\n logging.basicConfig()\n rabbitmq_host = params.get( 'host' )\n rabbitmq_port = params.get( 'port' )\n rabbitmq_username = params.get( 'user-name' )\n rabbitmq_password = params.get( 'password' )\n exchange_name = params.get( 'exchange' )\n routing_key = params.get( 'routing' )\n message = params.get( 'message' )\n \n amqp_url='amqp://'+rabbitmq_username+':'+rabbitmq_password+'@'+rabbitmq_host+':'+rabbitmq_port+'/%2f'\n amqp_url = str(amqp_url)\n parameters = pika.URLParameters(amqp_url)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n \n channel.basic_publish(exchange=exchange_name,routing_key=routing_key,body=message)\n ## close connection at the end \n connection.close()", "def send(self):\n if self._stopping:\n return\n\n mytype = 'text/plain'\n\n try:\n if isinstance(json.loads(self.message),dict):\n mytype = 'application/json'\n except (TypeError,json.JSONDecodeError):\n if (isinstance(self.message,dict)):\n mytype = 'application/json'\n self.message = json.dumps(self.message)\n else:\n self.message = str(self.message)\n\n properties = pika.BasicProperties(app_id='sender',\n content_type=mytype)\n\n self._channel.basic_publish(self.exchange, self.routing_key, self.message, properties)\n self._message_number += 1\n self._deliveries.append(self._message_number)\n self.logger.info('published message # %i', self._message_number)", "def send(self, json):\n try:\n retval = self._channel.basic_publish(\n exchange=self.exchange_config['name'],\n routing_key=self.queue_config['name'],\n body=json,\n mandatory=False,\n properties=self._msg_properties\n )\n\n if retval == False:\n raise exceptions.MessageNotSentException(\"Message not sent, enable pika logging for more information\")\n except Exception as e:\n raise exceptions.ConnectionException(\"Connection error\", e)", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def send_message(self, message):\n self.client.queue.put(message)", "def send_message(self, message):\n self.send_message_queue.put(message)", "def publish_message(message: str, broker_ip: str, exchange_name: str, exchange_type: str):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=broker_ip))\n channel = connection.channel()\n channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type, durable=True)\n channel.basic_publish(exchange=exchange_name, routing_key='', body=message)\n print(f'Published {message} to the exchange')\n connection.close()", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def send_msg(self, msg):\n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(msg)))", "def send(self, msg):\n self._mailbox.put(msg)", "def send_msg(self, my_queue, my_msg):", "def send(self, msg):\n return self._channel_action(msg, 1)", "def send(event):\n\n\tid = get_hostname()\n\n\tmessage = str(id) + \"|\" + str(event)\n\n\tif mq is None: # if no mq exists\n\t\tprint \"mq is None\"\n\n\telse: # if mq exists\n\t\ttry:\n\n\t\t\tmq.send(message)\n\t\t\tprint 'completed sending message'\n\n\t\texcept Exception as e:\n\n\t\t\tprint 'failed to send message: {}'.format(e)", "def publish(self, queue, message):\n # 1. Setup the channel to use to publish message\n channel_handler = ChannelHandler(self._connection)\n\n # 2. 
Open the channel before using it\n channel_handler.open_channel()\n\n # 3. Send the message via the channel\n channel_handler.send_message(self._exchange_name, queue, message)\n\n # 4. Close the channel after publishing the message\n channel_handler.close_channel()\n LOGGER.info('Bellow message `%s` is published in `%s`', message, queue)", "def publish_message(self):\n\n message_count = 0\n while message_count < self._messages:\n message_count += 1\n message_body = \"task number %i\" %(message_count)\n self._channel.basic_publish(exchange='',\n routing_key=self._queue_name,\n body=message_body,\n properties=pika.BasicProperties(\n delivery_mode=2 # make message persistant\n ))\n print(\"Published message %i\" %(message_count))\n time.sleep(self._message_interval)", "async def send_event(\n self,\n payload: bytes,\n exchange_name: Optional[str] = None,\n routing_key: Optional[str] = None,\n **kwargs,\n ) -> None:\n exchange_name = exchange_name or os.getenv(\n \"PUBLISH_EXCHANGE_NAME\", \"default.in.exchange\"\n )\n routing_key = routing_key or os.getenv(\"PUBLISH_ROUTING_KEY\", \"#\")\n try:\n await self.channel.publish(\n payload=payload,\n exchange_name=exchange_name,\n routing_key=routing_key,\n **kwargs,\n )\n except ChannelClosed as err:\n await self.configure()\n if err.message.find(\"no exchange\") > 0:\n raise ExchangeNotFound(exchange_name) # type: ignore", "def send_mail(self, msg):\n mail_queue.put(msg)", "def send_message(self, message):\n self.outbox.put(message)\n if message.TYPE_STRING != \"ack\":\n self.awaiting_ack.put((message, time.time()))", "def send(self, message):\n pass", "async def send(self, message):", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def send_message(self, message):\n pass", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def send(self, message):\n if isinstance(message, basestring):\n self.send_queue.put(message)\n else:\n self.send_queue.put(struct.pack(\"!B\", message.type_id) +\n message.pack())", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "def on_reply(self, msg: str):\n self._logger.debug(f\"Got msg: {msg}\")\n self._rabbit_channel.basic_publish(exchange='', routing_key=QueueName.MSG_REPLY, body=str(msg))", "def send_and_flush(self, msg):\r\n try:\r\n self.bus.send(msg)\r\n msg.data[:4] = bytearray(4)\r\n # print(\"Message sent on {}\".format(self.bus.channel_info))\r\n except can.CanError:\r\n print(\"Message NOT sent\")" ]
[ "0.7955153", "0.73282826", "0.69862974", "0.6899452", "0.68242073", "0.676501", "0.6764902", "0.6729278", "0.669033", "0.668374", "0.66331697", "0.66054446", "0.65912455", "0.6582951", "0.6516775", "0.64865685", "0.6484264", "0.64665216", "0.64554477", "0.6451518", "0.643939", "0.6429638", "0.64238006", "0.64164287", "0.6393193", "0.6393193", "0.6388049", "0.63857627", "0.63579065", "0.63451535" ]
0.75768226
1
Email summary of results to user.
def EmailResults(recipient, error_mesg, topdir, dumpfile, logfile, motcor_summary): #********************************************************************************* if recipient is None: return elif 'noname' in recipient: return sender = 'preprocess' if 'Abnormal' in error_mesg > 0: subject = 'Problem while preprocessing %s' % topdir else: subject = 'Preprocessing complete for %s' % topdir mssg = error_mesg if logfile is not None and isinstance(logfile, str): f = open(logfile, 'r') lines = f.readlines() f.close() logged_errors = '' for i in xrange(len(lines)): if 'rror' in lines[i]: mssg += ''.join(lines[i-1:]) break mssg += motcor_summary if dumpfile is not None: f = open(dumpfile,'r') mssg += '\nSummary of processing:\n' mssg += f.read() f.close() send_email(recipient, subject, mssg, sender)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def publish_summary(self, jobs):\n pass", "def report(self, results):\n self.notice(\"Test Report\\n\")\n\n for count, group in enumerate(results, 1):\n results = (self._format_test(test, res) for test, res in group)\n results = (', ').join(results)\n self.notice(\"Test group %s:\\t%s\" % (count, results))\n\n self.divider()", "def message_user_results(self, request, successes, failures, action):\n\n self.message_user_success(request, successes, action)\n self.message_user_failure(request, failures, action)", "def composeSummaryEmail(self):\r\n message = \"\"\"From: Douglas Gregor <[email protected]>\r\nTo: [email protected]\r\nReply-To: [email protected]\r\nSubject: [Report] \"\"\"\r\n message += str(self.numFailures()) + \" failures on \" + branch\r\n if branch != 'trunk':\r\n message += ' branch'\r\n message += \" (\" + str(datetime.date.today()) + \")\"\r\n message += \"\"\"\r\n\r\nBoost regression test failures\r\n\"\"\"\r\n message += \"Report time: \" + self.date + \"\"\"\r\n\r\nThis report lists all regression test failures on high-priority platforms.\r\n\r\nDetailed report:\r\n\"\"\"\r\n\r\n message += ' ' + self.url + '\\n\\n'\r\n\r\n if self.numFailures() == 0:\r\n message += \"No failures! Yay!\\n\"\r\n return message\r\n \r\n # List the platforms that are broken\r\n any_broken_platforms = self.numReportableFailures() < self.numFailures()\r\n if any_broken_platforms:\r\n message += \"\"\"The following platforms have a large number of failures:\r\n\"\"\"\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n message += (' ' + platform + ' ('\r\n + str(len(self.platforms[platform].failures))\r\n + ' failures)\\n')\r\n\r\n message += \"\"\"\r\nFailures on these \"broken\" platforms will be omitted from the results below.\r\nPlease see the full report for information about these failures.\r\n\r\n\"\"\"\r\n \r\n # Display the number of failures\r\n message += (str(self.numReportableFailures()) + ' failures in ' + \r\n str(len(self.libraries)) + ' libraries')\r\n if any_broken_platforms:\r\n message += (' (plus ' + str(self.numFailures() - self.numReportableFailures())\r\n + ' from broken platforms)')\r\n \r\n message += '\\n'\r\n\r\n # Display the number of failures per library\r\n for k in sorted_keys( self.libraries ):\r\n library = self.libraries[k]\r\n num_failures = library.numFailures()\r\n message += ' ' + library.name + ' ('\r\n \r\n if library.numReportableFailures() > 0:\r\n message += (str(library.numReportableFailures())\r\n + \" failures\")\r\n \r\n if library.numReportableFailures() < num_failures:\r\n if library.numReportableFailures() > 0:\r\n message += ', plus '\r\n \r\n message += (str(num_failures-library.numReportableFailures()) \r\n + ' failures on broken platforms')\r\n message += ')\\n'\r\n pass\r\n\r\n message += '\\n'\r\n\r\n # Provide the details for the failures in each library.\r\n for k in sorted_keys( self.libraries ):\r\n library = self.libraries[k]\r\n if library.numReportableFailures() > 0:\r\n message += '\\n|' + library.name + '|\\n'\r\n for test in library.tests:\r\n if test.numReportableFailures() > 0:\r\n message += ' ' + test.name + ':'\r\n for failure in test.failures:\r\n platform = failure.platform\r\n if not platform.isBroken():\r\n message += ' ' + platform.name\r\n message += '\\n'\r\n\r\n return message", "def 
email_article_summary(to_address, summary_filename, start_year, start_month, end_year, end_month, num_articles):\n \n host = HOST\n from_address = FROM_ADDRESS\n body = \"\"\"\n Good morning,\n \n There were %i peer-reviewed papers produced by researchers at this institute between %i/%i and %i/%i. A summary file containing the front page from each article is attached with this email. Please print out these summary pages, highlight the author(s) on each article and pin them to the monthly papers noticeboard.\n \n Thanks a bunch,\n \n Skynet.\n \n \"\"\" % (num_articles, start_month, start_year, end_month, end_year, )\n \n recipients = [to_address, ADMIN_ADDRESS]\n \n logging.info(\"Preparing summary email report for %s\" % (', '.join(recipients), ))\n \n successful = True\n for recipient in recipients:\n \n message = MIMEMultipart()\n message[\"From\"] = from_address\n message[\"To\"] = recipient\n message[\"Subject\"] = \"Refereed papers summary between %i/%i and %i/%i\" % (start_month, start_year, end_month, end_year, )\n message[\"Date\"] = formatdate(localtime=True)\n \n message.attach(MIMEText(textwrap.dedent(body).lstrip()))\n \n part = MIMEBase('application', 'octet-stream')\n part.set_payload(open(summary_filename, 'rb').read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(summary_filename))\n message.attach(part)\n \n server = smtplib.SMTP(host)\n \n try:\n failed = server.sendmail(from_address, to_address, message.as_string())\n server.close()\n \n except Exception as e:\n logging.critical(\"Unable to send email to %s. Error: %s\" % (recipient, str(e), ))\n successful = False\n \n else:\n logging.info(\"Email successfully sent to %s\" % recipient)\n \n \n return successful", "def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, '[email protected]',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)", "def post(self, request, *args, **kwargs):\n self.form = self.get_form()\n self.form.full_clean()\n results = self.get_queryset()\n nb_results = results.count()\n first_results = results[:10]\n site = get_current_site(self.request)\n querystring = self.get_form_data().urlencode()\n scheme = 'https'\n search_url = reverse('search_view')\n full_url = '{scheme}://{domain}{search_url}?{querystring}'.format(\n scheme=scheme,\n domain=site.domain,\n search_url=search_url,\n querystring=querystring)\n results_body = render_to_string('emails/search_results.txt', {\n 'user_name': self.request.user.full_name,\n 'aids': first_results,\n 'nb_results': nb_results,\n 'full_url': full_url,\n 'scheme': scheme,\n 'domain': site.domain,\n })\n send_mail(\n self.EMAIL_SUBJECT,\n results_body,\n settings.DEFAULT_FROM_EMAIL,\n [self.request.user.email],\n fail_silently=False)\n return HttpResponse('')", "def send_results(self, collected_results: list):\n\n for scan in collected_results:\n raw_scan = scan.original_results\n scan_time = raw_scan.scan_info.scan_start_time.ToJsonString()\n logger.info('Scan: ' + raw_scan.tool_name + ' run at ' + scan_time)\n for issue in 
raw_scan.issues:\n logger.info('Issue: ' + str(issue))", "def summary_print(self):\r\n self.ensure_one()\r\n self.sent = True\r\n #return self.env['ir.actions.report'].report_action(self, 'proandsys_purchase_14.summary_landed_report')\r\n return self.env.ref('proandsys_purchase_14.summary_landedcost').report_action(self)", "def printSummary(self):\n pass", "def summaryView(request):\n\n alert_errors = []\n alert_infos = []\n alert_filters = []\n\n runs = get_runs_from_request_filters(\n request, alert_errors, alert_infos, alert_filters\n )\n\n summary = SummaryReport(runs)\n\n context = {\n \"refs\": summary.reference_runs(),\n \"runs\": summary.runs_checked_per_type(),\n \"tk_maps\": summary.tracker_maps_per_type(),\n \"certified_runs\": summary.certified_runs_per_type(),\n \"sums\": summary.sum_of_quantities_per_type(),\n \"alert_errors\": alert_errors,\n \"alert_infos\": alert_infos,\n \"alert_filters\": alert_filters,\n }\n\n return render(request, \"certhelper/summary.html\", context)", "def _publish_results(self):\n\n doc = Document()\n date = get_stamp()\n\n labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)\n\n title = 'Mutual Information labels vs ' + self._experiment_name\n self.plot_save(title,\n self._mutual_info,\n self._baseline_mutual_info,\n 'Norm. mutual information',\n labels, date, self._docs_folder, doc)\n\n title = 'Weak classifier accuracy labels vs ' + self._experiment_name\n self.plot_save(title,\n self._classifier_accuracy,\n self._baseline_classifier_accuracy,\n 'Classifier accuracy',\n labels, date, self._docs_folder, doc) #, smoothing_size=3)\n\n title = 'average delta'\n f = plot_multiple_runs(\n self._different_steps[0], # here the X axes are identical\n self._average_delta,\n title=title,\n ylabel='log(delta)',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n title = 'average boosting duration'\n f = plot_multiple_runs(\n self._different_steps[0],\n self._average_boosting_dur,\n title=title,\n ylabel='duration',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n doc.write_file(path.join(self._docs_folder, to_safe_name(self._complete_name() + date + \".html\")))\n\n print('done')", "def _display_results(self):\n self._display_summary()\n self._display_domain_record()\n self._display_ip_record()\n self._display_cert_details()\n self._display_ti_data()\n self._display_screenshot()\n self._display_related_alerts()\n self._display_bookmarks()\n self._display_dns_results()\n self._display_hosts()\n self._display_flows()", "def display_summary(self, *args):\n logger.debug(u\"{} Summary\".format(self.joueur))\n yield(self.remote.callRemote(\n \"display_summary\", self.currentperiod.todict()))\n self.joueur.info(\"Ok\")\n self.joueur.remove_waitmode()", "def trigger_result_email(\n self, project_id: str, topic_name: str,\n operation_counts_dict: Mapping[str, operation_counts.OperationCounts]\n ) -> None:\n topic = f'projects/{project_id}/topics/{topic_name}'\n message = {\n 'attributes': {\n 'content_api_results':\n json.dumps(\n operation_counts_dict,\n default=_convert_operation_counts_into_json)\n }\n }\n try:\n self._client.publish(topic, json.dumps(message).encode('utf-8'))\n except exceptions.GoogleCloudError as cloud_error:\n logging.exception('PubSub to mailer publish failed: %s', cloud_error)", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome 
== Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def _print_summary(results):\n if not len(results) > 0:\n print 'No results to show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")", "def send_status_mail(self):\n from django.core.mail import send_mail\n subject = \"App Load Status | %s - %s | S%02d/-R%02d\" % (\n self.publication.account, self.title, self.load_count, self.reject_count)\n body = \"Account: %s\\nPublication: %s\\nStatus: %s\\nTime:%s - %s\\nLoaded: %02d\\nRejected: %02d\\n\\nComments\\n:%s\\n\" % (\n self.publication.account, self.publication, self.load_status, self.start_time, self.end_time, self.load_count, self.reject_count, self.comments)\n body = body + settings.EMAIL_DEFAULT_SIGNATURE\n if self.publication.id in [2,3,60,61,62,63,370,39]:\n return send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.ADMIN_EMAIL, '[email protected]', '[email protected]', '[email protected]'], fail_silently=True)\n else:\n return send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.ADMIN_EMAIL], fail_silently=True)", "def results_summary(self, num_models=10, sort_metric=None):\n if self.state.dry_run:\n info(\"Dry-Run - no results to report.\")\n return\n\n # FIXME API documentation\n _results_summary(input_dir=self.state.host.results_dir,\n project=self.state.project,\n architecture=self.state.architecture,\n num_models=num_models,\n sort_metric=sort_metric)", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def display_results():\n pass", "def report(self, result):\n raise NotImplementedError", "def _print_aggregate_results(\n task: Task, task_results: Dict[Task, List[List[Dict[str, Any]]]]\n) -> None:\n aggregate_task_result = aggregate_nvs_results(task_results[task])\n print(\"\")\n print(f\"Aggregate results for task={task}:\")\n pretty_print_nvs_metrics(aggregate_task_result)\n print(\"\")", "def show_results(self):\n print(\"Survey results:\")\n for response in self.responses:\n print('- ' + response)", "def print_results(self):\n pass", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n 
self.last_5digit,\\\n self.charset)", "def report(self, **options):\n pass", "def dump_total_results(statistic_entries):\n individual_tests = sum([entry['correct answers'] + entry['wrong answers']\n for entry in statistic_entries])\n average_per_test = sum([entry['total time (s)'] for entry in statistic_entries]) \\\n / float(individual_tests)\n average_per_run = sum([entry['total time (s)'] for entry in statistic_entries]) \\\n / float(len(statistic_entries))\n\n best_time = min([entry['best time (s)'] for entry in statistic_entries])\n worst_time = max([entry['worst time (s)'] for entry in statistic_entries])\n\n print(\"\\nSummary for all done tests:\")\n print(\" %5d total test runs\" % len(statistic_entries))\n print(\" %5d individual tests\" % individual_tests)\n print(\" %5.1f individual tests per run\" % (individual_tests / float(len(statistic_entries))))\n print(\" %5.2f seconds per answer (average)\" % average_per_test)\n print(\" %5.2f seconds per run (average)\" % average_per_run)\n print(\" %5.2f seconds was best time.\" % best_time)\n print(\" %5.2f seconds was worst time.\" % worst_time)", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)" ]
[ "0.6340017", "0.6205421", "0.6195181", "0.61352056", "0.60605377", "0.60505825", "0.5933892", "0.5929879", "0.5912047", "0.57762545", "0.57740766", "0.57343775", "0.56606674", "0.56550497", "0.5646071", "0.56351393", "0.5634105", "0.5610652", "0.5604074", "0.56035924", "0.55907", "0.55873597", "0.55791926", "0.5572864", "0.5547156", "0.5494798", "0.5490097", "0.5475123", "0.54725397", "0.5471057" ]
0.648858
0
Synthesize yaml header filename from directory name.
def _yaml_filename(self, path): fullpath = os.path.abspath(path) if not os.path.isdir(fullpath): dirname = os.path.dirname(fullpath) else: dirname = path if dirname.endswith('/'): dirname = dirname[:-1] fname = dirname.split('/')[-1] + '.yaml' return dirname, fname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def format_filename(title: str, id: Any, ext: str = \".\", dirFormat=None):\r\n ...", "def file_title(self):\n basename = os.path.basename(self.__path)\n index_dot = basename.rfind(\".\")\n if index_dot == 0:\n return basename[1:]\n return basename if index_dot < 0 else basename[:index_dot]", "def _filename(self, key):\n return os.path.join(self.root, key[:2], key)", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def generate_header(name: str) -> str:\n return MARKDOWN_HEADER.format(name.capitalize(), date.today())", "def ifdef_name(filename):\n return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\"", "def filename(self):\n return self.config.get('filename', self.id) + f'_{self.file_suffix}'", "def make_filename(key, extension):\n key = unicode(key.strip())\n return '{}.{}'.format(slugify(key), extension)", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def _get_filename(self, type_: str, name: str) -> str:\n if not os.path.isdir(self._datadir):\n os.mkdir(self._datadir, mode=0o700)\n\n type_dir = os.path.join(self._datadir, type_)\n if not os.path.isdir(type_dir):\n os.mkdir(type_dir, mode=0o700)\n\n fn = os.path.join(type_dir, name) + '.yaml'\n return fn", "def format_filename(prefix, suffix, seq_len, uncased):\n seq_str = \"seq-{}\".format(seq_len)\n if uncased:\n case_str = \"uncased\"\n else:\n case_str = \"cased\"\n\n file_name = \"{}.{}.{}.{}\".format(prefix, seq_str, case_str, suffix)\n\n return file_name", "def root_name(file_name, file_id):\n if file_id is not None:\n return \"{}{}\".format(R_DIR, file_name.format(file_id))\n else:\n return \"{}{}\".format(R_DIR, file_name)", "def genBaseName(fileName):\n return fileName.split(\"_\")[0].split(\".\")[0]", "def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def get_file_name(replay_dir, template_name):\n suffix = '.json' if not template_name.endswith('.json') else ''\n file_name = f'{template_name}{suffix}'\n return os.path.join(replay_dir, file_name)", "def filename(self):\n return '%s%s' % (self.identifier, self.extension)", "def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n ))", "def format_name(name_dir):\n if(name_dir.endswith('/')):\n name_dir = name_dir.rstrip('/')\n return(name_dir)", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def title(self):\n return os.path.basename(self.__path) if self.isdir() else self.file_title", "def 
content_file_name(self, filename):\n ext = filename.split('.')[-1]\n filename = \"%s_%s.%s\" % (filename, self.id, ext)\n return os.path.join('pictures/static/pictures/', filename)", "def filename(lang):\n filename = lang.replace('-', '_')\n filename = filename.lower()\n return filename", "def _title(self, path):\n title = os.path.basename(os.path.splitext(path)[0])\n return title", "def extract_dir_name(input_file):\r\n fname = PurePath(input_file).__str__()\r\n s = fname.split('.')\r\n name = '.'.join(s[:-1])\r\n return name", "def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname", "def collected_filename(cfg, collect_dir, i=None):\n if i is not None:\n file = cfg[\"files\"][i]\n else:\n file = cfg[\"file\"]\n ext = path.splitext(file)[1]\n name = cfg[\"id\"]\n if i is not None:\n name += \"_\" + str(i)\n return path.join(collect_dir, name + ext)", "def path_to_name(img):\n\n return os.path.dirname(img) + '_' + os.path.basename(img)", "def _make_fname(song, ext=None, av=None, subdir=None):\n # pylint: disable=E1103\n # Instance of 'bool' has no 'extension' member (some types not inferable)\n ddir = os.path.join(Config.DDIR.get, subdir) if subdir else Config.DDIR.get\n if not os.path.exists(ddir):\n os.makedirs(ddir)\n\n if ext:\n extension = ext\n\n else:\n stream = streams.select(streams.get(song),\n audio=av == \"audio\", m4a_ok=True)\n extension = stream['ext']\n\n # filename = song.title[:59] + \".\" + extension\n filename = song.title + \".\" + extension\n filename = os.path.join(ddir, mswinfn(filename.replace(\"/\", \"-\")))\n filename = filename.replace('\"', '')\n return filename" ]
[ "0.63299215", "0.6177597", "0.6154436", "0.6145138", "0.61193216", "0.6117771", "0.60900265", "0.6084294", "0.59812474", "0.59652424", "0.5949886", "0.59462756", "0.5918559", "0.59006333", "0.5891024", "0.58728975", "0.5864215", "0.5860745", "0.5852123", "0.58312166", "0.5803451", "0.5797999", "0.5788537", "0.5786971", "0.5777936", "0.5775427", "0.5753256", "0.57517606", "0.5700192", "0.56959397" ]
0.6754749
0
Create list of epis in pfile format (epi_series) and of epis in dicom format (epirt_paths)
def _EpiInfo(self, info, path): epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \ 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']} for key in self.epi_keys.keys(): if self.epi_keys[key] != str(epi_vals[key]): # Return None, which will cause these data to be ignored. return None # Early versions of the EPIC software saved p-files for the setup epis. # Don't process these (or any epi with fewer than eight useable frames). if self.hdr['tdim'] < (8 + self.skip): return None info['slice_order'] = self.shdr.get('SliceOrder', 'altplus') if self.shdr['EffEchoSpacing'] is not None: info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000. else: info['echo_spacing'] = 0. if info['data_filetype'] == 'dicom': # Entry is name of dirctory for dicom images. if not os.path.isdir(path): entry = os.path.dirname(path) else: entry = path else: # Otherwise it is the name of a directory containing p-files. entry = path if info['data_filetype'] == 'ge_data' and info['type'] is not None: # Found a pfile. Add it to the list. if entry not in self.pfiles and info['tdim'] > 2: self.pfiles.append(entry) self.entry_map['epi'].append(entry) if info['series'] not in self.epi_series: self.epi_series.append(info['series']) elif info['data_filetype'] == 'dicom' and \ info['psdname'] == 'epibold': # This is the initial EPI done during setup. info['outdir'] = self.episetup_dir info['type'] = 'first_epi' self.entry_map['first_epi'].append(entry) info['imgfile'] = '%s/first_epi_%d' % \ (self.episetup_dir, len(self.entry_map['first_epi'])) elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \ info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2: # This is an epi reconstructed on the scanner. self.epi_series.append(info['series']) self.entry_map['epi'].append(entry) if not os.path.isdir(path): tmp_path = os.path.dirname(path) else: tmp_path = path self.epirt_paths.append(tmp_path) if self.fsl_flip: info['filetype'] = 'brik' else: info['filetype'] = self.tmplt['epi_file_format'] info['TR'] = self.hdr['tsize'] if self.tmplt['acq_tr'] is None: info['acq_tr'] = float(info['TR']) else: info['acq_tr'] = float(self.tmplt['acq_tr']) return OK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % 
self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])", "def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)", "def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname", "def dicom_load():\n # Identify folders with EPI data\n dirs = [i for i in os.listdir(dcm_dir) if os.path.isdir(os.path.join(dcm_dir, i))]\n d_cnt = 0\n for d in dirs:\n dcm_file = os.path.join(dcm_dir,d,os.listdir(os.path.join(dcm_dir,d))[0])\n try:\n dcm_data = pydicom.dcmread(dcm_file)\n except:\n pass\n else:\n # If data is EPI then get start time, etc\n if 'EPI' in dcm_data.ImageType:\n 
dcm_dict[d_cnt] = {}\n dcm_dict[d_cnt]['dcm_file'] = dcm_file\n dcm_dict[d_cnt]['task_name'] = dcm_data.SeriesDescription\n dcm_dict[d_cnt]['task_name'] = dcm_dict[d_cnt]['task_name'].replace('_','-')\n date = dcm_data.SeriesDate\n start = dcm_data.SeriesTime\n start_time = '%s-%s-%s %s:%s:%s'%(date[0:4],date[4:6],date[6:],start[0:2],start[2:4],start[4:])\n dcm_dict[d_cnt]['start_time'] = datetime.fromisoformat(start_time)\n dcm_dict[d_cnt]['run_length'] = dcm_data[0x0019,0x105a].value/1000\n dcm_dict[d_cnt]['end_time'] = dcm_dict[d_cnt]['start_time'] + timedelta(milliseconds=dcm_dict[d_cnt]['run_length'])\n d_cnt = d_cnt+1", "def get_endpoints(self, epg_dn):\n result = []\n for item in filter(lambda x: type(x).__name__ == 'CEp', self.query_child_objects(epg_dn)):\n # Creates a dynamic object type.\n endpoint = type('endpoint', (object,), {})\n\n # Filter the endpoint in memory looking for the object that contains the interface where the endpoint is\n # attached\n endpoint_connection_mo = filter(lambda x: type(x).__name__ == 'RsCEpToPathEp',\n self.query_child_objects(item.dn))[0]\n\n # Format the string to be human readable\n endpoint_connection_interface = str(endpoint_connection_mo.tDn).replace('topology/pod-1/paths','node').\\\n replace('pathep-[', '').replace(']','')\n\n # Add attributes to the object\n endpoint.ip = item.ip\n endpoint.mac = item.mac\n endpoint.name = item.name\n endpoint.interface = endpoint_connection_interface\n\n # Append it to the list\n result.append(endpoint)\n return result", "def list_all_ephemerides_files(self) -> Dict:\n ephs = self.list_result_ephemerides_files()\n while 'nextPageToken' in ephs:\n next_page_token = ephs['nextPageToken']\n _, e = self.list_result_ephemerides_files(page_token=next_page_token)\n ephs['ephemerisResourcePath'].extend(e['ephemerisResourcePath'])\n return ephs", "def readEpi_fromSequence(fpath, position=0, direction='h'):\n assert isinstance(fpath, str)\n\n fnames = []\n for f in glob(fpath + \"*.png\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.jpg\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.bmp\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.ppm\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.tif\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.bmp\"):\n fnames.append(f)\n fnames.sort()\n\n im = misc.imread(fnames[0])\n channels = 1\n if len(im.shape) == 3:\n channels = 3\n\n if direction == 'h':\n epi = np.zeros((len(fnames), im.shape[1], channels))\n if direction == 'v':\n epi = np.zeros((len(fnames), im.shape[0], channels))\n\n for n,f in enumerate(fnames):\n im = misc.imread(fnames[n])\n if direction == 'h':\n if len(im.shape) == 3:\n epi[n, :, 0:3] = im[position, :, 0:3]\n else:\n epi[n, :, 0] = im[position, :]\n if direction == 'v':\n if len(im.shape) == 3:\n epi[n, :, 0:3] = im[ :, position, 0:3]\n else:\n epi[n, :, 0] = im[:, position]\n\n return epi[:, :, 0:channels]", "def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = 
np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps", "def plotERP(self, ep):\n import os \n import matplotlib.pyplot as plt\n \n try:\n filename = ep.filename.split('\\\\')[-1].split('.fif')[0]\n filename = 'plotsEEG_'+filename.split('_')[0] \n except Exception as err: \n filename = 'plots_eeg_file' \n print(err) \n finally:\n print('Saving ERP plots at >>>>', os.getcwd())\n \n try:\n os.mkdir(os.path.join(os.getcwd(), filename)) \n os.chdir(os.path.join(os.getcwd(), filename)) \n except Exception as err:\n print(err) \n \n \n ep = ep.interpolate_bads(reset_bads='True', mode = 'accurate')\n ep.info['bads'] = []\n \n ep.plot_psd(area_mode='range',fmin=0, fmax=40, tmax=10.0).savefig(filename + '_psd')\n\n# picks = ['FC2', 'C4', 'Cz', 'C5', 'FC1'] \n \n ep.plot_image(picks = None, cmap='interactive', sigma=1) \n \n plt.savefig(filename + '_image') \n \n bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 30, 'Beta'), (30, 45, 'Gamma')] \n \n ep.plot_psd_topomap(bands=bands, vmin=None, vmax=None, \n tmin=0, tmax=0.5).savefig(filename + '_psd_topo')\n \n ep.plot_sensors().savefig(filename + '_sensors_') \n \n ep.plot_topo_image(vmin=-25, vmax=25, title='ERF images', sigma=3.,\n fig_facecolor='w', font_color='k').savefig(filename + '_image_topo') \n \n ep.average().plot().savefig(filename + 'erp_average_')\n ep.average().plot_image().savefig(filename + '_erp_average_image')\n print('Saving ERP plots at >>>>', os.getcwd())", "def _get_ais_paths(self) -> list:\n ais_files = []\n year = self.year\n end_year = self.year\n for month in range(1, 13):\n end_month = month + 1\n if month == 12:\n end_year += 1\n end_month = 1\n\n for vessel_type in self.vessel_types:\n path_template = f\"{vessel_type}_{year}{month:02}01-{end_year}{end_month:02}01_total.tif\"\n fname = self.dir / path_template\n ais_files.append(fname)\n\n return ais_files", "def get_filepaths_and_exts(self):\n filepaths = [prod.filepath for prod in self.products]\n exts = [prod.ext for prod in self.products]\n\n return filepaths, exts", "def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes", "def _get_pex_paths(self) -> list:\r\n pex_paths: list = []\r\n\r\n for object_name, 
script_path in self.psc_paths.items():\r\n pex_path = os.path.join(self.options.output_path, object_name.replace('.psc', '.pex'))\r\n\r\n # do not check if file exists, we do that in _find_missing_script_paths for a different reason\r\n if pex_path not in pex_paths:\r\n pex_paths.append(pex_path)\r\n\r\n return pex_paths", "def get_2Dtodo(loc=BASE):\n toproc = []\n for ff in Path(loc).glob(\"**/*.d/proces*.mscf\"):\n if (ff.parent/'ser').exists():\n toproc.append(ff)\n if DEBUG:\n print('get_2Dtodo:')\n pprint([str(i.parent.name) for i in toproc])\n return toproc", "def extract_mediapackage_endpoints(mp_client, mp_channel_id_list):\n emp_endpoint_list = {}\n for channel in mp_channel_id_list:\n emp_endpoint_list[str(channel)] = []\n response = mp_client.list_origin_endpoints()\n for endpoint in response['OriginEndpoints']:\n if str(endpoint[\"ChannelId\"]) in mp_channel_id_list:\n emp_endpoint_list[str(endpoint[\"ChannelId\"])].append(str(endpoint['Id']))\n return emp_endpoint_list", "def list_result_ephemerides_files(\n self, page_size: int = 100, page_token: str = None) -> Dict:\n params = {}\n if page_size < 0 or page_size > 100:\n page_size = 100\n params['pageSize'] = page_size\n if page_token:\n params['pageToken'] = page_token\n ephs = self._rp._rest.get(\n f'/projects/{self._rp._project}/jobs/{self._job_uuid}'\n f'/ephemerides?{urllib.parse.urlencode(params)}')\n return ephs", "def _GetEpiOrder(self):\n self.epi_series.sort()\n for series in self.epi_series:\n self.GetEpiAcqTimes(series)\n self.AssignEpiNames()", "def path_convert(self):\n pub_path = Exp_msg()\n for i in self.path:\n epoint = Cordi()\n (epoint.x, epoint.y) = i\n pub_path.bliss.append(epoint)\n return(pub_path)", "def _get_fsevent_image_files(self):\r\n # Print the header columns to the output file\r\n Output.print_columns(self.l_all_fsevents)\r\n \r\n scan_path_spec = None\r\n scanner = source_scanner.SourceScanner()\r\n scan_context = source_scanner.SourceScannerContext()\r\n scan_context.OpenSourcePath(self.meta['source'])\r\n\r\n scanner.Scan(\r\n scan_context,\r\n scan_path_spec=scan_path_spec\r\n )\r\n\r\n for file_system_path_spec, file_system_scan_node in scan_context._file_system_scan_nodes.items():\r\n t_files = 0\r\n self.all_files_count = 0\r\n self.error_file_count = 0\r\n self.all_records_count = 0\r\n self.parsed_file_count = 0\r\n \r\n try:\r\n location = file_system_path_spec.parent.location\r\n except:\r\n location = file_system_path_spec.location\r\n \r\n print(\" Processing Volume {}.\\n\".format(location))\r\n\r\n fs_event_path_spec = path_spec_factory.Factory.NewPathSpec(\r\n file_system_path_spec.type_indicator,\r\n parent=file_system_path_spec.parent,\r\n location=\"/.fseventsd\"\r\n )\r\n\r\n file_entry = resolver.Resolver.OpenFileEntry(\r\n fs_event_path_spec\r\n )\r\n \r\n if file_entry != None:\r\n\r\n t_files = file_entry.number_of_sub_file_entries\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n t_files -= 1\r\n\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n counter = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n counter += 1\r\n self.all_files_count += 
1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(counter, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Name of source fsevent file\r\n self.src_filename = sub_file_entry.name\r\n self.src_fullpath = self.meta['source'] + \": \" + location + sub_file_entry.path_spec.location\r\n\r\n stat_object = sub_file_entry.GetStat()\r\n\r\n # UTC mod date of source fsevent file\r\n self.m_time = datetime.datetime.fromtimestamp(\r\n stat_object.mtime).strftime(\r\n '%Y-%m-%d %H:%M:%S') + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(self.src_filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n file_object = sub_file_entry.GetFileObject()\r\n\r\n compressedFile = io.StringIO.BytesIO()\r\n compressedFile.write(file_object.read())\r\n compressedFile.seek(0)\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(fileobj=compressedFile, mode='rb')\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_filename)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. 
Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)\r\n \r\n else:\r\n print('Unable to process volume or no fsevent files found')\r\n continue\r\n\r\n print('\\n\\n All Files Attempted: {}\\n All Parsed Files: {}\\n Files '\r\n 'with Errors: {}\\n All Records Parsed: {}'.format(\r\n self.all_files_count,\r\n self.parsed_file_count,\r\n self.error_file_count,\r\n self.all_records_count))", "def _get_hostendpoints(self, host, intf_ep, config):\n\n for uuid in intf_ep.keys():\n\n intf = intf_ep[uuid][0]\n iftype = intf_ep[uuid][1]\n\n host_endpoints = dict()\n hep_name = host.hostname + \"-\" + intf.ifname + \"-if-hep\"\n\n host_endpoints[\"apiVersion\"] = \"crd.projectcalico.org/v1\"\n host_endpoints[\"kind\"] = \"HostEndpoint\"\n host_endpoints.update({\"metadata\": dict()})\n host_endpoints[\"metadata\"].update({\"name\": hep_name})\n host_endpoints[\"metadata\"].update({\"labels\": dict()})\n host_endpoints[\"metadata\"][\"labels\"].update({\"nodetype\": host.personality})\n host_endpoints[\"metadata\"][\"labels\"].update({\"ifname\":\n f\"{host.hostname}.{intf.ifname}\"})\n host_endpoints[\"metadata\"][\"labels\"].update({\"iftype\": iftype})\n\n host_endpoints.update({\"spec\": dict()})\n host_endpoints[\"spec\"].update({\"node\": host.hostname})\n interfaceName = puppet_intf.get_interface_os_ifname(self.context, intf)\n host_endpoints[\"spec\"].update({\"interfaceName\": interfaceName})\n\n # adding only for OAM for compatibility with old implementation\n if constants.NETWORK_TYPE_OAM in iftype:\n hep_name = host.hostname + \"-oam-if-hep\"\n host_endpoints[\"metadata\"][\"name\"] = hep_name\n self._add_hep_expected_ip(host, constants.NETWORK_TYPE_OAM, host_endpoints)\n\n config[hep_name] = copy.copy(host_endpoints)", "def make_photon_arrays(path, numevents):\n xcoords = []\n zcoords = []\n \n nsipmarrays = []\n nabsarrays = []\n \n for filename in os.listdir(path):\n\n photondata = np.loadtxt(path+'/'+filename,delimiter=',',usecols=[1,4])\n\n coords = filename[0:8]\n\n arraylen = len(photondata.flatten('F'))\n \n nsipmphotons = photondata.flatten('F')[numevents:arraylen]\n #print(len(nsipmphotons))\n nabsphotons = photondata.flatten('F')[0:numevents] \n \n nsipmarrays.append(nsipmphotons)\n nabsarrays.append(nabsphotons)\n \n x = re.findall('(-[0-9]+)x',coords) \n \n if bool(x) == False:\n x = re.findall('([0-9]+)x', coords)\n \n z = re.findall('(-[0-9]+)z',coords) \n\n if bool(z) == False:\n z = re.findall('([0-9]+)z',coords)\n\n xcoords.append(x[0])\n zcoords.append(z[0])\n \n xcoords = np.array(xcoords).astype(np.float)\n zcoords = np.array(zcoords).astype(np.float)\n \n return xcoords, zcoords, nsipmarrays, nabsarrays", "def partid2eids(self, partid, etype=...):\n ...", "def partid2eids(self, partid, etype=...):\n ...", "def process_output_files(pst, pst_path=\".\"):\n if not isinstance(pst, pyemu.Pst):\n raise Exception(\n \"process_output_files error: 'pst' arg must be pyemu.Pst instance\"\n )\n series = []\n for ins, out in zip(pst.instruction_files, pst.output_files):\n ins = os.path.join(pst_path, ins)\n out = os.path.join(pst_path, out)\n if not os.path.exists(out):\n warnings.warn(\"out file '{0}' not found\".format(out), PyemuWarning)\n f = os.path.join(pst_path, ins)\n i = InstructionFile(ins, pst=pst)\n try:\n s = 
i.read_output_file(out)\n series.append(s)\n except Exception as e:\n warnings.warn(\"error processing output file '{0}': {1}\".format(out, str(e)))\n if len(series) == 0:\n return None\n series = pd.concat(series)\n # print(series)\n return series", "def partid2eids(self, partid, etype): # -> None:\n ...", "def as_exons(self,input={}):\n # handle potentially applied input argument\n self._handle_input_subdict(input)\n # parse data in the AbgpGeneLocusDir\n self.parseinputgff()\n self.rungetorf()\n # we need abgp_geneconfirmation.geneconfirmation first!\n geneconfirmation( { self._create_auto_key(): self.input } )\n\n # get only the CDS-type of tracks that define the coding sequence\n genecdstracks = filtergffs4fmethod( self._obtain_gene_gff(), GFF_CDS_FMETHOD ) \n\n if len(genecdstracks) == 1:\n # deal with SingleExonOnOrf -> TSS + donor\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n return [ SingleExonOnOrf(tss,genecdstracks[-1][4],orf,gff={}) ]\n\n elif len(genecdstracks) == 0:\n # no tracks !?\n return []\n elif not self.input['orfid-genestructure']:\n # not mappable on Orfs / or no genestructure provided\n return []\n else:\n # list with exons,introns to return \n exons = []\n introns = []\n exonsandintrons = []\n\n # deal with FirstExonOnOrf -> TSS + donor\n try:\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n except:\n print self.input.keys(), self.input['proteinfref']\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n donor = self._gene_cds_track_2_donor( genecdstracks[0], orf )\n donor.phase = ( genecdstracks[0][4]-genecdstracks[0][3]-1 ) % 3\n exons.append( FirstExonOnOrf(tss,donor,orf,gff={}) )\n exonsandintrons.append( exons[-1] )\n\n # deal with internal ExonOnOrf(s): -> acceptor + donor\n for pos in range(1,len(genecdstracks)-1):\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][pos])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[pos], orf )\n accep.phase = exons[-1].donor.phase\n donor = self._gene_cds_track_2_donor( genecdstracks[pos], orf )\n donor.phase = ( genecdstracks[pos][4]-genecdstracks[pos][3]-1+accep.phase ) % 3\n exons.append( ExonOnOrf(accep,donor,orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf, exons[-2].orf,\n exons[-1].acceptor, exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # deal with FinalExonOnOrf -> acceptor + StopCodon\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][-1])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[-1], orf )\n accep.phase = exons[-1].donor.phase\n exons.append( FinalExonOnOrf(accep,genecdstracks[-1][4],orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf,exons[-2].orf,\n exons[-1].acceptor,exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # return list of exons&introns\n return exonsandintrons", "def plotSpectrums2D(path_fsrcnn, path_liif, path_msrn, path_esrgan, path_bicubic, out_path):\n 
radial_FSRCNN = []\n radial_LIIF = []\n radial_MSRN = []\n radial_ESRGAN = []\n radial_BICUBIC = []\n\n for FSRCNN_path, LIIF_path, MSRN_path, ESRGAN_path, bicubic_path in zip(sorted(glob.glob(path_fsrcnn)), sorted(glob.glob(path_liif)), sorted(glob.glob(path_msrn)), sorted(glob.glob(path_esrgan)), sorted(glob.glob(path_bicubic))):\n print(FSRCNN_path)\n radialProfile_FSRCNN = spectrum2D(FSRCNN_path)\n print(len(radialProfile_FSRCNN))\n radial_FSRCNN.append(radialProfile_FSRCNN)\n print(LIIF_path)\n radialProfile_LIIF = spectrum2D(LIIF_path)\n radial_LIIF.append(radialProfile_LIIF)\n print(MSRN_path)\n radialProfile_msrn = spectrum2D(MSRN_path)\n radial_MSRN.append(radialProfile_msrn)\n print(ESRGAN_path)\n radialProfile_esrgan = spectrum2D(ESRGAN_path)\n radial_ESRGAN.append(radialProfile_esrgan)\n print(bicubic_path)\n radialProfile_bicubic = spectrum2D(bicubic_path)\n radial_BICUBIC.append(radialProfile_bicubic)\n\n image_name = FSRCNN_path.split('/')[-1]\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n\n plt.figure()\n plt.plot(radialProfile_FSRCNN, label='FSRCNN')\n plt.plot(radialProfile_LIIF, label='LIIF')\n plt.plot(radialProfile_msrn, label='MSRN')\n plt.plot(radialProfile_esrgan, label='ESRGAN')\n plt.plot(radialProfile_bicubic, label='BICUBIC')\n plt.legend()\n plt.title(image_name)\n plt.yscale('log')\n plt.savefig(out_path + image_name)\n\n mean_FSRCNN = tolerant_mean(radial_FSRCNN)\n mean_LIIF = tolerant_mean(radial_LIIF)\n mean_MSRN = tolerant_mean(radial_MSRN)\n mean_ESRGAN = tolerant_mean(radial_ESRGAN)\n mean_BICUBIC = tolerant_mean(radial_BICUBIC)\n plt.figure()\n plt.plot(mean_FSRCNN, label='FSRCNN')\n plt.plot(mean_LIIF, label='LIIF')\n plt.plot(mean_MSRN, label='MSRN')\n plt.plot(mean_ESRGAN, label='ESRGAN')\n plt.plot(mean_BICUBIC, label='BICUBIC')\n plt.legend()\n plt.title('MEAN')\n plt.yscale('log')\n plt.savefig(out_path + 'mean')\n #return radialProfile_FSRCNN.shape, radialProfile_LIIF.shape, radialProfile_msrn.shape", "def list_files(tag='', inst_id='', data_path='', format_str=None,\n supported_tags=None, file_cadence=dt.timedelta(days=1),\n two_digit_year_break=None, delimiter=None):\n\n if format_str is None:\n # pyast performs a check against `inst_id` and `tag` before calling\n # `list_files`. However, supported_tags is a non-pysat input.\n try:\n format_str = supported_tags[inst_id][tag]\n except KeyError as kerr:\n raise ValueError(' '.join(('Unknown inst_id or tag:',\n str(kerr))))\n\n # Get the series of files\n out = pysat.Files.from_os(data_path=data_path, format_str=format_str,\n two_digit_year_break=two_digit_year_break,\n delimiter=delimiter)\n\n # If the data is not daily, pad the series. 
Both pds.DateOffset and\n # dt.timedelta contain the 'days' attribute, so evaluate using that\n if not out.empty and not is_daily_file_cadence(file_cadence):\n emonth = out.index[-1]\n out.loc[out.index[-1] + file_cadence\n - dt.timedelta(days=1)] = out.iloc[-1]\n new_out = out.asfreq('D')\n\n for i, out_month in enumerate(out.index):\n if(out_month.month == emonth.month\n and out_month.year == emonth.year):\n out_month = emonth\n\n crange = pds.date_range(start=out_month, periods=2,\n freq=file_cadence)\n irange = pds.date_range(*crange.values, freq=\"D\").values[:-1]\n sel_range = new_out.index.intersection(irange)\n new_out[sel_range] = out.loc[out_month]\n\n # Assign the non-NaN files to out and add days to the filenames\n out = new_out.dropna()\n out = out + '_' + out.index.strftime('%Y-%m-%d')\n\n return out", "def calculate(self):\r\n #process_data = psxview.PsXview(self._config).calculate()\r\n #for offset, eprocess, ps_sources in process_data:\r\n # method = \"Process\"\r\n # pid = eprocess.UniqueProcessId\r\n # name = (eprocess.ImageFileName or '')\r\n # path = ' # check volshell > dt(\"_EPROCESS\") for attrib?\r\n # yield method, pid, name, '-'\r\n\r\n \"\"\" Look at Internet paths \"\"\"\r\n internet_data = iehistory.IEHistory(self._config).calculate()\r\n for process, record in internet_data:\r\n method = \"Internet\"\r\n proc = process.ImageFileName\r\n pid = process.UniqueProcessId\r\n fpath = record.Url\r\n if record.FileOffset > 0:\r\n fpath = fpath +' | '+record.File\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, proc, method):\r\n continue\r\n yield method, pid, proc, fpath\r\n\r\n for task in taskmods.DllList.calculate(self):\r\n pid = task.UniqueProcessId\r\n proc = str(task.ImageFileName)\r\n\r\n \"\"\" Look at the Handle file paths \"\"\"\r\n if task.ObjectTable.HandleTableList:\r\n for handle in task.ObjectTable.handles():\r\n\r\n if not handle.is_valid():\r\n continue\r\n\r\n method = \"Handle\"\r\n object_type = handle.get_object_type()\r\n if object_type == \"File\":\r\n # Only look at \"File\" object_type's\r\n file_obj = handle.dereference_as(\"_FILE_OBJECT\")\r\n fpath = str(file_obj.file_name_with_device())\r\n #fname = str(fpath).rsplit('\\\\',1)[1] # might get IndexError\r\n if fpath:\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, None, method):\r\n continue\r\n if not self._has_extension(fpath):\r\n continue\r\n if self._is_blacklisted(fpath, None, method):\r\n yield method, pid, proc, fpath\r\n\r\n \"\"\" Look at file paths in processes CLI args \"\"\"\r\n cmdline = \"\"\r\n if task.Peb:\r\n method = \"CLI\"\r\n fpath = \"{0}\".format(str(task.Peb.ProcessParameters.CommandLine or '')).strip()\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, proc, method):\r\n continue\r\n if not self._has_extension(fpath):\r\n continue\r\n if self._is_blacklisted(fpath, proc, method):\r\n yield method, pid, proc, fpath\r\n\r\n \"\"\" Look at Service file paths \"\"\"\r\n scanner = svcscan.SvcScan(self._config)\r\n for service in scanner.calculate():\r\n method = \"Service\"\r\n name = str(service.ServiceName.dereference() or '')\r\n if service.Binary:\r\n fpath = service.Binary.strip('\"')\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, name, method):\r\n continue\r\n if self._is_blacklisted(fpath, name, method):\r\n yield method, \"-\", name, fpath\r\n\r\n \"\"\" Look at file paths \"\"\"\r\n scanner = filescan.FileScan(self._config)\r\n for fobj in scanner.calculate():\r\n method = \"File\"\r\n 
fpath = str(fobj.file_name_with_device() or '')\r\n if fpath:\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, None, method):\r\n continue\r\n if not self._has_extension(fpath):\r\n continue\r\n if self._is_blacklisted(fpath, None ,method):\r\n yield method, '-', '-', fpath\r\n\r\n \"\"\" Look at ShimCache file paths \"\"\"\r\n shimcache_data = shimcache.ShimCache(self._config).calculate()\r\n if shimcache_data:\r\n method = \"Shim\"\r\n for path, last_modified, last_updated in shimcache_data:\r\n fpath = str(path).strip()\r\n yield method, '-', '-', fpath\r\n\r\n # takes a long time...\r\n \"\"\" Look at Shellbag file paths \"\"\"\r\n #shellbag_data = shellbags.ShellBags(self._config).calculate()\r\n #if shellbag_data:\r\n # method = \"Shellbag\"\r\n # try:\r\n # for item, shell, path in shellbag_data:\r\n # yield method, '-', '-', path\r\n # except Exception as err:\r\n # print err\r\n # for item, num, shell, path in shellbag_data:\r\n # yield method, '-', '-', path\r\n\r\n \"\"\" Look at SymLink file paths \"\"\"\r\n #scanner = filescan.SymLinkScan(self._config)\r\n #for symlink in scanner.calculate():\r\n # method = \"SymLink\"\r\n # fpath = str(symlink.LinkTarget or '')\r\n # yield method, '-', '-', fpath\r\n\r\n \"\"\" Look at Driver file paths \"\"\"\r\n #scanner = filescan.DriverScan(self._config)\r\n #for driver in scanner.calculate():\r\n # method = \"Driver\"\r\n # fpath = str(driver.DriverName or '')\r\n # yield method, '-', '-', fpath\r" ]
[ "0.6314016", "0.59568083", "0.5881365", "0.5844795", "0.5802068", "0.5752258", "0.56392115", "0.54253495", "0.5387592", "0.5376296", "0.53625387", "0.5316459", "0.52880996", "0.5277681", "0.52515364", "0.52128714", "0.5204417", "0.5186836", "0.5180629", "0.5144857", "0.51443803", "0.514352", "0.50614315", "0.50614315", "0.50500137", "0.5040265", "0.50369966", "0.50259423", "0.50053895", "0.49987602" ]
0.63749075
0
Pair up each epi with a fieldmap.
def _SetFmapInfo(self): for epi in self.pfiles + self.epirt_paths: self.info[epi]['fmapname'] = None self.info[epi]['fmap_entry'] = None for entry in self.entry_map['fmap']: fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix'] if self.info[entry]['plane'] == self.info[epi]['plane']: # Use the fieldmap acquired at the same plane. self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry break else: # for fmap in self.fmaps.keys(): for entry in self.entry_map['fmap']: # No fmap at same orientation, look for fmaps in other planes. # There won't be more than one, so it isn't much of a choice. fmap_name = self.info[entry]['imgfile'] + \ self.info[entry]['suffix'] if self.info[entry]['plane'] == 'sagittal': self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry break elif self.info[entry]['plane'] == 'axial': self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry break elif self.info[entry]['plane'] == 'coronal': self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry break elif self.info[entry]['plane'] == 'oblique': self.info[epi]['fmapname'] = fmap_name self.info[epi]['fmap_entry'] = entry self.info[epi]['plane'] = 'oblique' break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MakeFieldmaps(self):\n if self.verbose:\n print 'Compute fieldmaps.'\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n if self.info[entry]['imgfile'] == None:\n# Fieldmap data not found.\n return\n# Make a magnitude image for use in checking registration.\n cmd = 'convert_file -f0 -m0 %s %s nii' % \\\n (entry, self.info[entry]['magfile'])\n self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii'])\n\n# Make fieldmap. Use separate loop in case make_fmap aborts.\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n fmapname = self.info[entry]['imgfile']\n if not os.path.exists('%s.nii' % fmapname) or self.redo:\n# Couldn't find or existing fmap, compute a new one.\n if self.verbose:\n extra_args = '-v'\n else:\n extra_args = ''\n if self.info[entry]['correct_fmap_phase'] == 'force':\n extra_args += ' --force-slicecorr'\n elif self.info[entry]['correct_fmap_phase'] == 'omit':\n extra_args += ' --omit-slicecorr'\n cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname)\n# error = self.ExecCmd(cmd, halt_on_error=False)\n if self.no_fmapcorr:\n halt_on_error = False\n else:\n halt_on_error = True\n error = self.CheckExec(cmd, ['%s.nii' % fmapname], \\\n halt_on_error=halt_on_error)\n if error:\n self.info[entry]['valid'] = False\n del self.fmaps[entry]", "def _map_field_names(self, members):\n result = []\n for member in members:\n mapped_info = {}\n for entry_key, entry_value in member.iteritems():\n if not entry_key in self.DATA_MAPPING: # skip the entry if there is no mapping\n continue\n mapped_info[self.DATA_MAPPING[entry_key]] = entry_value\n result.append(mapped_info)\n return result", "def _make_field_map(fields):\n field_map = {}\n for field in fields:\n if field.name in field_map:\n raise SchemaParseException(\n 'Duplicate record field name %r.' 
% field.name)\n field_map[field.name] = field\n return field_map", "def map():", "def field_mapping(self):\n fields = self.fields\n if self.target_field is not None:\n del fields[self.target_field.get('name')]\n field_labels = list(self.fields.keys())\n\n field_mapping = {\n name: (\n field_labels.index(name),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DataField'\n }\n\n field_mapping.update({\n name: (\n field_labels.index(self.find(e, 'FieldRef').get('field')),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DerivedField'\n })\n\n return field_mapping", "def _extract_field(in_file, epi_meta):\n from nipype.utils.filemanip import fname_presuffix\n import numpy as np\n import nibabel as nb\n from sdcflows.utils.epimanip import get_trt\n\n fieldnii = nb.load(in_file[0])\n trt = get_trt(epi_meta[1], in_file=epi_meta[0])\n data = (\n np.squeeze(fieldnii.get_fdata(dtype=\"float32\"))[\n ..., \"ijk\".index(epi_meta[1][\"PhaseEncodingDirection\"][0])\n ]\n / trt\n * (-1.0 if epi_meta[1][\"PhaseEncodingDirection\"].endswith(\"-\") else 1.0)\n )\n out_file = fname_presuffix(in_file[0], suffix=\"_fieldmap\")\n nii = nb.Nifti1Image(data, fieldnii.affine, None)\n nii.header.set_xyzt_units(fieldnii.header.get_xyzt_units()[0])\n nii.to_filename(out_file)\n return out_file", "def extract_mapping(self) -> DatasetMapping:\n # store fields\n fields = []\n for col in self.data.columns:\n #get field label\n label = col\n #get field type using PANDAS_TYPE (see apps.utils.utils)\n col_type = self.data[col].dtype\n field_type = PANDAS_TYPE[col_type]\n #set field\n field = FieldMapping(label=label, type=field_type)\n fields.append(field)\n self.mapping.append(label)\n return DatasetMapping(fields=fields)", "def edge_mapping(self):\n ...", "def mapper(self, _, doc):\n ret = doc.split('\\t')\n key = ret[2]\n values = {}\n try:\n values[\"ts_ini\"] = datetime.utcfromtimestamp(float(ret[0]))\n except:\n values[\"ts_ini\"] = None\n try:\n values[\"ts_end\"] = datetime.utcfromtimestamp(float(ret[1]))\n except:\n values[\"ts_end\"] = None\n try:\n values[\"value\"] = ret[3]\n except:\n values[\"value\"] = None\n try:\n values[\"energytype\"] = ret[4]\n except:\n values[\"energytype\"] = None\n try:\n values[\"source\"] = ret[5]\n except:\n values[\"source\"] = None\n\n yield key, values", "def _do_mapping(self):\n pass", "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n self.info[self.norm_src]['suffix']\n source = info['magfile'] + info['suffix']\n matfile = info['matfile']\n fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \\\n '-source %s -cost mi -warp shift_rotate'\n cmd = fmt % (info['matfile'], target, source)\n self.CheckExec(cmd, [info['matfile']])\n\n# Convert to unitary matrix (remove scaling component.)\n cmd = 'cat_matvec -ONELINE %s -P > %s' % \\\n (info['matfile'], info['matfile_unitary'])\n self.CheckExec(cmd, [info['matfile_unitary']])\n\n# Rotate the magnitude image to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['magfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['magfile'] + info['suffix'])\n self.CheckExec(cmd, [info['magfile_r']+info['suffix']])\n\n# Rotate the fieldmap to the new grid.\n 
fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['imgfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['imgfile'] + info['suffix'])\n self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])", "def mapper(record):\n personA = record[0]\n personB = record[1]\n mr.emit_intermediate(personA, personB)", "def pair_items_mapper(self, user_id, values):\r\n\t pass #your code here\r", "def compose_fieldmap(rf1, rf2):\n offset1, size1, step1 = rf1\n offset2, size2, step2 = rf2\n\n size = tuple((size2c - 1) * step1c + size1c\n for size1c, step1c, size2c in zip(size1, step1, size2))\n offset = tuple(offset2c * step1c + offset1c\n for offset2c, step1c, offset1c in zip(offset2, step1, offset1))\n step = tuple(step2c * step1c\n for step1c, step2c in zip(step1, step2))\n return (offset, size, step)", "def _build_participant_pairing_map(self, files: List[ConsentFile]) -> Dict[int, ParticipantPairingInfo]:\n participant_ids = {file.participant_id for file in files}\n participant_pairing_data = self.participant_dao.get_pairing_data_for_ids(participant_ids)\n return {\n participant_id: ParticipantPairingInfo(hpo_name=hpo_name, org_name=org_name, site_name=site_name)\n for participant_id, hpo_name, org_name, site_name in participant_pairing_data\n }", "def traj_fields_map(self, func, fields, args,\n map_func=map, idxs=False, traj_sel=None):\n\n # check the args and kwargs to see if they need expanded for\n # mapping inputs\n #first go through each run and get the number of cycles\n n_cycles = 0\n for run_idx in self.run_idxs:\n n_cycles += self.num_run_cycles(run_idx)\n\n mapped_args = []\n for arg in args:\n # make a generator out of it to map as inputs\n mapped_arg = (arg for i in range(n_cycles))\n mapped_args.append(mapped_arg)\n\n # make a generator for the arguments to pass to the function\n # from the mapper, for the extra arguments we just have an\n # endless generator\n map_args = (self.iter_trajs_fields(fields, traj_sel=traj_sel, idxs=False),\n *(it.repeat(arg) for arg in args))\n\n results = map_func(func, *map_args)\n\n if idxs:\n if traj_sel is None:\n traj_sel = self.run_traj_idx_tuples()\n return zip(traj_sel, results)\n else:\n return results", "def result_field_map():\n return {\n \"[run number]\": \"run_number\",\n \"map-file\": \"map_file\",\n \"People\": \"people\",\n \"person_path_weight\": \"person_path_weight\",\n \"Slow\": \"slow\",\n \"Medium\": \"medium\",\n \"Fast\": \"fast\",\n \"display-path-cost?\": \"display_path_cost_p\",\n \"add-person-spacing?\": \"add_person_spacing_p\",\n \"people-wait?\": \"people_wait_p\",\n \"equal-diagonal-weight?\": \"equal_diagonal_weight_p\",\n \"Slow-Speed\": \"slow_speed\",\n \"Medium-Speed\": \"medium_speed\",\n \"Fast-Speed\": \"fast_speed\",\n \"set-fire?\": \"set_fire_p\",\n \"Fire_Speed\": \"fire_speed\" ,\n \"mean-escape-time\": \"mean_escape_time\",\n }", "def parse_pairs(pairs, extra_lines):\n onoff_pairs = pairs[12:-8]\n keyval_pairs = pairs[:12] + pairs[-8:]\n\n # \"Additional notes\" at the end of the file\n # We append that to the key-value pair list and parse it as any other\n notes = '\\n'.join(extra_lines[1:]).strip()\n keyval_pairs.append(('notes', notes))\n\n # Parsed key-value pairs as dictionary\n items = {}\n for pair, plan_step in zip(keyval_pairs, presto_inf_parsing_plan):\n descr, value = pair\n keyname, keytype = plan_step\n items[keyname] = keytype(value)\n return items", "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as 
probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = [x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping", "def _get_feature2field(self):\n fea_id = 0\n for names in self.feature_names:\n if names is not None:\n for name in names:\n self.feature2id[name] = fea_id\n fea_id += 1\n\n if self.fields is None:\n field_id = 0\n for key, value in self.feature2id.items():\n self.feature2field[self.feature2id[key]] = field_id\n field_id += 1\n else:\n for key, value in self.fields.items():\n for v in value:\n try:\n self.feature2field[self.feature2id[v]] = key\n except:\n pass", "def _SetBaseEpi(self):\n tinfo = {}\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n if self.info[entry]['fmap_entry'] is None:\n tgt = info['anat_tgt']\n else:\n tgt = info['fmap_entry']\n tgt_time = self.info[tgt]['acqtime']\n\n plane = info['plane']\n if not tinfo.has_key(plane):\n tinfo[plane] = {}\n tdiff = abs(info['acqtime'] - tgt_time)\n tinfo[plane][tdiff] = (entry, 'start')\n tdiff = abs(info['acqtime'] + info['TR']*info['tdim']/1000 - tgt_time)\n tinfo[plane][tdiff] = (entry, 'end')\n\n bases = {}\n for plane in tinfo.keys():\n tdiffs = tinfo[plane].keys()\n tdiffs.sort()\n bases[plane] = tinfo[plane][tdiffs[0]]\n\n for epi in self.entry_map['epi']:\n plane = self.info[epi]['plane']\n base_entry, base = bases[plane]\n self.info[epi]['base_entry'] = base_entry\n self.info[epi]['base'] = base\n self.info[epi]['basefile'] = '%s'%(self.info[base_entry]['imgfile'])", "def _hacked_transform(typemap, node):\n entries = []\n groupindices = {}\n types = {}\n\n # step 1: traverse all fields and collect field types and content\n for field in node:\n fieldname, fieldbody = field\n try:\n # split into field type and argument\n fieldtype, fieldarg = fieldname.astext().split(None, 1)\n except ValueError:\n # maybe an argument-less field type?\n fieldtype, fieldarg = fieldname.astext(), ''\n typedesc, is_typefield = typemap.get(fieldtype, (None, None))\n\n # sort out unknown fields\n if typedesc is None or typedesc.has_arg != bool(fieldarg):\n # either the field name is unknown, or the argument doesn't\n # match the spec; capitalize field name and be done with it\n new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]\n if fieldarg:\n new_fieldname += ' ' + fieldarg\n fieldname[0] = nodes.Text(new_fieldname)\n entries.append(field)\n continue\n\n typename = typedesc.name\n\n # collect the content, trying not to keep unnecessary paragraphs\n if 
_is_single_paragraph(fieldbody):\n content = fieldbody.children[0].children\n else:\n content = fieldbody.children\n\n # if the field specifies a type, put it in the types collection\n if is_typefield:\n # filter out only inline nodes; others will result in invalid\n # markup being written out\n content = [n for n in content if isinstance(n, nodes.Inline) or\n isinstance(n, nodes.Text)]\n if content:\n types.setdefault(typename, {})[fieldarg] = content\n continue\n\n # also support syntax like ``:param type name:``\n if typedesc.is_typed:\n try:\n argtype, argname = fieldarg.split(None, 1)\n except ValueError:\n pass\n else:\n types.setdefault(typename, {})[argname] = \\\n [nodes.Text(argtype)]\n fieldarg = argname\n\n translatable_content = nodes.inline(fieldbody.rawsource,\n translatable=True)\n translatable_content.source = fieldbody.parent.source\n translatable_content.line = fieldbody.parent.line\n translatable_content += content\n\n # grouped entries need to be collected in one entry, while others\n # get one entry per field\n if typedesc.is_grouped:\n if typename in groupindices:\n group = entries[groupindices[typename]]\n else:\n groupindices[typename] = len(entries)\n group = [typedesc, []]\n entries.append(group)\n entry = typedesc.make_entry(fieldarg, [translatable_content])\n group[1].append(entry)\n else:\n entry = typedesc.make_entry(fieldarg, [translatable_content])\n entries.append([typedesc, entry])\n\n return (entries, types)", "def process_field_mapping(self, analysis, observable: Observable, result, result_field, result_time=None) -> None:\n pass", "def map_profile_fields(data, fields):\n profile = {}\n for dst, src in fields.items():\n if callable(src):\n value = src(data)\n else:\n value = data.get(src)\n\n if value is not None and value != '':\n profile[dst] = value\n\n return profile", "def receiverMapping():", "def createFieldMapping(sgidPoints):\n # Create field mappings\n sgidFMs = arcpy.FieldMappings()\n\n # Perform some field renaming\n mapPairs = [\n ('State', 'State'),\n ('City', 'Inc_Muni'),\n ('CountyID', 'County'),\n ('ZipCode', 'Zip_Code'),\n ('PrefixDir', 'StN_PreDir'),\n ('StreetName', 'StreetName'),\n ('StreetType', 'StN_PosTyp'),\n ('SuffixDir', 'StN_PosDir'),\n ('AddNum', 'Add_Number'),\n ('LandmarkName', 'landmkName'),\n ('Building', 'Building'),\n ('UnitType', 'Unit'),\n ('AddSource', 'AddAuth'),\n ('AddSystem', 'UniqWithin'),\n ('LoadDate', 'LastUpdate')]\n\n for p in mapPairs:\n print p\n sgidFMs.addFieldMap(getRenameFieldMap(sgidPoints, p[0], p[1]))\n\n return sgidFMs", "def test_fields_to_dict(self):\r\n test_data = \\\r\n \"\"\"0\tR27DLI_4812\tR27DLI_600\tR27DLI_727\tU1PLI_403\tU1PLI_8969\tU1PLI_9080\tU1PLI_9526\tW3Cecum_6642\tW3Cecum_8992\r\n1\tU1PLI_7889\r\n2\tW3Cecum_4858\r\n3\tR27DLI_3243\tR27DLI_4562\tR27DLI_6828\tR27DLI_9097\tU1PLI_2780\tU1PLI_67\tU9PSI_10475\tU9PSI_4341\tW3Cecum_5191\"\"\".splitlines() # output from cd-hit\r\n obs = fields_to_dict(test_data)\r\n exp = {\r\n '0': ['R27DLI_4812', 'R27DLI_600', 'R27DLI_727', 'U1PLI_403',\r\n 'U1PLI_8969', 'U1PLI_9080', 'U1PLI_9526', 'W3Cecum_6642', 'W3Cecum_8992'],\r\n '1': ['U1PLI_7889'],\r\n '2': ['W3Cecum_4858'],\r\n '3': ['R27DLI_3243', 'R27DLI_4562', 'R27DLI_6828', 'R27DLI_9097', 'U1PLI_2780', 'U1PLI_67', 'U9PSI_10475', 'U9PSI_4341', 'W3Cecum_5191']}\r\n self.assertEqual(obs, exp)", "def map(self, records, task):\n for key, json in records:\n record = happy.json.decode(json)\n if happy.flow.isIterable(self.aggkey):\n outkey = ''\n for ak in self.aggkey:\n if record.has_key(ak):\n 
outkey = outkey + record[ak] + \":\"\n task.collect(outkey, json) \n elif record.has_key(self.aggkey):\n if (record[self.aggkey]):\n task.collect(record[self.aggkey], json)", "def applyMapping(self):\n pass", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANAP').get('abstractTypes')\n exolinks = globalMap.get('ANAP').get('exolinks')\n\n # DataType GraphicsHandlerType\n currentMap = {}\n abstractTypes['GraphicsHandlerType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'] = currentMap\n loadMaps['ANAP.GraphicsHandlerType'] = currentMap\n currentMap['tag'] = 'ANAP.GraphicsHandlerType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnalysisProfile\n currentMap = {}\n abstractTypes['AnalysisProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'analysisProfiles'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnalysisProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnalysisProfile.bgColor\n currentMap = {}\n contentMap['bgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'] = currentMap\n loadMaps['ANAP.AnalysisProfile.bgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.bgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'\n currentMap['name'] = 'bgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#FFFFFF'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.fgColor\n currentMap = {}\n contentMap['fgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'] = currentMap\n loadMaps['ANAP.AnalysisProfile.fgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.fgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'\n currentMap['name'] = 'fgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#000000'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.font\n currentMap = {}\n contentMap['font'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'] = currentMap\n loadMaps['ANAP.AnalysisProfile.font'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.font'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'\n currentMap['name'] = 'font'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.graphicsHandler\n currentMap = {}\n contentMap['graphicsHandler'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'] = currentMap\n loadMaps['ANAP.AnalysisProfile.graphicsHandler'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.graphicsHandler'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'\n currentMap['name'] = 'graphicsHandler'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'Tk'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001')\n\n # Attribute AnalysisProfile.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnalysisProfile.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'] = currentMap\n loadMaps['ANAP.AnalysisProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AnalysisProfile.panView\n currentMap = {}\n contentMap['panView'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'] = currentMap\n loadMaps['ANAP.AnalysisProfile.panView'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.panView'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'\n currentMap['name'] = 'panView'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.sendBugReports\n currentMap = {}\n contentMap['sendBugReports'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile.sendBugReports'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.sendBugReports'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'\n currentMap['name'] = 'sendBugReports'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'maybe'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2010-11-17-16:21:33_00001')\n\n # Attribute AnalysisProfile.transientDialogs\n currentMap = {}\n contentMap['transientDialogs'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientDialogs'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientDialogs'\n currentMap['type'] 
= 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'\n currentMap['name'] = 'transientDialogs'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.transientWindows\n currentMap = {}\n contentMap['transientWindows'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientWindows'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientWindows'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'\n currentMap['name'] = 'transientWindows'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.twoCharShortcuts\n currentMap = {}\n contentMap['twoCharShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'] = currentMap\n loadMaps['ANAP.AnalysisProfile.twoCharShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.twoCharShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'\n currentMap['name'] = 'twoCharShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useCrosshair\n currentMap = {}\n contentMap['useCrosshair'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useCrosshair'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useCrosshair'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'\n currentMap['name'] = 'useCrosshair'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useGlobalShortcuts\n currentMap = {}\n contentMap['useGlobalShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useGlobalShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useGlobalShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'\n currentMap['name'] = 'useGlobalShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.userEmail\n currentMap = {}\n contentMap['userEmail'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userEmail'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userEmail'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'\n currentMap['name'] = 'userEmail'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute AnalysisProfile.userName\n currentMap = {}\n contentMap['userName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'] = currentMap\n 
loadMaps['ANAP.AnalysisProfile.userName'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'\n currentMap['name'] = 'userName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.userOrganisation\n currentMap = {}\n contentMap['userOrganisation'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userOrganisation'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userOrganisation'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'\n currentMap['name'] = 'userOrganisation'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.webBrowser\n currentMap = {}\n contentMap['webBrowser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'] = currentMap\n loadMaps['ANAP.AnalysisProfile.webBrowser'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.webBrowser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'\n currentMap['name'] = 'webBrowser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role AnalysisProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnalysisProfile.colorSchemes\n currentMap = {}\n contentMap['colorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'] = currentMap\n loadMaps['ANAP.AnalysisProfile.colorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.colorSchemes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'\n currentMap['name'] = 'colorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.macros\n currentMap = {}\n contentMap['macros'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'] = currentMap\n loadMaps['ANAP.AnalysisProfile.macros'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.macros'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'\n currentMap['name'] = 'macros'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.marksColor\n currentMap = {}\n contentMap['marksColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'] = currentMap\n loadMaps['ANAP.AnalysisProfile.marksColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.marksColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'\n currentMap['name'] = 'marksColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n\n # Role AnalysisProfile.refExpProfiles\n currentMap = 
{}\n contentMap['refExpProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'] = currentMap\n loadMaps['ANAP.AnalysisProfile.refExpProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.refExpProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'\n currentMap['name'] = 'refExpProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.residueProfiles\n currentMap = {}\n contentMap['residueProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'] = currentMap\n loadMaps['ANAP.AnalysisProfile.residueProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.residueProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'\n currentMap['name'] = 'residueProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.rulersColor\n currentMap = {}\n contentMap['rulersColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'] = currentMap\n loadMaps['ANAP.AnalysisProfile.rulersColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.rulersColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'\n currentMap['name'] = 'rulersColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n # End of AnalysisProfile\n\n currentMap = abstractTypes.get('AnalysisProfile')\n aList = ['createdBy', 'graphicsHandler', 'guid', 'isModifiable', 'lastUnlockedBy', 'name', 'panView', 'sendBugReports', 'transientDialogs', 'transientWindows', 'twoCharShortcuts', 'useCrosshair', 'useGlobalShortcuts', 'userEmail', 'webBrowser']\n currentMap['headerAttrs'] = aList\n aList = ['bgColor', 'fgColor', 'font', 'userName', 'userOrganisation', 'marksColor', 'rulersColor']\n currentMap['simpleAttrs'] = aList\n aList = ['residueProfiles', 'refExpProfiles', 'macros', 'colorSchemes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['colorSchemes', 'macros', 'refExpProfiles', 'residueProfiles']\n currentMap['children'] = aList\n\n # Class ColorScheme\n currentMap = {}\n abstractTypes['ColorScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'] = currentMap\n loadMaps['ANAP.ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'colorSchemes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ColorScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ColorScheme.colors\n currentMap = {}\n contentMap['colors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'] = currentMap\n loadMaps['ANAP.ColorScheme.colors'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.colors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'\n currentMap['name'] = 'colors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute ColorScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'] = currentMap\n loadMaps['ANAP.ColorScheme.name'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ColorScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ColorScheme\n\n currentMap = abstractTypes.get('ColorScheme')\n aList = ['colors', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Macro\n currentMap = {}\n abstractTypes['Macro'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'] = currentMap\n loadMaps['ANAP.Macro'] = currentMap\n currentMap['tag'] = 'ANAP.Macro'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'macros'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Macro.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Macro.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'] = currentMap\n loadMaps['ANAP.Macro.details'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Macro.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'] = currentMap\n loadMaps['ANAP.Macro.function'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.isInMenu\n currentMap = {}\n contentMap['isInMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'] = currentMap\n loadMaps['ANAP.Macro.isInMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'\n currentMap['name'] = 'isInMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.isInMouseMenu\n currentMap = {}\n contentMap['isInMouseMenu'] = 
currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'] = currentMap\n loadMaps['ANAP.Macro.isInMouseMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMouseMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'\n currentMap['name'] = 'isInMouseMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.module\n currentMap = {}\n contentMap['module'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'] = currentMap\n loadMaps['ANAP.Macro.module'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.module'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'\n currentMap['name'] = 'module'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'] = currentMap\n loadMaps['ANAP.Macro.name'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Macro.ordering\n currentMap = {}\n contentMap['ordering'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'] = currentMap\n loadMaps['ANAP.Macro.ordering'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.ordering'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'\n currentMap['name'] = 'ordering'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.path\n currentMap = {}\n contentMap['path'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'] = currentMap\n loadMaps['ANAP.Macro.path'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.path'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'\n currentMap['name'] = 'path'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00003')\n\n # Attribute Macro.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'] = currentMap\n loadMaps['ANAP.Macro.serial'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.shortcut\n currentMap = {}\n contentMap['shortcut'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'] = currentMap\n loadMaps['ANAP.Macro.shortcut'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.shortcut'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'\n currentMap['name'] = 'shortcut'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Macro.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Macro\n\n currentMap = abstractTypes.get('Macro')\n aList = ['function', 'isInMenu', 'isInMouseMenu', 'module', 'ordering', 'serial', 'shortcut']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'path']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefExpProfile\n currentMap = {}\n abstractTypes['RefExpProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'] = currentMap\n loadMaps['ANAP.RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refExpProfiles'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefExpProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefExpProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'] = currentMap\n loadMaps['ANAP.RefExpProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute RefExpProfile.peakSymbolColors\n currentMap = {}\n contentMap['peakSymbolColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakSymbolColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakSymbolColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'\n currentMap['name'] = 'peakSymbolColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.peakTextColors\n currentMap = {}\n contentMap['peakTextColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakTextColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakTextColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'\n currentMap['name'] = 'peakTextColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.refExpNames\n currentMap = {}\n contentMap['refExpNames'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'] = currentMap\n loadMaps['ANAP.RefExpProfile.refExpNames'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.refExpNames'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'\n currentMap['name'] = 'refExpNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role RefExpProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefExpProfile.negColorSchemes\n currentMap = {}\n contentMap['negColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'] = currentMap\n loadMaps['ANAP.RefExpProfile.negColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.negColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'\n currentMap['name'] = 'negColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role RefExpProfile.posColorSchemes\n currentMap = {}\n contentMap['posColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'] = currentMap\n loadMaps['ANAP.RefExpProfile.posColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.posColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'\n currentMap['name'] = 'posColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of RefExpProfile\n\n currentMap = abstractTypes.get('RefExpProfile')\n aList = ['name']\n currentMap['headerAttrs'] = aList\n aList = ['peakSymbolColors', 'peakTextColors', 'refExpNames', 'negColorSchemes', 'posColorSchemes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ResidueProfile\n currentMap = {}\n abstractTypes['ResidueProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'] = currentMap\n loadMaps['ANAP.ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'residueProfiles'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ResidueProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ResidueProfile.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'] = currentMap\n loadMaps['ANAP.ResidueProfile.ccpCode'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.guiName\n currentMap = {}\n contentMap['guiName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'] = currentMap\n loadMaps['ANAP.ResidueProfile.guiName'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.guiName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'\n currentMap['name'] = 'guiName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'] = currentMap\n loadMaps['ANAP.ResidueProfile.molType'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ResidueProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ResidueProfile\n\n currentMap = abstractTypes.get('ResidueProfile')\n aList = ['ccpCode', 'guiName', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnalysisProfile\n currentMap = {}\n exolinks['AnalysisProfile'] = currentMap\n loadMaps['ANAP.exo-AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-AnalysisProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['name'] = 'AnalysisProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to ColorScheme\n currentMap = {}\n exolinks['ColorScheme'] = currentMap\n loadMaps['ANAP.exo-ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ColorScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['name'] = 'ColorScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Macro\n currentMap = {}\n exolinks['Macro'] = currentMap\n loadMaps['ANAP.exo-Macro'] = currentMap\n currentMap['tag'] = 'ANAP.exo-Macro'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['name'] = 'Macro'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to RefExpProfile\n currentMap = {}\n exolinks['RefExpProfile'] = currentMap\n loadMaps['ANAP.exo-RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-RefExpProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['name'] = 'RefExpProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ResidueProfile\n currentMap = {}\n exolinks['ResidueProfile'] = currentMap\n 
loadMaps['ANAP.exo-ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ResidueProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['name'] = 'ResidueProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))" ]
[ "0.5794583", "0.56985533", "0.56411123", "0.5450113", "0.5425905", "0.5395057", "0.53146863", "0.5253061", "0.52433765", "0.5232632", "0.5187637", "0.51365435", "0.511374", "0.5107384", "0.51051044", "0.51040184", "0.50853544", "0.50818384", "0.50797653", "0.5053735", "0.5050864", "0.5049981", "0.5048164", "0.50020194", "0.49679694", "0.49636382", "0.4890645", "0.4858326", "0.48517907", "0.4825758" ]
0.6167625
0
Find the hires structural image that was acquired nearest to "acqtime"
def _FindNearestAnat(self, acqtime): tdiff_min = 1e6 for anat in self.entry_map['anat']: if self.info[anat]['type'] == 'T1High' and \ self.info[anat]['InversionTime'] > 0.: tdiff = abs(acqtime - self.info[anat]['acqtime']) if tdiff < tdiff_min: tdiff_min = tdiff anat_min = anat return anat_min
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_in_time(images, target):\n\n tgt_mjd = fits.getheader(target, ext=1)['mjd-obs']\n mjds = np.array([fits.getheader(i, ext=1)['mjd-obs'] for i in images])\n\n return images[abs(mjds - tgt_mjd).argsort()[0]]", "def find(image):\n keypoint, description = describe(image)\n # load keypoints, descriptions from mongodb\n\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n best_match_size = float(\"inf\")\n best_match_index = -1\n match_index = 0\n best_matches = 0\n\n for desc in descriptions:\n matches = bf.match(desc,description)\n matches = sorted(matches, key = lambda x:x.distance)\n if len(matches) > 0:\n match_size = sum(x.distance for x in matches[:10])\n\n print \"match size is \", match_size\n if match_size < best_match_size:\n best_match_size = match_size\n best_match_index = match_index\n best_matches = matches\n\n match_index += 1\n\n needle_color = cv2.imread('needle-stripped.png')[:,:,::-1] # needle\n best_match_image = cv2.imread(\"haystack/\"+files[best_match_index])\n print \"best match is \", files[best_match_index]\n\n # Draw first 10 matches.\n outImg = cv2.imread(\"output/outImg.png\")\n match = cv2.drawMatches(needle_color,keypoint,best_match_image[:,:,::-1],keypoints[best_match_index],best_matches[-20:],outImg, flags=6)\n\n plt.imshow(match),plt.show()\n return", "def look_for_reference_image(image):\n match_list = []\n thresh = 8\n final_value = -1\n references = import_reference_images()\n # Initialize the ORB detector algorithm\n orb = cv2.ORB_create()\n\n # Now detect the keypoints and compute\n # the descriptors for the query image\n imgKeypoints, imgDescriptors = orb.detectAndCompute(image, None)\n try:\n for ref in references:\n # Now detect the keypoints and compute\n # the descriptors for the train image\n ref.refKeypoints, ref.refDescriptors = orb.detectAndCompute(ref.img, None)\n\n # Initialize the Matcher for matching\n # the keypoints and then match the\n # keypoints\n matcher = cv2.BFMatcher()\n matches = matcher.knnMatch(imgDescriptors, ref.refDescriptors, k=2)\n\n for m, n in matches:\n if m.distance < 0.75 * n.distance:\n ref.refMatches.append([m])\n\n match_list.append(len(ref.refMatches))\n except:\n pass\n if len(match_list) != 0:\n if max(match_list) > thresh:\n final_value = match_list.index(max(match_list))\n\n return references[final_value].name", "def next_hit(self, ray):\n hit_candidates = [(i.time_to_bound(ray), i) for i in self._bounds]\n try:\n # WARNING - A hard cut on 'times' smaller than 10^-9 is made to exclude\n # a beam reinteracting with the same barrier. 
This cuts out any legitimate\n # interactions closer than 1nm of the beam position.\n return (sorted([(time, surface) for time, surface in hit_candidates\n if time is not None and time > 1e-9 and all(\n [b.contains(ray.propagate(time).position) for b in self._bounds\n if b is not surface])])[0])\n except IndexError:\n return None", "def closest_on_screen_point(trajectory, viewpoint, yaw, gaze_on_screen):\n\n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n #pprint(traj_angles)\n\n #onscreen_idx, dists, *_ = find_closest_index(traj_angles, gaze_on_screen)\n #idx = closest_node(traj_angles, gaze_on_screen)\n idx = find_closest_index(traj_angles, gaze_on_screen)\n # print(idx)\n\n #traj_ref = trajectory[idx, :]\n screen_ref = traj_angles[idx, :]\n world_ref = trajectory[idx, :]\n\n path_dist = ab_path_length(trajectory, viewpoint, world_ref)\n path_dist /= 8.0 #time headway\n\n #plot_traj(screen_ref, gaze_on_screen, traj_angles)\n\n return(idx, screen_ref, world_ref, path_dist)#, traj_angles)", "def find_closest_frame(point, trajs, cv_evals):\n\n closest_frame = None\n closest_distance = 1e10\n for i, t in enumerate(trajs):\n dists = np.linalg.norm(point - cv_evals[i], axis=1)\n # print(dists.shape, len(t))\n mindist_index = dists.argmin()\n mindist = dists[mindist_index]\n if mindist < closest_distance:\n # logger.debug(\"Found frame in %s at time %s\", simulation.id, t)\n closest_frame = t[mindist_index]\n closest_distance = mindist\n return closest_frame", "def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)", "def closest(data):\n\n images, pc_projections, pcs = data.pca.load()\n\n pc_projections_truncated = pc_projections[:, :data.analysis.config.pc_projection_count]\n\n closest_group_count = int(round(data.analysis.config.closest_group * images.shape[0], 0))\n representative_count = int(round(data.analysis.config.representative * images.shape[0], 0))\n\n closest_group = kclosest.k_closest(closest_group_count, pc_projections_truncated)\n representative = closest_group[kclosest.k_closest(representative_count, pc_projections_truncated[closest_group, :])]\n\n data.analysis.save_closest(closest_group, representative)", "def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a", "def calc_nearest_ind(self, robot_pose):\n pass", "def get_interest_map(far):\n\n # --- horizontal locations on 5 meter high in world coordinate\n height = -3.5\n x = np.arange(-4, 12, 1)\n x = x.reshape((-1, 1))\n high_horizon = np.concatenate([x, np.ones_like(x) * height, np.ones_like(x) * far], 1)\n\n # --- {3, 7, 11} meters right and 2.5 meter high in world coordinate\n height = -1.\n x = np.arange(3, 12, 4)\n x = x.reshape((-1, 1))\n right_candidate = np.concatenate([x, np.ones_like(x) * height, np.ones_like(x) * far], 1)\n\n p_world = np.concatenate([high_horizon, 
right_candidate], 0)\n p_img = project_pts3_to_image(p_world, K)\n\n # --- if close, search for top region in image coordinate\n if far < 8:\n x = np.arange(600, 1280, 50)\n x = x.reshape((-1, 1))\n y = 5\n close = np.concatenate([x, np.ones_like(x) * y], 1)\n p_img = np.concatenate([p_img, close], 0)\n\n # --- consider only locations in image\n ll = np.array([0, 0]) # lower-left\n ur = np.array([img_width, img_height]) # upper-right\n inidx = np.all(np.logical_and(ll <= p_img, p_img <= ur), axis=1)\n inbox = p_img[inidx]\n inbox = inbox.astype(np.int)\n\n interest = np.zeros((img_height, img_width))\n interest[inbox[:, 1], inbox[:, 0]] = 1\n interest = scipy.ndimage.morphology.distance_transform_edt(interest-1)\n interest = np.exp(-interest / 30**2)\n interest = (interest - np.min(interest)) / (np.max(interest) - np.min(interest))\n return interest", "def closest_on_screen_point_optim(trajectory, viewpoint, yaw, gaze_on_screen):\n \n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n \n #pprint(traj_angles)\n\n dist, idx = closest_node_tree(traj_angles, gaze_on_screen)\n ml_screen_ref = traj_angles[idx] \n\n return(idx, ml_screen_ref)", "def get_closest_record(self, time):\n dist = 10000000\n record = -1\n # TODO: optimise a bit\n for i, itime in enumerate(self.times):\n if (abs(time-itime)) < dist:\n dist = abs(time-itime)\n record = i\n\n return record", "def find_hrc_calib_obsid(inst):\n##\n##--- create a list of already processed data\n##\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/6* > '+ zspace\n# os.system(cmd)\n# with open(zspace, 'r') as f:\n# ftest = f.read()\n# wrd = str(inst) + '/61'\n# mc = re.search(wrd, ftest)\n# if mc is not None:\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/61* >' + zspace\n# os.system(cmd)\n#\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/62* >' + zspace\n# os.system(cmd)\n#\n# data = mcf.read_data_file(zspace, remove=1)\n# prev_list = []\n# for ent in data:\n# atemp = re.split('\\/', ent)\n# prev_list.append(int(float(atemp[-1])))\n#\n##\n##--- find today's date and set checking range for the last 30 days\n##\n# today = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())\n# today = int(Chandra.Time.DateTime(today).secs)\n# start = today - 10 * 86400\n##\n##--- extract hrc obsid information\n##\n# line = 'operation=browse\\n'\n# line = line + 'dataset=flight\\n'\n# line = line + 'level=1\\n'\n# line = line + 'detector=hrc\\n'\n# line = line + 'filetype=evt1\\n'\n# line = line + 'tstart=' + str(start) + '\\n'\n# line = line + 'tstop=' + str(today) + '\\n'\n# line = line + 'go\\n'\n#\n# with open('zline', 'w') as fo:\n# fo.write(line)\n#\n# cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > ' + zspace\n# os.system(cmd)\n#\n# mcf.rm_files('./zline')\n#\n# data = mcf.read_data_file(zspace, remove=1)\n##\n##--- select obsids with 61* and 62* starting\n##\n# h_list = []\n# for ent in data:\n# mc = re.search('hrcf', ent)\n# if mc is not None:\n# atemp = re.split('hrcf', ent)\n# btemp = re.split('_', atemp[1])\n# obsid = int(float(btemp[0]))\n# if obsid > 61000 and obsid < 63000:\n##\n##--- if it is already observed skip it\n##\n# if obsid in prev_list:\n# continue\n##\n##--- check which instrument\n##\n# chk = check_inst(obsid)\n# if chk == inst:\n# h_list.append(obsid)\n\n\n\n h_list = ['62410', '62423', '62435', '62437', '62439', '62441', '62443', '62635', '62637', '62649', '62973', '62997', '62422', '62426', '62436', '62438', '62440', '62442', '62446', '62636', '62638', '62796', '62991']\n\n\n return h_list", "def 
cacheFindEntry(cache, cameraID, desiredTime):\n if not cameraID in cache:\n return None\n cameraTimes = cache[cameraID]\n closestEntry = min(cameraTimes, key=lambda x: abs(x['time'] - desiredTime))\n if abs(closestEntry['time'] - desiredTime) < 30:\n # logging.warning('close: %s', str(closestEntry))\n return os.path.join(cache['readDir'], closestEntry['fileName'])\n else:\n # logging.warning('far: %s, %s', str(desiredTime), str(closestEntry))\n return None", "def find_scene(orig_scene, match):\n \n image_to_compare = orig_scene.copy()\n \n r,c,_ = match.shape\n ir, ic, _ = image_to_compare.shape\n min_ssd = None\n\n\n for x in range(r):\n for y in range(c):\n # compare to sample image to start off with...\n # mse(imageA, imageB, mask=0) \n\n# if x % 25 == 0 and y == 50:\n# print x\n\n # assume x,y is top left corner, \n imageA = match[x:x+ir, y:y+ic, :]\n\n if imageA.shape[0] != ir or imageA.shape[1] != ic:\n continue\n\n # add the mask \n\n current_ssd = ssd(imageA, image_to_compare)\n if current_ssd == None:\n pass\n elif min_ssd == None:\n min_ssd = current_ssd\n best_sample = imageA\n best_x = x\n best_y = y\n elif min_ssd > current_ssd:\n min_ssd = current_ssd\n best_sample = imageA\n best_x = x\n best_y = y\n return best_x, best_y, best_sample", "def closest_card(model, img):\r\n features = preprocess(img)\r\n closest_match = sorted(model.values(), key=lambda x: img_compare(x[1], features))[0]\r\n return closest_match[0]", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def image_search_in_image(base_image, looking_for_img):\n base_image = cv2.imread(base_image)\n looking_for_img = cv2.imread(looking_for_img)\n # result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_SQDIFF_NORMED)\n result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_CCOEFF)\n (_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)\n print(result)\n (waldoHeight, waldoWidth) = looking_for_img.shape[:2]\n topLeft = maxLoc\n botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)\n roi = base_image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]\n mask = np.zeros(base_image.shape, dtype=\"uint8\")\n puzzle = cv2.addWeighted(base_image, 0.25, mask, 0.75, 0)\n puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi\n cv2.imshow(\"Puzzle\", puzzle)\n cv2.imshow(\"Waldo\", looking_for_img)\n cv2.waitKey(0)", "def locate_source(p,d):\n # M = sensors, n = dimensions\n M, n = p.shape\n p = np.matrix( p ).T\n\n # pick closest receiver\n c = np.argmin(d)\n #sensors delta time relative to sensor c\n d = d - min(d)\n\n indices = list(range(M))\n del indices[c]\n\n A = np.zeros([M-2,n])\n b = np.zeros([M-2,1])\n\n i = indices[0]\n for row,j in enumerate(indices[1:]):\n A[row,:] = 2*( (d[j])*(p[:,i]-p[:,c]).T - \\\n (d[i])*(p[:,j]-p[:,c]).T )\n b[row,0] = (d[i])*((d[j])**2-p[:,j].T*p[:,j]) + \\\n ((d[i])-(d[j]))*p[:,c].T*p[:,c] + \\\n (d[j])*(p[:,i].T*p[:,i]-(d[i])**2)\n\n\n x = np.asarray( np.linalg.lstsq(A,b)[0] )[:,0]\n return x", "def getfirstscam(image_list, starttime, instr, primary_mode, bvid):\n stime=starttime-datetime.timedelta(seconds=2*3600.0)\n for img in image_list:\n if img[4]>stime and img[5]==instr:\n return img[4]+datetime.timedelta(seconds=2*3600.0)\n return None", "def extract_blobs_closest_points(this_robot, in_image, active_mask):\n\n out_image = 
PointSampleImage(in_image.calib_array, in_image.neighbour_array)\n\n G = nx.Graph()\n\n # First add all nodes, where each node consists of an index into\n # calib_array for one of the active pixels.\n for i in range(in_image.n_rows):\n G.add_node(i)\n\n # We will add edges between neighbouring pixels. See\n # sensors/pointsamplecam for the definition of neighbouring.\n node_list = G.nodes()\n n = len(node_list)\n for i in range(n):\n if in_image.masks[i] & active_mask != 0:\n (ixi, iyi) = in_image.calib_array[i,0], in_image.calib_array[i,1]\n for j in in_image.neighbour_array[i]:\n if in_image.masks[j] & active_mask != 0:\n G.add_edge(i, j)\n\n clusters = nx.connected_component_subgraphs(G, copy=False)\n n_clusters = 0\n for cluster in clusters:\n n_clusters += 1\n # Find the closest pixel to the robot in this cluster. \n closest_i = None\n closest_distance = float('inf')\n for i in cluster.nodes():\n #(xr, yr) = in_image.calib_array[i,2], in_image.calib_array[i,3]\n #d = sqrt(xr*xr + yr*yr)\n\n # The pre-computed distance sqrt(xr*xr + yr*yr)\n d = in_image.calib_array[i,5]\n\n if d < closest_distance:\n closest_i = i\n closest_distance = d\n if closest_i != None:\n out_image.masks[closest_i] = in_image.masks[closest_i]\n\n return out_image", "def getfirstimage(image_list, starttime, instr, primary_mode, bvid):\n stime=starttime-datetime.timedelta(seconds=2*3600.0)\n if instr == 'MOS':\n for img in image_list:\n if img[4]>stime and img[5]=='RSS' and img[10]==bvid:\n if not img[11]=='N/A' and not img[12]=='0 - N/A' and not img[13]=='0 - HOME':\n return img[4]+datetime.timedelta(seconds=2*3600.0)\n for img in image_list:\n if img[4]>stime and img[5]==instr and img[10]==bvid:\n return img[4]+datetime.timedelta(seconds=2*3600.0)\n return None", "def find_target_data(params,star_catalog,lightcurves,image_trios,log):\n\n target = photometry_classes.Star()\n\n if params['target_ra'] != None:\n\n target_location = SkyCoord([params['target_ra']], [params['target_dec']], unit=(u.hourangle, u.deg))\n\n stars = SkyCoord(star_catalog['RA'], star_catalog['DEC'], unit=\"deg\")\n\n tolerance = 2.0 * u.arcsec\n\n match_data = matching.search_around_sky(target_location, stars,\n seplimit=tolerance)\n\n idx = np.argsort(match_data[2].value)\n\n if len(match_data[0]) > 0:\n target.star_index = star_catalog['star_index'][match_data[1][idx[0]]]\n target.ra = star_catalog['RA'][match_data[1][idx[0]]]\n target.dec = star_catalog['DEC'][match_data[1][idx[0]]]\n target.i = star_catalog['cal_ref_mag_ip'][match_data[1][idx[0]]]\n target.sig_i = star_catalog['cal_ref_mag_err_ip'][match_data[1][idx[0]]]\n target.r = star_catalog['cal_ref_mag_rp'][match_data[1][idx[0]]]\n target.sig_r = star_catalog['cal_ref_mag_err_rp'][match_data[1][idx[0]]]\n target.i_inst = star_catalog['ref_mag_ip'][match_data[1][idx[0]]]\n target.sig_i_inst = star_catalog['ref_mag_err_ip'][match_data[1][idx[0]]]\n target.r_inst = star_catalog['ref_mag_rp'][match_data[1][idx[0]]]\n target.sig_r_inst = star_catalog['ref_mag_err_rp'][match_data[1][idx[0]]]\n target.separation = match_data[2][idx[0]].to_string(unit=u.arcsec)\n try:\n target.g = star_catalog['cal_ref_mag_gp'][match_data[1][idx[0]]]\n target.sig_g = star_catalog['cal_ref_mag_err_gp'][match_data[1][idx[0]]]\n target.g_inst = star_catalog['ref_mag_gp'][match_data[1][idx[0]]]\n target.sig_g_inst = star_catalog['ref_mag_err_gp'][match_data[1][idx[0]]]\n except AttributeError:\n pass\n\n log.info('\\n')\n log.info('Target identified as star '+str(target.star_index)+\\\n ' in the combined 
ROME catalog, with parameters:')\n log.info('RA = '+str(target.ra)+' Dec = '+str(target.dec))\n log.info('Measured ROME photometry, instrumental:')\n log.info(target.summary(show_mags=False, show_instrumental=True))\n log.info('Measured ROME photometry, calibrated to the VPHAS+ scale:')\n log.info(target.summary(show_mags=True))\n\n target.set_delta_mag(params)\n\n log.info('Assigned delta mag offsets between DanDIA lightcurve and pyDANDIA reference frame analysis:')\n for f in ['g', 'r', 'i']:\n log.info('Delta m('+f+') = '+str(getattr(target, 'delta_m_'+f))+' +/- '+str(getattr(target, 'sig_delta_m_'+f)))\n\n if target.i != None and target.r != None:\n\n target.compute_colours(use_inst=True)\n\n log.info(target.summary(show_mags=False,show_colours=True))\n\n target.transform_to_JohnsonCousins()\n\n log.info(target.summary(show_mags=False,johnsons=True))\n\n for f in ['i', 'r', 'g']:\n\n if f in lightcurves.keys():\n\n images = []\n hjds = []\n mags = []\n magerrs = []\n fluxes = []\n fluxerrs = []\n\n for i in image_trios[f+'_images']:\n name = str(i).replace('\\n','').replace('.fits','')\n\n idx = np.where(lightcurves[f]['images'] == name)[0]\n\n if len(idx) > 0:\n images.append(lightcurves[f]['images'][idx][0])\n hjds.append(lightcurves[f]['hjd'][idx][0])\n mags.append(lightcurves[f]['mag'][idx][0])\n magerrs.append(lightcurves[f]['mag_err'][idx][0])\n (flux,ferr) = mag_to_flux_pylima(lightcurves[f]['mag'][idx][0],\n lightcurves[f]['mag_err'][idx][0])\n fluxes.append(flux)\n fluxerrs.append(ferr)\n\n else:\n images.append(name)\n hjds.append(9999999.999)\n mags.append(99.999)\n magerrs.append(-9.999)\n fluxes.append(9999999.999)\n fluxerrs.append(-9999999.999)\n\n lc = Table()\n lc['images'] = images\n lc['hjd'] = hjds\n lc['mag'] = mags\n lc['mag_err'] = magerrs\n lc['flux'] = fluxes\n lc['flux_err'] = fluxerrs\n\n target.lightcurves[f] = lc\n\n return target", "def find_circles_thres(current_frame_gray, num_of_rafts, radii_hough=[17, 19],\n thres_value=70, sigma_canny=1.0, low_threshold_canny=25, high_threshold_canny=127,\n min_sep_dist=20, raft_center_threshold=60,\n top_left_x=390, top_left_y=450, width_x=850, height_y=850):\n # key data set initialization\n raft_centers = np.zeros((num_of_rafts, 2), dtype=int)\n raft_radii = np.zeros(num_of_rafts, dtype=int)\n\n # crop the image\n image_cropped = current_frame_gray[top_left_y: top_left_y + height_y, top_left_x: top_left_x + width_x]\n\n # threshold the image\n retval, image_thres = cv.threshold(image_cropped, thres_value, 255, 0)\n\n # find edges\n image_edges = canny(image_thres, sigma=sigma_canny, low_threshold=low_threshold_canny,\n high_threshold=high_threshold_canny)\n\n # use Hough transform to find circles\n hough_results = hough_circle(image_edges, np.arange(*radii_hough))\n accums, cx, cy, radii = hough_circle_peaks(hough_results, np.arange(*radii_hough))\n\n # assuming that the first raft (highest accumulator score) is a good one\n # raft_centers[0,0] = cx[0]\n # raft_centers[0,1] = cy[0]\n # raft_radii[0] = radii[0]\n raft_count = 0 # starting from 1!\n\n # remove circles that belong to the same raft and circles that happened to be in between rafts\n for accum_score, detected_cx, detected_cy, detected_radius in zip(accums, cx, cy, radii):\n new_raft = 1\n if image_cropped[detected_cy, detected_cx] < raft_center_threshold:\n new_raft = 0\n elif image_cropped[detected_cy - detected_radius // 2: detected_cy + detected_radius // 2,\n detected_cx - detected_radius // 2:detected_cx + detected_radius // 2].mean() \\\n < 
raft_center_threshold:\n new_raft = 0\n # elif (detected_cx - width_x/2)**2 + (detected_cy - height_y/2)**2 > lookup_radius**2:\n # new_raft = 0\n else:\n cost_matrix = scipy_distance.cdist(np.array([detected_cx, detected_cy], ndmin=2),\n raft_centers[:raft_count, :], 'euclidean')\n if np.any(cost_matrix < min_sep_dist): # raft still exist\n new_raft = 0\n if new_raft == 1:\n raft_centers[raft_count, 0] = detected_cx\n # note that raft_count starts with 1, also note that cx corresponds to columns number\n raft_centers[raft_count, 1] = detected_cy # cy is row number\n raft_radii[raft_count] = detected_radius\n raft_count = raft_count + 1\n if raft_count == num_of_rafts:\n # error_message = 'all rafts found'\n break\n\n # convert the xy coordinates of the cropped image into the coordinates of the original image\n raft_centers[:, 0] = raft_centers[:, 0] + top_left_x\n raft_centers[:, 1] = raft_centers[:, 1] + top_left_y\n\n return raft_centers, raft_radii, raft_count", "def locate_tracker(self, debug):\n\n # tmp_image =\n # tmp_image = cv2.GaussianBlur(self.frame, (11, 11), 0) # Experiment with this\n\n hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) # Convert to HSV Color Space. This is temporary for testing using colored objects)\n\n mask = cv2.inRange(hsv, self.hueLower, self.hueUpper)\n\n try:\n mask = cv2.inRange(hsv, self.hueLower2, self.hueUpper2) + mask\n except AttributeError:\n pass\n\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n if debug:\n tmpMask = imutils.resize(mask, width=1000, height=1000)\n cv2.imshow(\"mask\", tmpMask)\n\n\n # find contours in the mask and initialize the current (x, y) center of the object\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n # if radius > 10:\n # # draw the circle and centroid on the frame,\n # # then update the list of tracked points\n # cv2.circle(frame, (int(x), int(y)), int(radius),\n # (0, 255, 255), 2)\n # cv2.circle(frame, center, 5, (0, 0, 255), -1)\n if debug:\n cv2.drawContours(self.frame, c, -1, (0, 255, 0), 20)\n return center, radius\n # update the points queue\n cv2.imshow(\"mask\", imutils.resize(mask, width=1000, height=1000))\n cv2.imshow(\"frame\", imutils.resize(self.frame, width=1000, height=1000))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n raise OpenCVError(\"Could not find tracker!\")\n\n # return (1, 1), 1", "def findNearestTime(foamCase, time):\n times = list(getTimeFolders(foamCase,returnType=\"float\"))\n strTimes = np.array(getTimeFolders(foamCase,returnType=\"string\"))\n if time in times:\n try:\n intTime = int(strTimes[times.index(time)])\n return int(time)\n except:\n return time\n else:\n nearestTime = times[np.argmin(np.abs(np.array(times)-time))]\n print(\"Time %f is not available, choosing nearest time %f\" % ( time, nearestTime))\n try:\n intTime = int(strTimes[times.index(nearestTime)])\n return int(nearestTime)\n except:\n return nearestTime", "def nearest_test_pulse(self):", "def getNearestTime(time_query):\n\n # Convert datetime object to string, for lookup in database.\n tstamp_query 
= coils.time2string(time_query)\n\n # Retrieve image timestamps.\n try:\n tstamp_left = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time <= tstamp_query).\\\n order_by(mapping.Image.time.desc()).limit(1)\n tstamp_left = tstamp_left[0].time\n delta_left = abs(coils.string2time(tstamp_left) - time_query)\n except:\n tstamp_left = None\n delta_left = dt.timedelta.max\n \n try:\n tstamp_right = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time >= tstamp_query).\\\n order_by(mapping.Image.time).limit(1)\n tstamp_right = tstamp_right[0].time\n delta_right = abs(coils.string2time(tstamp_right) - time_query)\n except:\n tstamp_right = None\n delta_right = dt.timedelta.max\n \n # The nearest value has the smallest delta from the query.\n result = tstamp_left if (delta_left < delta_right) else tstamp_right\n return result", "def find_nearest_time(self, time):\n\n idx = np.searchsorted(self.times, time, side=\"left\")\n if idx > 0 and (idx == len(self.times) or math.fabs(time - self.times[idx-1]) < math.fabs(time - self.times[idx])):\n return self.times[idx-1]\n else:\n return self.times[idx]" ]
[ "0.61136955", "0.5495255", "0.53449804", "0.53088015", "0.5267729", "0.5264703", "0.52540445", "0.51978594", "0.51893973", "0.5173132", "0.5172842", "0.517216", "0.5133897", "0.51184076", "0.50679076", "0.50616145", "0.5060293", "0.5051561", "0.5042053", "0.5037389", "0.50302774", "0.49955773", "0.49773225", "0.49667892", "0.49656028", "0.49587765", "0.49531177", "0.49246022", "0.49225846", "0.49138683" ]
0.58393806
1
Create structures defining acquisition time for fieldmaps and anatomicals. First find the fieldmap (or hires structural if no fieldmap was collected) nearest (on average) to the epis. Then define this series as the one that should be in register with the epis.
def _SetAnatTgts(self): anat_candidates = {} fmap_candidates = {} for entry in self.entry_map['anat']: if self.info[entry]['type'] == 'T1High': anat_candidates[entry] = self.info[entry]['acqtime'] # Find the valid anatomical acquired nearest to fieldmap. tdiff_min = 1e6 if len(self.entry_map['fmap']) > 0: for entry in self.entry_map['fmap']: anat_tgt = self. _FindNearestAnat(self.info[entry]['acqtime']) self.info[entry]['anat_ref'] = anat_tgt else: # No fieldmaps were collected. Find the structural nearest the # beginning of the EPIs. if len(self.entry_map['anat']) == 1: anat_tgt = self.entry_map['anat'][0] else: epi_start = [] tmin = 1e6 for anat in self.entry_map['anat']: if self.info[anat]['type'] != 'T1High': continue tsum1 = 0; tsum2 = 0; for epi in self.entry_map['epi']: # Difference from start of structural and first epi tsum1 += abs(self.info[anat]['acqtime'] - \ self.info[epi]['acqtime']) # Difference from start of structural and last epi tsum2 += abs(self.info[anat]['acqtime'] - \ (self.info[epi]['acqtime'] +\ self.info[epi]['TR']*self.info[epi]['tdim'])) if tsum1 < tmin or tsum2 < tmin: tmin = min(tsum1, tsum2) anat_tgt = anat # Resolve anatomical names and links. self._SetAnatNames(anat_tgt) # Set appropriate attributes in the entry for each EPI. for epi in self.entry_map['epi']: if len(self.entry_map['fmap']) > 0 and not self.no_fmapcorr: fmap_entry = self.info[epi]['fmap_entry'] anat_ref = self.info[fmap_entry]['anat_ref'] self.info[epi]['anat_tgt'] = fmap_entry self.info[epi]['anat_matfile'] = self.info[fmap_entry]['matfile'] if self.align_fmaps or (not self.no_align_fmaps and \ self._SetCatMotionFmapMats(fmap_entry, anat_ref)): # Concatenate motion-correction matrices with tranform from # fieldmap to structural. Use the registered fieldmap. self.info[epi]['catmats'] = True fmap_info = self.info[self.info[epi]['fmap_entry']] self.info[epi]['fmapname'] = \ fmap_info['imgfile_r'] + fmap_info['suffix'] else: # Assume fieldmap is in register with the structural. self.info[epi]['catmats'] = False else: self.info[epi]['anat_tgt'] = anat_tgt self.info[epi]['anat_matfile'] = None self.info[epi]['catmats'] = False self.info[epi]['anat_link'] = self.info[anat_tgt]['imgfile'] + \ self.info[anat_tgt]['suffix']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetEpiAcqTimes(self, series):\n# Find minimum and maximum start times for each acquistion in series.\n self.epi_times = {}\n for entry in self.entry_map['epi']:\n# Loop through each file in this series.\n if self.info[entry]['series'] == series and \\\n self.info[entry]['tdim'] > 2:\n# Relate each entry to its time of acquisition.\n self.epi_times[self.info[entry]['acqtime']] = entry", "def generateForcingFields(self, conc_idx, inputs, outputs):\n\n\t\tForcing.log(\"Running %s.generateForcingFields()\"%type(self))\n\n\t\t# Some variable used later\n\t\tscalar = None\n\n\t\tif self.griddedTimeZoneFld == None:\n\t\t\t# Assume all timezones are GMT\n\t\t\tprint \"Warning! No gridded time zone information loaded. Using a field of zeros.\"\n\t\t\ttz = np.zeros((self.ni,self.nj))\n\t\telse:\n\t\t\ttz = self.griddedTimeZoneFld\n\n\t\tif len(self.species) == 0:\n\t\t\traise NoSpeciesException(\"Must specify species\")\n\t\t\treturn\n\n\t\t# We doing time averaging?\n\t\tif self.averaging in ['AVG_MAX', 'AVG_MAX8', 'AVG_MAX24']:\n\t\t\tdo_averaging=True\n\t\t\taveraging_window = self.averaging_window\n\t\telse:\n\t\t\tdo_averaging=False\n\t\t\taveraging_window = None\n\t\t\t#if self.averaging == 'AVG_MASK' or self.averaging == 'AVG_NONE'\n\t\t\tif self.averaging == 'AVG_NONE':\n\t\t\t\t# Ensure this is set right\n\t\t\t\tself.timeMask = range(0,25)\n\t\t\t# If it's the mask, then the timemask should already be set\n\n\t\t# Create zero fields to allocate our arrays\n\t\tfld_empty=np.zeros((len(self.species), self.nt, self.nk_f, self.nj, self.ni), dtype=np.float32)\n\n\t\t# Get the relative days, so [-1 0 1] for [yesterday, today, tomorrow]\n\t\trdays = inputs.keys()\n\t\t# Probably an easiesr way to initalize this since we're only writing later, but for now we'll do it.\n\t\tflds={}\n\t\tfor d in rdays:\n\t\t\tflds[d] = fld_empty.copy()\n\n\t\t# This is NOT efficient. Could probably easily make it\n\t\t# more efficient by implementing some sort of cache though..\n\t\tfor idx_s, species in enumerate(self.species):\n\t\t\t#print \"Iteratiing through species %d=%s\"%(idx_s, species)\n\n\t\t\t# Initialize the data flds. Set to zero if there's a day that doesn't exist\n\t\t\tdatas={}\n\t\t\tfor d in rdays:\n\t\t\t\tif inputs[d] is None:\n\t\t\t\t\tdatas[d] = np.zeros((self.nt, self.nk_f, self.nj, self.ni), dtype=np.float32)\n\t\t\t\telse:\n\t\t\t\t\tdatas[d] = inputs[d].variables[species][:]\n\n\t\t\t# Recall, mask is already considered in these vectors\n\t\t\tfor k in self._layers:\n\t\t\t\t# I think there's a better way to do the next two loops, don't know it though.\n\t\t\t\tfor i in range(0,self.ni):\n\t\t\t\t\tfor j in range(0,self.nj):\n\n\t\t\t\t\t\t# Spatial mask\n\t\t\t\t\t\tif not self.space[j,i]:\n\t\t\t\t\t\t\t# This is masked out. 
Set to zero and go to the next cell\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tflds[d][idx_s][0:self.nt,k,j,i] = np.zeros((self.nt), dtype=np.float32)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t#else:\n\t\t\t\t\t\t#\t# TEMP HACK!!\n\t\t\t\t\t\t# # This temp hack is used to ensure the mask is working\n\t\t\t\t\t\t#\tfld_yest[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tfld_today[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tfld_tom[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tcontinue\n\n\n\t\t\t\t\t\t# Take averaging into consideration\n\t\t\t\t\t\t# For almost all of these averagings, we'll have to\n\t\t\t\t\t\t# build a vector of all values for all times at that\n\t\t\t\t\t\t# cell. Unfortunately, the data is organized in the \n\t\t\t\t\t\t# opposite way as we want (time is the top index..)\n\t\t\t\t\t\tif do_averaging:\n\t\t\t\t\t\t\tvecs={}\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tvecs[d] = datas[d][:Forcing.dayLen,k,j,i]\n\n\t\t\t\t\t\t\t# REMOVE!\n\t\t\t\t\t\t\t#if i==self.debug_i and j==self.debug_j:\n\t\t\t\t\t\t\t#\tprint \"vec_today[%d,%d]: \"%(self.debug_j, self.debug_i), vec_today\n\n\t\t\t\t\t\t\t# Prepares a vector of values with respect to the\n\t\t\t\t\t\t\t# direction we're going to calculate the average\n\t\t\t\t\t\t\t# (forward/backward), the window size, and time\n\t\t\t\t\t\t\t# zone \n\n\t\t\t\t\t\t\tvec = Forcing.prepareTimeVectorForAvg(vecs, timezone=tz[j][i], winLen=averaging_window, debug=False)\n\t\t\t\t\t\t\t#print \"i=%d,j=%d, preped vec[%d] = %s\"%(i,j,len(vec),\" \".join(map(str, vec)))\n\n\t\t\t\t\t\t\t# Calculate the moving window average\n\t\t\t\t\t\t\tavgs = Forcing.calcMovingAverage(vec, winLen=averaging_window)\n\t\t\t\t\t\t\t#print \"i=%d,j=%d, avg vec[%d] = %s\"%(i,j,len(avgs),\" \".join(map(str, avgs)))\n\n\t\t\t\t\t\t\t# And then, for the 8-hour max to be used for a\n\t\t\t\t\t\t\t# forcing term, generate a vector for yesterday,\n\t\t\t\t\t\t\t# today and tomorrow with the forcing terms in them\n\n\t\t\t\t\t\t\tif self.timeInvariantScalarMultiplcativeFld is not None:\n\t\t\t\t\t\t\t\tscalar = self.timeInvariantScalarMultiplcativeFld[j][i]/averaging_window\n\n\t\t\t\t\t\t\tvecs = Forcing.applyForceToAvgTime(avgs, days=vecs.keys(), winLen=averaging_window, timezone=tz[j][i], min_threshold=self.threshold, forcingValue=scalar)\n\n# This was done blindly\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tflds[d][idx_s][:24,k,j,i] = vecs[d]\n\n\t\t\t\t\t\telif self.averaging == 'AVG_MASK' or self.averaging == 'AVG_NONE':\n# NOT YET TESTED\n\t\t\t\t\t\t\traise NotImplementedError( \"Mask timing or no averaging is not yet tested. Averaging options=%s\"%self.averaging )\n\t\t\t\t\t\t\t# The comments assume timezone = -6\n\t\t\t\t\t\t\tfor t_gmt in self.timeMask:\n\t\t\t\t\t\t\t\t# when t_gmt = 0, t_loc = -6, so we're into yesterday\n\t\t\t\t\t\t\t\tt_loc = t_gmt + tz[j][i]\n\n\t\t\t\t\t\t\t\t# Reference the arrays\n\t\t\t\t\t\t\t\tif t_loc < 0:\n\t\t\t\t\t\t\t\t\tdfld = data_yest\n\t\t\t\t\t\t\t\t\t#ffld = fld_yest\n\t\t\t\t\t\t\t\telif t_loc>0 and t_loc<Forcing.dayLen:\n\t\t\t\t\t\t\t\t\tdfld = data_today\n\t\t\t\t\t\t\t\t\t#ffld = fld_today\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tdfld = data_tomorrow\n\t\t\t\t\t\t\t\t\t#ffld = fld_tomorrow\n\n\t\t\t\t\t\t\t\t# I have to write in GMT\n# This is wrong, as local times can write into another day.. maybe.. 
but since there's no averaging, another iteration will take care of that..\n\t\t\t\t\t\t\t\tffld = fld_today\n\n\t\t\t\t\t\t\t\t# fld[-6] is fld[18]\n\t\t\t\t\t\t\t\tval=dfld[t_loc,k,j,i]\n\t\t\t\t\t\t\t\tif threshold is not None:\n\t\t\t\t\t\t\t\t\tif val > threshold:\n\t\t\t\t\t\t\t\t\t\tforce = 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tif val > 0.0:\n\t\t\t\t\t\t\t\t\t\tforce = 1\n\n\t\t\t\t\t\t\t\t# Set the field in the referenced forcing field\n\t\t\t\t\t\t\t\tffld[t_loc,k,j,i] = force\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise NotImplementedError( \"Unavailable time averaging method (%s) selected\"%self.averaging )\n\n\t\t\t\t\t\t#endif averaging\n\t\t\t\t\t#endfor j\n\t\t\t\t#endfor i\n\t\t\t#endfor k\n\n\t\t#endfor species\n\n\t\treturn flds", "def _SetBaseEpi(self):\n tinfo = {}\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n if self.info[entry]['fmap_entry'] is None:\n tgt = info['anat_tgt']\n else:\n tgt = info['fmap_entry']\n tgt_time = self.info[tgt]['acqtime']\n\n plane = info['plane']\n if not tinfo.has_key(plane):\n tinfo[plane] = {}\n tdiff = abs(info['acqtime'] - tgt_time)\n tinfo[plane][tdiff] = (entry, 'start')\n tdiff = abs(info['acqtime'] + info['TR']*info['tdim']/1000 - tgt_time)\n tinfo[plane][tdiff] = (entry, 'end')\n\n bases = {}\n for plane in tinfo.keys():\n tdiffs = tinfo[plane].keys()\n tdiffs.sort()\n bases[plane] = tinfo[plane][tdiffs[0]]\n\n for epi in self.entry_map['epi']:\n plane = self.info[epi]['plane']\n base_entry, base = bases[plane]\n self.info[epi]['base_entry'] = base_entry\n self.info[epi]['base'] = base\n self.info[epi]['basefile'] = '%s'%(self.info[base_entry]['imgfile'])", "def main(name, line1, line2, orbital_filename):\n #name = \"TERRA\"\n #line1 = \"1 25994U 99068A 16048.43680378 .00000258 00000-0 67198-4 0 9999\"\n #line2 = \"2 25994 98.1982 124.4247 0001352 105.3907 254.7441 14.57126067859938\"\n satellite = ephem.readtle(name, line1, line2)\n \n\n # Landsat 8\n #name = \"Landsat8\"\n #line1=\"1 39084U 13008A 16051.82349873 .00000188 00000-0 51829-4 0 9999\"\n #line2=\"2 39084 98.1988 123.2603 0001265 89.4360 270.6984 14.57110027160810\"\n #LD8 = ephem.readtle(name, line1, line2)\n \n\n sun = ephem.Sun()\n fov = np.radians(68.6)\n\n \"\"\"\n Make pandas dataframe to store swath information\n \"\"\"\n import pandas as pd\n data = {\"DateTime\": [],\"DOY\":[],\"Month\": [],\n \"orbit_id\":[], \"ground_lat\": [], \n \"ground_lon\": [], \"swath_width\": []}\n swaths = pd.DataFrame(data)\n swaths.set_index(keys=\"DateTime\")\n # generate shapefile\n\n orbit_id = 0\n # need to do splitted by hemisphere unfortunately..\n for orbit in make_an_orbit(satellite):\n #import pdb; pdb.set_trace()\n if len(orbit) > 1:\n \"\"\"\n So worth doing processing on orbit...\n\n \"\"\"\n sun = ephem.Sun()\n\n print(orbit[0].datetime)\n\n for overpass in orbit:\n overpass.only_daytime_overpasses(sun)\n overpass.derive_swath_width(fov)\n \"\"\"\n Create a tempoary dataframe for this orbit\n \"\"\"\n epoch = datetime.datetime(1970, 1, 1)\n #import pdb; pdb.set_trace()\n tmp_d = {\"DateTime\": [(o.datetime - epoch).total_seconds() for o in orbit],\n \"DOY\":[int(o.datetime.strftime('%j')) for o in orbit],\n \"Month\": [o.datetime.month for o in orbit],\n \"orbit_id\": orbit_id * np.ones(len(orbit)),\n \"ground_lat\": [o.lat for o in orbit],\n \"ground_lon\": [o.long for o in orbit],\n \"swath_width\": [o.swath_width for o in orbit]}\n tmp = pd.DataFrame(tmp_d)\n tmp.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n orbit_id +=1 \n 
\"\"\"\n Append to main dataframe\n \"\"\"\n swaths = swaths.append(tmp)\n #swaths.set_index(keys=\"DateTime\")\n\n \"\"\"\n Save the DataFrame to a file\n \"\"\"\n swaths = swaths.set_index(keys=\"DateTime\")\n #swaths.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n swaths.to_csv(orbital_filename, header=True)", "def create_data_structures(self):\n # Data storage arrays for time and measurement\n # Create the array of zeros and preallocating\n start_time = time.time()\n # The number of data points has to be optimized\n self.data_points = 5000\n # prs_data has three rows, 0 = time, 1 = pressure - tare, 2 = raw_pressure\n self.prs_data = np.zeros([3, self.data_points])\n self.prs_data[0, :] = start_time\n # This queue receives data from the sensors and puts it in the graphs and sends to the \n # LifoQueue\n self.prs_q = Queue()\n # The lifo queue is created to send the data to the piston control thread. The piston\n # control will only read and use the last value, since only the most recent information\n # matters\n self.prs_lifo_q = LifoQueue()\n self.prs_tare = 0\n \n self.flw_data = np.zeros([3, self.data_points])\n self.flw_data[0, :] = start_time\n self.flw_q = Queue()\n self.flw_lifo_q = LifoQueue() # Read comment on the lifoqueue above\n self.flw_tare = 0\n\n self.vol_lifo_q = LifoQueue() # Read comment on the lifoqueue above\n self.vol_data = np.zeros([2, self.data_points])\n self.vol_data[0, :] = start_time", "def create(records):\n version = '1.0.0'\n\n iversion = [int(x) for x in version.split('.')]\n if iversion[1] > 0 or iversion[2] > 0:\n raise IOError(\"SEF versions > 0.0 are not supported\")\n\n latitude = 42.331\n longitude = -83.046\n altitude = 'NA'\n\n header = {\n 'SEF': version, 'ID': 'Detroit_Anthon', 'Name': 'Detroit, MI',\n 'Lat': latitude, 'Lon': longitude, 'Alt': altitude, 'Source': 'C3S-DRS',\n 'Link': '', 'Vbl': 'ta', 'Stat': 'point',\n 'Units': 'C', 'Meta': 'Observer=George Christian Anthon',\n }\n\n index_temperatures = 0\n index_times = 0\n\n time_offset = longitude * 12 / 180\n\n temp_dict = defaultdict(list)\n\n temperatures = []\n\n times = [datetime.time(7, 0), datetime.time(12, 0), datetime.time(20, 0)]\n original_time = [\"7:00AM\", \"12:00PM\", \"20:00PM\"]\n\n for index in range(len(records)):\n temperatures.append(records[index][datetime.time(7, 0)])\n temperatures.append(records[index][datetime.time(12, 0)])\n temperatures.append(records[index][datetime.time(20, 0)])\n for time in original_time:\n if isinstance(temperatures[index_temperatures], str):\n value = 'NA'\n else:\n value = round(((float(temperatures[index_temperatures]) - 32) * 5 / 9), 1)\n\n date = str(records[index]['Year']) \\\n + \"-\" \\\n + str(records[index]['Month']) \\\n + \"-\" + str(records[index]['Day']) \\\n + \" \" + str(times[index_times])\n\n date_time = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n\n utc = date_time - datetime.timedelta(hours=time_offset)\n\n year = str(utc)[:4]\n month = str(utc)[5:7]\n day = str(utc)[8:10]\n hour = str(utc)[11:13]\n minutes = str(utc)[14:16]\n\n data_dict = {\n 'Data': pd.DataFrame({\n 'Year': year,\n 'Month': month,\n 'Day': day,\n 'Hour': hour,\n 'Minute': minutes,\n 'Period': 0,\n 'Value': value,\n 'Meta': \"orig=\" + str(temperatures[index_temperatures])\n + 'F' + \"|orig.time=\" + str(time)\n + \"|orig.date=\" + str(records[index]['Year']) + '-' + str(records[index]['Month'])\n + '-' + str(records[index]['Day'])\n\n }, index=[0])\n }\n temp_dict['Data'].append(data_dict['Data'])\n\n index_times += 1\n if 
index_times > 2:\n index_times = 0\n\n index_temperatures += 1\n\n header.update(temp_dict)\n\n return header", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == 
\"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n 
input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def make_temperature_map(time: u.s, field, instr, **kwargs):\n plot_settings = {'cmap': cm.get_cmap('inferno')}\n plot_settings.update(kwargs.get('plot_settings', {}))\n bins, bin_range = instr.make_detector_array(field)\n visible = is_visible(instr.total_coordinates, instr.observer_coordinate)\n hist_coordinates, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=visible)\n with h5py.File(instr.counts_file, 'r') as hf:\n try:\n i_time = np.where(u.Quantity(hf['time'],\n get_keys(hf['time'].attrs), ('unit', 'units')) == time)[0][0]\n except IndexError:\n raise IndexError(f'{time} is not a valid time in observing time for {instr.name}')\n weights = np.array(hf['electron_temperature'][i_time, :])\n units = u.Unit(get_keys(hf['electron_temperature'].attrs, ('unit', 'units')))\n hist, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=weights * visible)\n hist /= np.where(hist_coordinates == 0, 1, hist_coordinates)\n meta = instr.make_fits_header(field, instr.channels[0])\n del meta['wavelnth']\n del meta['waveunit']\n meta['bunit'] = units.to_string()\n meta['detector'] = 'Electron Temperature'\n meta['comment'] = 'Column-averaged electron temperature calculated by synthesizAR'\n\n return GenericMap(hist.T, meta, plot_settings=plot_settings)", "def time_info(input_file):\n original_path = os.getcwd() #set original directory\n save_path = input_file['save_path']\n planet = input_file['exoplanet'] #set exoplanet name\n print '\\nObtain the images .... 
\\n'\n print 'Change to ', save_path\n os.chdir(save_path) #change to save directory where is our scvience images\n images = sorted(glob.glob('AB'+input_file['exoplanet']+'*.fits'))\n print '\\nImages = \\n',images\n tempo_loc = [] #time object\n SUN = [] #Sun coordinate object\n ra_sun, dec_sun, dsun = np.zeros(len(images)),np.zeros(len(images)),np.zeros(len(images)) #sun coordinates\n JD = np.zeros(len(images)) #julian date from time object\n ST = np.zeros(len(images))\n HJD = np.zeros(len(images))\n #create the exoplanet object coordianate\n exoplanet = SkyCoord(dec=input_file['DEC'],ra=input_file['RA'],unit=('deg','deg'),frame=input_file['frame'])\n print '\\nObtain data info from header ....\\n'\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n UTC = hdr['date-obs']+'T'+hdr['UT'] #string that contain the time in UTC in isot format\n tempo_loc.append(Time(UTC,scale=input_file['scale-time'],format='isot',location=(input_file['lon-obs'],input_file['lat-obs'])))#,input_data['altitude'])))\n JD[i] = tempo_loc[i].jd\n ST[i] = tempo_loc[i].sidereal_time('apparent').hour\n SUN.append(get_sun(tempo_loc[i]))\n ra_sun[i],dec_sun[i] = SUN[i].ra.deg, SUN[i].dec.deg\n dsun[i] = SUN[i].distance.value\n HJD[i] = use.hjd_date(JD[i],dsun[i],dec_sun[i],ra_sun[i],exoplanet.dec.deg,exoplanet.ra.deg,circular_orbit=input_file['circular_orbit'])\n use.update_progress((i+1.)/len(images))\n print '\\n.... done.\\n'\n print '\\n Time from header = \\n'\n #print '\\nImages ** UTC (YYYY-MM-DDTHH:MM:SS) ** JD (7d.5d) ** ST (hours) ** ST (HH:MM:SS) ** Sun Coordinate (epoch,RA,DEC,Distance) (deg,deg,AU) \\n'\n ST_string = []\n for i in range(len(images)):\n ST1 = int(ST[i])\n ST2 = int((ST[i]-ST1)*60.)\n ST3 = (((ST[i]-ST1)*60.)-ST2)*60\n ST_string.append(str(ST1)+':'+str(ST2)+':'+str(ST3))\n tempo_loc[i] = tempo_loc[i].value\n use.update_progress((i+1.)/len(images))\n #print images[i], ' ** ',tempo_loc[i], ' ** ', JD[i], ' ** ', ST[i],' ** ',ST_string[i],' ** ',sun_loc[i],' ** ',HJD[i]\n print '\\nSave data file ... \\n'\n data = DataFrame([images,tempo_loc,list(JD),list(ST),list(ST_string),list(ra_sun),list(dec_sun),list(dsun),list(HJD)]).T\n data.columns=['images','UTC','JD','ST','ST_isot','RA_SUN','DEC_SUN','D_SUN','HJD']\n print data\n data.to_csv('results.csv')\n os.chdir(original_path)\n return", "def fill_dict(self):\n image_time = (self.nl_image - 1) * (self.tcycle * self.dec)\n slc_dict = default_slc_dict()\n ts = self.time_start\n sod = _dt.timedelta(hours=ts.hour, minutes=ts.minute,\n seconds=ts.second, microseconds=ts.microsecond).total_seconds()\n st0 = sod + self.nl_acc * self.tcycle * self.dec + \\\n (self.dec / 2.0) * self.tcycle # include time to center of decimation window\n az_step = self.ang_per_tcycle * self.dec\n prf = abs(1.0 / (self.tcycle * self.dec))\n seq = self.TX_RX_SEQ\n GPRI_TX_z = self.mapping_dict['TX_' + seq[0] + \"_position\"]\n GPRI_RX_z = self.mapping_dict['RX_' + seq[1] + seq[3] + \"_position\"]\n fadc = C / (2. 
* self.rps)\n # Antenna elevation angle\n ant_elev = _np.deg2rad(self.antenna_elevation)\n # Compute antenna position\n rx1_coord = [0., 0., 0.]\n rx2_coord = [0., 0., 0.]\n tx_coord = [0., 0., 0.]\n #\n # Topsome receiver\n rx1_coord[0] = xoff + ant_radius * _np.cos(\n ant_elev) # local coordinates of the tower: x,y,z, boresight is along +X axis, +Z is up\n rx1_coord[1] = 0.0 # +Y is to the right when looking in the direction of +X\n rx1_coord[2] = GPRI_RX_z + ant_radius * _np.sin(\n ant_elev) # up is Z, all antennas have the same elevation angle!\n # Bottomsome receiver\n rx2_coord[0] = xoff + ant_radius * _np.cos(ant_elev)\n rx2_coord[1] = 0.0\n rx2_coord[2] = GPRI_RX_z + ant_radius * _np.sin(ant_elev)\n tx_coord[0] = xoff + ant_radius * _np.cos(ant_elev)\n tx_coord[1] = 0.0\n tx_coord[2] = GPRI_TX_z + ant_radius * _np.sin(ant_elev)\n chan_name = 'CH1 lower' if seq[3] == 'l' else 'CH2 upper'\n slc_dict['title'] = str(ts) + ' ' + chan_name\n slc_dict['date'] = self.time_start.date()\n slc_dict['start_time'] = st0\n slc_dict['center_time'] = st0 + image_time / 2\n slc_dict['end_time'] = st0 + image_time\n slc_dict['range_samples'] = self.ns_out\n slc_dict['azimuth_lines'] = self.nl_tot_dec - 2 * self.nl_acc\n slc_dict['range_pixel_spacing'] = self.rps\n slc_dict['azimuth_line_time'] = self.tcycle * self.dec\n slc_dict['near_range_slc'] = self.rmin\n slc_dict['center_range_slc'] = (self.rmin + self.rmax) / 2\n slc_dict['far_range_slc'] = self.rmax\n slc_dict['radar_frequency'] = self.RF_center_freq\n slc_dict['adc_sampling_rate'] = fadc\n slc_dict['prf'] = prf\n slc_dict['chirp_bandwidth'] = self.RF_freq_max - self.RF_freq_min\n slc_dict['receiver_gain'] = 60 - self.IMA_atten_dB\n slc_dict['GPRI_TX_mode'] = self.TX_mode\n slc_dict['GPRI_TX_antenna'] = seq[0]\n slc_dict.add_parameter('GPRI_RX_antennas', seq[1] + seq[3])\n slc_dict['GPRI_tx_coord'] = [tx_coord[0], tx_coord[1], tx_coord[2]]\n slc_dict['GPRI_rx1_coord'] = [rx1_coord[0], rx1_coord[1], rx1_coord[2]]\n slc_dict['GPRI_rx2_coord'] = [rx2_coord[0], rx2_coord[1], rx2_coord[2]]\n slc_dict['GPRI_az_start_angle'] = self.az_start\n slc_dict['GPRI_az_angle_step'] = az_step\n slc_dict['GPRI_ant_elev_angle'] = self.antenna_elevation\n slc_dict['GPRI_ref_north'] = self.geographic_coordinates[0]\n slc_dict['GPRI_ref_east'] = self.geographic_coordinates[1]\n slc_dict['GPRI_ref_alt'] = self.geographic_coordinates[2]\n slc_dict['GPRI_geoid'] = self.geographic_coordinates[3]\n return slc_dict", "def analyze_so(self, zmethod='trough'):\n\n ## create dict of dataframes for slow oscillation analysis\n print('Creating individual dataframes...')\n\n so = {}\n for chan in self.so_events.keys():\n so[chan] = {}\n for i, s in self.so_events[chan].items():\n # create individual df for each spindle\n start = self.so_events[chan][i]['npeak_minus2s']\n end = self.so_events[chan][i]['npeak_plus2s']\n so_data = self.data[chan]['Raw'].loc[start:end]\n so_filtdata = self.sofiltEEG[chan]['Filtered'].loc[start:end]\n spso_filtdata = self.spsofiltEEG[chan]['Filtered'].loc[start:end]\n \n # set new index so that each SO is zero-centered around the negative peak\n ms1 = list(range(-2000, 0, int(1/self.metadata['analysis_info']['s_freq']*1000)))\n ms2 = [-x for x in ms1[::-1]]\n id_ms = ms1 + [0] + ms2\n \n # create new dataframe\n so[chan][i] = pd.DataFrame(index=id_ms)\n so[chan][i].index.name='id_ms'\n \n # if the SO is not a full 2s from the beginning\n if start < self.data.index[0]:\n # extend the df index to the full 2s\n time_freq = 
str(int(1/self.metadata['analysis_info']['s_freq']*1000000))+'us'\n time = pd.date_range(start=start, end=end, freq=time_freq)\n so[chan][i]['time'] = time\n # append NaNs onto the end of the EEG data\n nans = np.repeat(np.NaN, len(time)-len(so_data))\n data_extended = list(nans) + list(so_data.values)\n so[chan][i]['Raw'] = data_extended\n filtdata_extended = list(nans) + list(so_filtdata.values)\n so[chan][i]['sofilt'] = filtdata_extended\n spsofiltdata_extended = list(nans) + list(spso_filtdata.values)\n so[chan][i]['spsofilt'] = spsofiltdata_extended\n\n # if the SO is not a full 2s from the end\n elif end > self.data.index[-1]:\n # extend the df index to the full 2s\n time_freq = str(int(1/self.metadata['analysis_info']['s_freq']*1000000))+'us'\n time = pd.date_range(start=start, end=end, freq=time_freq)\n so[chan][i]['time'] = time\n # append NaNs onto the end of the EEG data\n nans = np.repeat(np.NaN, len(time)-len(so_data))\n data_extended = list(so_data.values) + list(nans)\n so[chan][i]['Raw'] = data_extended\n filtdata_extended = list(so_filtdata.values) + list(nans)\n so[chan][i]['sofilt'] = filtdata_extended\n spsofiltdata_extended = list(spso_filtdata.values) + list(nans)\n so[chan][i]['spsofilt'] = spsofiltdata_extended\n else:\n so[chan][i]['time'] = so_data.index\n so[chan][i]['Raw'] = so_data.values\n so[chan][i]['sofilt'] = so_filtdata.values\n so[chan][i]['spsofilt'] = spso_filtdata.values\n \n self.so = so\n print('Dataframes created. Slow oscillation data stored in obj.so.')", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n 
if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def GEEsmos(ptsFile,metric,timeStep,buf,poly,username,folderOut, scalePix = 25000,startYear = None,endYear = None):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['lowest'] = 'rl'\n time_d['month'] = 'rm'\n time_d['year'] = 'ry'\n\n lastImage = ee.Image(ee.ImageCollection('NASA_USDA/HSL/soil_moisture')\n .sort('system:time_start',False)\n .first())\n lastImageDate = lastImage.get('system:index').getInfo()\n\n firstImage = ee.Image(ee.ImageCollection('NASA_USDA/HSL/soil_moisture')\n .sort('system:time_start',True)\n .first())\n firstImageDate = firstImage.get('system:index').getInfo()\n \n #startMonth - 1, because time-series starts on Jan 1\n #startYearAll: did't add one, for same reason\n if all([startYear is None,endYear is None]):\n startYear = int(firstImageDate[(len(firstImageDate)-8):(len(firstImageDate)-4)])\n endYear = int(lastImageDate[(len(lastImageDate)-8):(len(lastImageDate)-4)])\n startMonth = int(firstImageDate[(len(firstImageDate)-4):(len(firstImageDate)-2)])-1\n endMonth = int(lastImageDate[(len(lastImageDate)-4):(len(lastImageDate)-2)])-1\n startYearAll = startYear\n endYearAll = endYear - 1\n \n years = list(range(startYear, endYearAll + 1))\n monthsEE = ee.List(list(range(startMonth,(12*len(years)+endMonth))))\n yearsEE = ee.List(list(range(startYearAll, endYearAll + 1)))\n \n elif all([startYear >= 0,endYear >= 0]):\n startYearReal = int(firstImageDate[(len(firstImageDate)-8):(len(firstImageDate)-4)])\n endYearReal = int(lastImageDate[(len(lastImageDate)-8):(len(lastImageDate)-4)]) \n \n years = list(range(max(startYearReal,startYear), (min(endYearReal,endYear) + 1)))\n \n if endYear >= endYearReal:\n endMonth = int(lastImageDate[(len(lastImageDate)-4):(len(lastImageDate)-2)])-1\n endYearReal2 = endYearReal-1\n years2 = len(years)-1\n elif endYear < endYearReal:\n endMonth = 0\n endYearReal2 = endYearReal\n years2 = len(years)\n \n if startYear <= startYearReal:\n startMonth = int(firstImageDate[(len(firstImageDate)-4):(len(firstImageDate)-2)])-1\n elif 
startYear > startYearReal:\n startMonth = 0\n \n monthsEE = ee.List(list(range(startMonth,(12*years2+endMonth))))\n yearsEE = ee.List(list(range(max(startYearReal,startYear), (min(endYearReal2,endYear) + 1))))\n \n for met in metric:\n SMOS = ee.ImageCollection('NASA_USDA/HSL/soil_moisture').select(met)\n metL = [met]\n \n if timeStep == 'year':\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (SMOS\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (SMOS\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif timeStep == 'month':\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (SMOS\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (SMOS\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif all([timeStep == 'lowest',endYear is None, startYear is None]):\n\n img_col = SMOS\n \n elif all([timeStep == 'lowest',endYear > 0, startYear > 0]):\n\n img_col = SMOS.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_SMOS_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for SMOS: ' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_SMOS_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for SMOS: ' + met)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n 
.filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_SMOS_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for SMOS: ' + met)", "def Load_EP_Fullcospectra(path,start_day,end_day,variable):\r\n \r\n # Number of days selected\r\n sday = datetime.strptime(start_day,'%Y-%m-%d')\r\n eday = datetime.strptime(end_day,'%Y-%m-%d')\r\n Nday = (eday-sday).days +1\r\n \r\n if Nday <= 0:\r\n print('WARNING!! End day is before start day!')\r\n \r\n Nvars = len(variable)\r\n\r\n allf = os.listdir(path)\r\n fnames = [f for f in allf if f.endswith('.csv')]\r\n \r\n # Read first file to get info (meta) \r\n spec, timeseries, header, meta1 = read_cospectrum(path,[fnames[0]])\r\n Hz = meta1[0]\r\n avg_period = meta1[3]\r\n nseg = np.int(24*60/avg_period)\r\n ppf = np.int(2**np.floor(np.log2(avg_period*60*Hz/2)))\r\n\r\n df = Hz/2/ppf\r\n freq = np.arange(df,Hz/2+df,df)\r\n \r\n # spec shape: [frequency,time,variables]\r\n spec=np.zeros((ppf,np.int(Nday*(24*60/avg_period)),Nvars))*np.nan\r\n spec_time=[]\r\n\r\n tct = -1 # Time counter\r\n for d in range(Nday):\r\n for h in range(nseg):\r\n tct+=1\r\n curtime = sday+timedelta(d,0,0,0,avg_period*(h+1))\r\n spec_time.append(curtime)\r\n hstr = (curtime).strftime('%H%M')\r\n\r\n daystr = curtime.strftime('%Y-%m-%d')\r\n daystr2 = curtime.strftime('%Y%m%d')\r\n print('Loading... {} {}'.format(daystr,hstr))\r\n\r\n # See if file exists\r\n matchi = np.array(['{}-{}'.format(daystr2,hstr) in f for f in fnames])\r\n\r\n if np.sum(matchi)>0:\r\n matchi = np.where(matchi)[0][0]\r\n spec_day, spec_time_day, header_day, meta_day = read_cospectrum(path,[fnames[matchi]])\r\n spec_day = spec_day[0]\r\n\r\n for vi in range(Nvars):\r\n gasheader = 'f_nat*cospec(w_{})'.format(variable[vi])\r\n vmatchi = np.array([gasheader in h for h in header_day])\r\n if np.sum(vmatchi)>0:\r\n vmatchi = np.where(vmatchi)[0][0]\r\n spec[:,tct,vi] = spec_day[:,vmatchi]\r\n\r\n else:\r\n print('And there was a problem!') \r\n \r\n return spec, spec_time, freq", "def get_time_series(this_lat, this_lon, case, varnames):\n\n cesmdir = '/gpfs/fs1/collections/cdg/data/cesmLE/CESM-CAM5-BGC-LE/atm/proc/tseries/monthly'\n\n if 'LE' in case:\n\n from observational_large_ensemble.params import karen_params_cesm\n\n mode_lag = karen_params_cesm.mode_lag\n cvdp_loc = karen_params_cesm.cvdp_loc\n AMO_cutoff_freq = karen_params_cesm.AMO_cutoff_freq\n\n name_conversion = {'tas': 'TREFHT', 'pr': 'PRECC', 'slp': 'PSL'}\n cesm_names = [name_conversion[v] for v in varnames]\n this_member = int((case).split('-')[-1])\n cvdp_file = '%s/CESM1-CAM5-BGC-LE_#%i.cvdp_data.1920-2018.nc' % (cvdp_loc, this_member)\n\n # Historical filenames for CESM. 
Will need to append part of RCP8.5 to get full period\n filenames = []\n for var in cesm_names:\n file_str = '%s/%s/b.e11.B20TRC5CNBDRD.f09_g16.%03d.cam.h0.%s.??????-200512.nc' % (cesmdir, var,\n this_member, var)\n this_file = glob(file_str)[0]\n filenames.append(this_file)\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames,\n karen_params_cesm.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n else:\n\n from observational_large_ensemble.params import karen_params_obs\n\n mode_lag = karen_params_obs.mode_lag\n cvdp_loc = karen_params_obs.cvdp_loc\n AMO_cutoff_freq = karen_params_obs.AMO_cutoff_freq\n\n tas_dir = karen_params_obs.tas_dir\n pr_dir = karen_params_obs.pr_dir\n slp_dir = karen_params_obs.slp_dir\n cvdp_file = '%s/HadISST.cvdp_data.1920-2018.nc' % cvdp_loc\n file_dict = {'tas': '%s/Complete_TAVG_LatLong1.nc' % tas_dir,\n 'pr': '%s/full_data_monthly_v2020.nc' % pr_dir,\n 'slp': '%s/prmsl.mon.mean.nc' % slp_dir}\n\n filenames = []\n for var in varnames:\n filenames.append(file_dict[var])\n\n name_conversion = {'tas': 'temperature', 'pr': 'precip', 'slp': 'prmsl'}\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames[0],\n karen_params_obs.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n return this_ts, df_shifted", "def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. 
/\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata", "def _get_gedi2a_main_data_dict(self) -> dict:\n gedi_l2a_count_start = pd.to_datetime(\"2018-01-01T00:00:00Z\")\n data = {\n # General identifiable data\n \"granule_name\": [self.parent_granule.filename] * self.n_shots,\n \"shot_number\": self[\"shot_number\"][:],\n \"beam_type\": [self.beam_type] * self.n_shots,\n \"beam_name\": [self.name] * self.n_shots,\n # Temporal data\n \"delta_time\": self[\"delta_time\"][:],\n \"absolute_time\": (\n gedi_l2a_count_start\n + pd.to_timedelta(list(self[\"delta_time\"]), unit=\"seconds\")\n ),\n # Quality data\n \"sensitivity\": self[\"sensitivity\"][:],\n \"quality_flag\": self[\"quality_flag\"][:],\n \"solar_elevation\": self[\"solar_elevation\"][:],\n \"solar_azimuth\": self[\"solar_elevation\"][:],\n \"energy_total\": self[\"energy_total\"][:],\n # DEM\n \"dem_tandemx\": self[\"digital_elevation_model\"][:],\n \"dem_srtm\": self[\"digital_elevation_model_srtm\"][:],\n # Processing data\n \"selected_algorithm\": self[\"selected_algorithm\"][:],\n \"selected_mode\": self[\"selected_mode\"][:],\n # Geolocation data\n \"lon_lowestmode\": self[\"lon_lowestmode\"][:],\n \"longitude_bin0_error\": self[\"longitude_bin0_error\"][:],\n \"lat_lowestmode\": self[\"lat_lowestmode\"][:],\n \"latitude_bin0_error\": self[\"latitude_bin0_error\"][:],\n \"elev_lowestmode\": self[\"elev_lowestmode\"][:],\n \"elevation_bin0_error\": self[\"elevation_bin0_error\"][:],\n \"lon_highestreturn\": self[\"lon_highestreturn\"][:],\n \"lat_highestreturn\": self[\"lat_highestreturn\"][:],\n \"elev_highestreturn\": self[\"elev_highestreturn\"][:],\n } | {f\"rh{i}\": self[\"rh\"][:, i] for i in range(101)}\n return data", "def compute_habitat(particle, fieldset, time):\n if particle.active == 1:\n #Convert dx to lon and lat\n dx_lon = fieldset.grad_dx / (fieldset.deg * cos(particle.lat * math.pi / 180)) \n dx_lat = fieldset.grad_dx / fieldset.deg\n #\n #Get 5 T and 5 NPP\n #\n T0 = [fieldset.T[time, particle.depth, particle.lat, particle.lon],#position\n fieldset.T[time, particle.depth, particle.lat, particle.lon - dx_lon],#left\n fieldset.T[time, particle.depth, particle.lat, particle.lon + dx_lon],#right\n fieldset.T[time, particle.depth, particle.lat - dx_lat, particle.lon],#bottom\n fieldset.T[time, particle.depth, particle.lat + dx_lat, particle.lon]]#top\n \n NPP0 = [fieldset.NPP[time, particle.depth, particle.lat, 
particle.lon],#position\n fieldset.NPP[time, particle.depth, particle.lat, particle.lon - dx_lon],#left\n fieldset.NPP[time, particle.depth, particle.lat, particle.lon + dx_lon],#right\n fieldset.NPP[time, particle.depth, particle.lat - dx_lat, particle.lon],#bottom\n fieldset.NPP[time, particle.depth, particle.lat + dx_lat, particle.lon]]#top \n #Save T and NPP at particle location\n particle.T = T0[0]\n particle.NPP = NPP0[0]\n #\n #Temperature habitat\n #\n Tmin = particle.Tmin\n Topt = particle.Topt\n T_hab = [0, 0, 0, 0, 0] #position, left, right, bottom and top\n #\n if T0[0] >= Topt:\n T_hab[0] = 1.0\n else:\n T_hab[0] = exp(-2*((T0[0]-Topt)/(Topt-Tmin))**2)\n #\n if T0[1] >= Topt:\n T_hab[1] = 1.0\n else:\n T_hab[1] = exp(-2*((T0[1]-Topt)/(Topt-Tmin))**2)\n #\n if T0[2] >= Topt:\n T_hab[2] = 1.0\n else:\n T_hab[2] = exp(-2*((T0[2]-Topt)/(Topt-Tmin))**2)\n #\n if T0[3] >= Topt:\n T_hab[3] = 1.0\n else:\n T_hab[3] = exp(-2*((T0[3]-Topt)/(Topt-Tmin))**2)\n #\n if T0[4] >= Topt:\n T_hab[4] = 1.0\n else:\n T_hab[4] = exp(-2*((T0[4]-Topt)/(Topt-Tmin))**2)\n #\n #Food habitat\n #\n food_hab = [0, 0, 0, 0, 0] #position, left, right, bottom and top\n #\n if NPP0[0] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[0] = 0\n else:\n food_hab[0] = min(NPP0[0]/particle.PPmax,1)\n #\n if NPP0[1] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[1] = 0\n else:\n food_hab[1] = min(NPP0[1]/particle.PPmax,1)\n #\n if NPP0[2] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[2] = 0\n else:\n food_hab[2] = min(NPP0[2]/particle.PPmax,1)\n #\n if NPP0[3] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[3] = 0\n else:\n food_hab[3] = min(NPP0[3]/particle.PPmax,1)\n #\n if NPP0[4] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[4] = 0\n else:\n food_hab[4] = min(NPP0[4]/particle.PPmax,1)\n #\n #Total habitat\n #\n particle.habT = T_hab[0]\n particle.habPP = food_hab[0]\n particle.hab = particle.habT * particle.habPP\n h_left = T_hab[1] * food_hab[1]\n h_right = T_hab[2] * food_hab[2]\n h_bot = T_hab[3] * food_hab[3]\n h_top = T_hab[4] * food_hab[4]\n #\n #Habitat gradient\n #\n particle.xgradh = (h_right - h_left)/(2 * fieldset.grad_dx)\n particle.ygradh = (h_top - h_bot)/(2 * fieldset.grad_dx)\n #\n #Safety check\n #\n if particle.hab < 0 or particle.hab > 1:\n print(\"Habitat is %f at lon,lat = %f,%f. 
Execution stops.\"%(particle.hab,particle.lon,particle.lat))\n exit(0)", "def prepare_input(self, only_center = True):\n \n if only_center:\n nx = [0]\n ny = [0]\n else:\n nx = [0,1,-1]\n ny = [0,1,-1]\n gauge = dd.read_csv(str(Path(self.db_location, 'gauge', '*.csv.gz')), \n compression='gzip', \n assume_missing=True,\n dtype = {'TIMESTAMP':int, 'STATION': str})\n \n gauge = gauge.compute().drop_duplicates()\n gauge = gauge.replace(-9999,np.nan)\n for x in nx:\n for y in ny:\n logging.info('Processing neighbour {:d}{:d}'.format(x, y))\n radar = dd.read_parquet(str(Path(self.db_location, 'radar',\n '*.parquet')))\n refer = dd.read_parquet(str(Path(self.db_location, 'reference', \n '*.parquet')))\n \n # Select only required pixel\n radar = radar.loc[np.logical_and(radar['NX'] == x, \n radar['NY'] == y)]\n refer = refer.loc[np.logical_and(refer['NX'] == x, \n refer['NY'] == y)]\n \n # Convert to pandas and remove duplicates \n radar = radar.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION',\n 'RADAR',\n 'NX','NY',\n 'SWEEP'])\n \n refer = refer.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION'])\n \n radar = radar.sort_values(by = ['TIMESTAMP','STATION','SWEEP'])\n refer = refer.sort_values(by = ['TIMESTAMP','STATION'])\n gauge = gauge.sort_values(by = ['TIMESTAMP','STATION'])\n # Get only valid precip data\n gauge = gauge[np.isfinite(gauge['RRE150Z0'])]\n \n # Create individual 10 min - station stamps\n gauge['s-tstamp'] = np.array(gauge['STATION'] + \n gauge['TIMESTAMP'].astype(str)).astype(str)\n radar['s-tstamp'] = np.array(radar['STATION'] + \n radar['TIMESTAMP'].astype(str)).astype(str)\n refer['s-tstamp'] = np.array(refer['STATION'] + \n refer['TIMESTAMP'].astype(str)).astype(str)\n \n # Get gauge and reference only when radar data available\n \n # Find timestamps that are in the three datasets\n ststamp_common = np.array(pd.Series(list(set(gauge['s-tstamp'])\n .intersection(set(refer['s-tstamp'])))))\n ststamp_common = np.array(pd.Series(list(set(radar['s-tstamp'])\n .intersection(set(ststamp_common)))))\n radar = radar.loc[radar['s-tstamp'].isin(ststamp_common)]\n gauge = gauge.loc[gauge['s-tstamp'].isin(ststamp_common)]\n refer = refer.loc[refer['s-tstamp'].isin(ststamp_common)]\n \n \n # Filter incomplete hours\n stahour = np.array(gauge['STATION'] + \n ((gauge['TIMESTAMP'] - 600 ) - \n (gauge['TIMESTAMP'] - 600 ) % 3600).astype(str)).astype(str)\n \n full_hours = np.array(gauge.groupby(stahour)['STATION']\n .transform('count') == 6)\n \n refer = refer.reindex[full_hours]\n gauge = gauge.reindex[full_hours] \n radar = radar.reindex[radar['s-tstamp'].\n isin(np.array(gauge['s-tstamp']))]\n \n stahour = stahour[full_hours]\n \n # Creating vertical grouping index\n \n _, idx, grp_vertical = np.unique(radar['s-tstamp'],\n return_inverse = True,\n return_index = True)\n # Get original order\n sta_tstamp_unique = radar['s-tstamp'][np.sort(idx)]\n # Preserves order and avoids sorting radar_statstamp\n grp_vertical = idx[grp_vertical]\n # However one issue is that the indexes are not starting from zero with increment\n # of one, though they are sorted, they are like 0,7,7,7,15,15,23,23\n # We want them starting from zero with step of one\n grp_vertical = rankdata(grp_vertical,method='dense') - 1\n \n # Repeat operation with gauge hours\n sta_hourly_unique, idx, grp_hourly = np.unique(stahour, \n return_inverse = True,\n return_index = True)\n grp_hourly = idx[grp_hourly]\n \n # Add derived variables height iso0 (HISO) and height above ground (HAG)\n # Radar\n 
stations = constants.METSTATIONS\n cols = list(stations.columns)\n cols[1] = 'STATION'\n stations.columns = cols\n radar = pd.merge(radar,stations, how = 'left', on = 'STATION',\n sort = False)\n \n radar['HISO'] = -radar['T'] / constants.LAPSE_RATE * 100\n radar['HAG'] = radar['HEIGHT'] - radar['Z']\n radar['HAG'][radar['HAG'] < 0] = 0\n \n # Gauge\n gauge['minutes'] = (gauge['TIMESTAMP'] % 3600)/60\n \n # Save all to file\n refer.to_parquet(str(Path(self.input_location, \n 'reference_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n radar.to_parquet(str(Path(self.input_location, \n 'radar_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n grp_idx = {}\n grp_idx['grp_vertical'] = grp_vertical\n grp_idx['grp_hourly'] = grp_hourly\n grp_idx['tstamp_unique'] = sta_tstamp_unique\n \n pickle.dump(grp_idx, \n open(str(Path(self.input_location, \n 'grouping_idx_x{:d}y{:d}.p'.format(x,y))),'wb'))\n \n if x == 0 and y == 0:\n # Save only gauge for center pixel since it's available only there\n gauge.to_parquet(str(Path(self.input_location, 'gauge.parquet')),\n compression = 'gzip', index = False)", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n 
ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def main(datafilepath):\n #create midline\n sectionsize = 10000\n TrackData = TrackMaker(sectionsize) # 10000\n moving_window = sectionsize*2\n midline = TrackData[0] \n sections = TrackData[2]\n #midline = midline[sections[0]:sections[5],:] #only work with the midline of the trial \n #steergaze_df = pd.read_feather(datafilepath)\n steergaze_df = pd.read_csv(datafilepath, sep=',',header=0)\n #steergaze_df.reset_index()\n master_steergaze = pd.DataFrame()\n datafolder = os.path.split(datafilepath)[0] \n\n #TODO: due to grouping the future path cuts - off at end of slalom, use the continuous trajectory across roadsections for fp mapping\n\n #modes taken from gaze_through_midline_densities.py\n entry = find_closest_index(midline, [-23, 69])\n firstobject = find_closest_index(midline, [25, 52])\n gazemodes = [entry, firstobject]\n\n mid_diff = np.linalg.norm(np.diff(midline, axis=0, prepend = np.array([[0,0]])), axis = 1)\n midline_dist_array = np.cumsum(mid_diff)\n\n tree = spatial.cKDTree(midline)\n\n #for trial in picked_trials:\t\n for block, blockdata in steergaze_df.groupby(['ID','block']):\n\n print(block)\n begin = timer()\n\n\n blockdata = blockdata.copy()\n blockdata.sort_values('currtime', inplace=True)\n # blockdata.reset_index()\n\n ####pick target\n \"\"\"\n condition = blockdata.condition.values[0]\n target_centres = targets.loc[targets['condition']==int(condition),:]\n #pprint(target_centres)\n\n target_centres = target_centres.reset_index(drop=True)\n #pick starting position.\n start_x = np.sign(blockdata['posx']).values[0]\n #select targets with opposite sign for xcentre, these will be the ones encountered in that block\n target_centres = target_centres.loc[np.sign(target_centres['xcentre'])!=start_x,:] \n target_circles = dp.target_position_circles(target_centres)\n\n \"\"\"\n\n traj_x = blockdata['posx'].values\n traj_z = blockdata['posz'].values\n trajectory = np.transpose(np.array([traj_x, traj_z]))\n\n yaw = blockdata['yaw'].values\n \n #gaze_on_screen = blockdata['hangle'].values, blockdata['vangle'].values\n gaze_on_screen = np.transpose(np.array([blockdata['hangle'].values, blockdata['vangle'].values]))\n\n #print(yaw[0])\n #index = i\n #\tviewpoint = blockdata['posx'].values, blockdata['posz'].values\n roadsection = blockdata['roadsection'].values\n\n #find time headway along MIDLINE \n \"\"\"\n start = timer()\n #idx, *_ = find_closest_index(midline, trajectory[0,:])\n idx = [find_closest_index(midline, viewpoint) for viewpoint in trajectory] \n print(idx[:10])\n print(timer()-start)\n \"\"\"\n\n #closest_indexes = [closest_node(midline, viewpoint) for viewpoint in trajectory] \n #closest indexes\n #print(np.take(midline, 5, axis = 0, mode = 'wrap'))\n #print(np.take(midline, len(midline), axis = 0, mode = 'wrap'))\n #print(np.take(midline, 0, axis = 0, mode = 'wrap'))\n _, closest_indexes = tree.query(trajectory) \n\n end_of_view = closest_indexes + moving_window\n\n #futuremid = np.take(midline, range(closest_indexes[0], end_of_view[0]), axis = 0, mode = 'wrap')\n def takemid(c,e):\n return (np.take(midline, range(c, e), axis = 0, mode = 'wrap'))\n\n start = timer()\n ml_idx, ml_screen_refs, ml_world_refs, ml_th = zip(*[\n closest_on_screen_point(takemid(c,e), t, y, g) \n for c, e, t, y, g in zip(closest_indexes, end_of_view, trajectory, yaw, gaze_on_screen)\n ])\n print(timer() - start) \n \n print(ml_screen_refs.shape)\n 
print(type(ml_screen_refs))\n ml_screen_refs = ml_screen_refs.reshape(-1, 2)\n ml_world_refs = ml_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['midline_ref_onscreen_x'] = ml_screen_refs[:, 0]\n blockdata['midline_ref_onscreen_z'] = ml_screen_refs[:, 1]\n blockdata['midline_ref_world_x'] = ml_world_refs[:, 0]\n blockdata['midline_ref_world_z'] = ml_world_refs[:, 1]\n blockdata['th_along_midline'] = ml_th\n\n #find closest point on FUTURE PATH, with th calc along the path \n \n traj_index = range(len(trajectory))\n fp_idx, fp_screen_refs, fp_world_refs, fp_th = zip(*[\n closest_on_screen_point(trajectory[i:(i+1000),:], t, y, g) \n for i, t, y, g in zip(traj_index, trajectory, yaw, gaze_on_screen)\n ])\n #future_traj = trajectory[index:(index+window_fp), :]\n #fp_world_ref, fp_idx, dists, fp_angles = closest_on_screen_point(future_traj, viewpoint, yaw, gaze_on_screen)\n print(fp_screen_refs.shape)\n print(type(fp_screen_refs))\n fp_screen_refs = fp_screen_refs.reshape(-1, 2)\n fp_world_refs = fp_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['futurepath_ref_onscreen_x'] = fp_screen_refs[:, 0]\n blockdata['futurepath_ref_onscreen_z'] = fp_screen_refs[:, 1]\n blockdata['futurepath_ref_world_x'] = fp_world_refs[:, 0]\n blockdata['futurepath_ref_world_z'] = fp_world_refs[:, 1]\n blockdata['th_along_futurepath'] = fp_th\n \n \n\n #TODO: current method runs into problems if the viewpoint is just before the midline resets (i.e. very large midline_dist_array value).\n #but not a problem for current analysis because trial starts from beginning of midline.\n #th_to_entry\n mid_dist_viewpoint = midline_dist_array[idx]\n\n mid_dist_entry = midline_dist_array[gazemodes[0]]\n th_to_entry = (mid_dist_entry - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_entry'] = th_to_entry\n\n #th_to_object\n mid_dist_object = midline_dist_array[gazemodes[1]]\n th_to_object = (mid_dist_object - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_object'] = th_to_object\t\t\n \n \"\"\"\n trialcode = row['trialcode']\n #plot\t\t\t \n #print(\"th_along_midline\", ml_timeheadway)\n #print('ml_ref', ml_world_ref)\n #print(\"th_along_futurepath\", fp_timeheadway)\n #print(\"fp_ref\", fp_world_ref)\n\n world_gaze = dp.angles_to_world(gaze_on_screen, viewpoint, yaw)\n #print(\"world_gaze\", world_gaze)\n\n plt.ylim(angles_limits_bottom[1],angles_limits_top[1])\n plt.xlim(angles_limits_bottom[0],angles_limits_top[0])\n\n plt.plot(ml_angles[:,0],ml_angles[:,1], 'C3o', markersize = .5, )\n plt.plot(fp_angles[:,0],fp_angles[:,1], 'C2o', markersize = .5)\n plt.plot(ml_screen_ref[0],ml_screen_ref[1], 'C1o', markersize = 5, markeredgecolor = 'k')\n plt.plot(fp_screen_ref[0],fp_screen_ref[1], 'C0o', markersize = 5, markeredgecolor = 'k')\n\n plt.plot(gaze_on_screen[0],gaze_on_screen[1], 'mo', markersize = 5, markeredgecolor = 'k')\n plt.title(str(trialcode))\n\n\n plt.pause(.016) \n plt.cla()\n\n plt.show()\n \"\"\"\n\t\t\n #master_steergaze = pd.concat([master_steergaze, blockdata])\n\n\n compute_time = timer()-begin\n print(\"Processing block took %f seconds\" % compute_time)\n\n\n print(\"APPENDING DATA FRAME\")\n outfilepath = datafolder + '/trout_gazeandsteering_addthfrompath2.csv'\n\n with open(outfilepath, 'a', newline = '') as sgfile:\n blockdata.to_csv(sgfile, mode='a', header=sgfile.tell()==0)\n\n #master_steergaze.to_csv(datafolder + '/trout_gazeandsteering_addthfrompath.csv')\n\n 
#master_steergaze.to_feather(datafilepath)", "def make_digital_map(self):\n self.uni.home(axis='X')\n time.sleep(10.0)\n azimuths = []\n for x in numpy.arange(self.azimuth.xmin, self.azimuth.xmax + self.azimuth.xinc,\n self.azimuth.xinc):\n if x > self.azimuth.xmax:\n x = self.azimuth.xmax\n azimuths.append(x)\n azimuths = numpy.array(azimuths)\n wait = (abs(azimuths[0]-self.uni.pos_az)/self.azimuth.xslew_vel) + 1.0\n self.uni.set_azimuth(azimuths[0], self.azimuth.xslew_vel)\n logger.info(\"Sleeping for %.2f seconds while stage gets to start of map\" % wait)\n time.sleep(wait)\n\n fp = open(self.filename, 'w')\n header = self.make_digital_header()\n fp.write(header)\n plt.ion()\n plt.plot([self.azimuth.xmin, self.azimuth.xmax], [0, 0], 'r-')\n plt.xlim(self.azimuth.xmin, self.azimuth.xmax)\n plt.ylim(-0.5, 6)\n plt.draw()\n for az in azimuths:\n wait = (abs(az-self.uni.pos_az)/self.azimuth.xmap_vel) + 1.0\n self.uni.set_azimuth(az, self.azimuth.xmap_vel)\n logger.info(\"Sleeping for %.2f seconds while stage gets to %.1f degrees\" % (wait, az))\n time.sleep(wait)\n fp.write(\"%.3f\" % az)\n #data = self.take_readings()\n for i, freq in enumerate(self.freq_list):\n self.syn.set_freq(freq)\n for dig_channel in range(8):\n for dig in range(8):\n if dig != dig_channel:\n self.labjack.digital_output(dig, 1)\n time.sleep(0.050)\n self.labjack.digital_output(dig_channel, 0)\n time.sleep(0.050)\n ratio, phase = self.vv.measure_vector_averaged_transmission(self.average)\n fp.write(\",%.6g,%.6g\" % (ratio, phase))\n logger.info(\"Az: %.2f, Freq: %.3f, Ratio: %g; Phase: %g\" % (az, freq/1e9, ratio, phase))\n plt.plot(az, ratio, self.plot_symbols[i])\n plt.draw()\n fp.write('\\n')\n \n time.sleep(10.0)\n self.uni.home(axis='X')\n logger.info(\"Map Completed, Saving data file %s\" % self.filename)\n fp.close()", "def from_field(cls, fieldset, pclass, start_field, size, mode='monte_carlo', depth=None, time=None, repeatdt=None):\n\n if mode == 'monte_carlo':\n if start_field.interp_method == 'cgrid_tracer':\n p_interior = np.squeeze(start_field.data[0, 1:, 1:])\n else: # if A-grid\n d = start_field.data\n p_interior = (d[0, :-1, :-1] + d[0, 1:, :-1] + d[0, :-1, 1:] + d[0, 1:, 1:])/4.\n p_interior = np.where(d[0, :-1, :-1] == 0, 0, p_interior)\n p_interior = np.where(d[0, 1:, :-1] == 0, 0, p_interior)\n p_interior = np.where(d[0, 1:, 1:] == 0, 0, p_interior)\n p_interior = np.where(d[0, :-1, 1:] == 0, 0, p_interior)\n p = np.reshape(p_interior, (1, p_interior.size))\n inds = np.random.choice(p_interior.size, size, replace=True, p=p[0] / np.sum(p))\n xsi = np.random.uniform(size=len(inds))\n eta = np.random.uniform(size=len(inds))\n j, i = np.unravel_index(inds, p_interior.shape)\n grid = start_field.grid\n if grid.gtype in [GridCode.RectilinearZGrid, GridCode.RectilinearSGrid]:\n lon = grid.lon[i] + xsi * (grid.lon[i + 1] - grid.lon[i])\n lat = grid.lat[j] + eta * (grid.lat[j + 1] - grid.lat[j])\n else:\n lons = np.array([grid.lon[j, i], grid.lon[j, i+1], grid.lon[j+1, i+1], grid.lon[j+1, i]])\n if grid.mesh == 'spherical':\n lons[1:] = np.where(lons[1:] - lons[0] > 180, lons[1:]-360, lons[1:])\n lons[1:] = np.where(-lons[1:] + lons[0] > 180, lons[1:]+360, lons[1:])\n lon = (1-xsi)*(1-eta) * lons[0] +\\\n xsi*(1-eta) * lons[1] +\\\n xsi*eta * lons[2] +\\\n (1-xsi)*eta * lons[3]\n lat = (1-xsi)*(1-eta) * grid.lat[j, i] +\\\n xsi*(1-eta) * grid.lat[j, i+1] +\\\n xsi*eta * grid.lat[j+1, i+1] +\\\n (1-xsi)*eta * grid.lat[j+1, i]\n else:\n raise NotImplementedError('Mode %s not implemented. 
Please use \"monte carlo\" algorithm instead.' % mode)\n\n return cls(fieldset=fieldset, pclass=pclass, lon=lon, lat=lat, depth=depth, time=time, repeatdt=repeatdt)", "def GetMapId(landsat, date, date_range):\n \n def maskClouds(img):\n scored = ee.Algorithms.Landsat.simpleCloudScore(img);\n return img.updateMask(scored.select(['cloud']).lt(20));\n\n def CreateTimeBand(img):\n return maskClouds(img).byte().addBands(img.metadata('system:time_start'))\n\n if landsat == 'l7':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L7)\n l7 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l7Composite = l7.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l7Composite.getMapId({\n 'min': '0,0,0',\n 'max': '255,255,255',\n 'bands': 'B4,B3,B2',\n })\n if landsat == 'l8':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L8)\n l8 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l8Composite = l8.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l8Composite.getMapId({\n 'min': '0',\n 'max': '0.4',\n 'bands': 'B4,B3,B2',\n })", "def __init__(self, t0, t1, hours=(\"00:00\", \"23:45\"),\n forecast_zones=\"DK\", norm=False, TimeResolution=\"15T\"):\n \n self.t0 = t0\n self.t1 = t1\n self.muni_input = forecast_zones\n self.norm = norm\n self.Time = TimeResolution\n self.fc_zones = self._muni_interpreter(self.muni_input)\n self.fc_obj = import_muni_forecast(self.t0, self.t1,\n hours=hours,\n muni_list=self.fc_zones,\n sub_h_freq=self.Time)\n root = return_to_root()\n coef_path = 'scripts/rad_model_development/'\n stem_path = '/data/stem_data/'\n self.all_KNr = np.array(pd.read_excel(root + stem_path +\n 'Kommune_GridNr.xlsx',\n header=0)['Kommune_Nr'])\n \n # Importing season, muni and time parameters\n self.beta = np.load(root + coef_path + 'rad_coef_merge.pickle')\n \n self.season = {}\n self.season['DK'] = self.beta['season']['coef_s'][0:4].reshape((1,4))[0]\n self.season['zones'] = self.beta['season']['coef_s'][4:8].reshape((1,4))[0]\n self.season['munis'] = self.beta['season']['coef_s'][8:12].reshape((1,4))[0]\n \n self.time = {}\n self.time['DK'] = self.beta['time']['coef_t'][0:24].reshape((1,24))[0]\n self.time['zones'] = self.beta['time']['coef_t'][24:48].reshape((1,24))[0]\n self.time['munis'] = self.beta['time']['coef_t'][48:72].reshape((1,24))[0]\n \n self.muni = self.beta['muni']['coef_m'].reshape((1,101))[0]\n \n self.GHI = self.fc_obj.GHI*10**(-3) # Scaled to MW\n self.KNr = self.fc_obj.muninr\n self.hour = (self.fc_obj.GHI.index[0].hour,\n self.fc_obj.GHI.index[-1].hour)\n self.minutes = (self.fc_obj.GHI.index[0].time().minute,\n self.fc_obj.GHI.index[-1].time().minute)\n self.t0 = pd.Timestamp(self.fc_obj.GHI.index[0].date())\n self.t1 = pd.Timestamp(self.fc_obj.GHI.index[-1].date())\n self.IndxSet = self.findIndx()\n self.rng_single_day = pd.date_range(self.t0 +\n pd.Timedelta(hours=self.hour[0],\n minutes=self.minutes[0]),\n self.t0 +\n pd.Timedelta(hours=self.hour[-1],\n minutes=self.minutes[-1]),\n freq=self.Time)\n\n self.rng = pd.date_range(self.t0 + pd.Timedelta(hours=self.hour[0],\n minutes=self.minutes[0]),\n self.t1 + pd.Timedelta(hours=self.hour[-1],\n minutes=self.minutes[-1]),\n freq=self.Time)", "def __init__(self, markers):\n self.markers = markers\n self.last_time = None # Used to keep track of time between measurements \n self.Q_t = 
np.eye(2)\n self.R_t = np.eye(3)\n # YOUR CODE HERE", "def update_maps(self):\n if self.fmodel is None:\n return\n def fft_map(map_coeffs, resolution_factor = 0.25):\n return map_coeffs.fft_map(resolution_factor = resolution_factor,\n ).apply_sigma_scaling().real_map_unpadded()\n map_types = [\"2mFo-DFc\", \"mFo-DFc\"]\n map_keys = [\"2mFo-DFc\", \"mFo-DFc\"]\n if (self.fmodel.f_obs().anomalous_flag()):\n if (self.params.anom_map_type == \"phaser\"):\n map_types.append(\"llg\")\n elif (self.params.anom_map_type == \"residual\"):\n map_types.append(\"anom_residual\")\n else :\n map_types.append(\"anom\")\n map_keys.append(\"anom\")\n if (self.use_svm):\n map_types.append(\"mFo\")\n map_keys.append(\"mFo\")\n # To save memory, we sample atomic positions immediately and throw out\n # the actual maps (instead of keeping up to 3 in memory)\n sites_frac = self.xray_structure.sites_frac()\n sites_cart = self.xray_structure.sites_cart()\n self._principal_axes_of_inertia = [ None ] * len(sites_frac)\n self._map_variances = [ None ] * len(sites_frac)\n self._map_gaussian_fits = {}\n self.calpha_mean_two_fofc = 0\n for map_type, map_key in zip(map_types, map_keys):\n real_map = self.get_map(map_type)\n if (real_map is not None):\n # Gather values for map peaks at each site\n self._map_values[map_key] = flex.double(sites_frac.size(), 0)\n self._map_gaussian_fits[map_key] = [ None ] * len(sites_frac)\n for i_seq, site_frac in enumerate(sites_frac):\n atom = self.pdb_atoms[i_seq]\n resname = atom.fetch_labels().resname.strip().upper()\n if (resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED or\n atom.segid.strip().upper() in [\"ION\"]):\n value = real_map.eight_point_interpolation(site_frac)\n self._map_values[map_key][i_seq] = value\n if (self.use_svm):\n gaussian_fit = utils.fit_gaussian(\n unit_cell=self.unit_cell,\n site_cart=atom.xyz,\n real_map=real_map)\n self._map_gaussian_fits[map_key][i_seq] = gaussian_fit\n\n if map_type in [\"2mFo-DFc\"]:\n # Gather values on map variance and principal axes of interia\n from cctbx import maptbx\n for i_seq, site_cart in enumerate(sites_cart):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n if resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED:\n # XXX not totally confident about how I'm weighting this...\n p_a_i = maptbx.principal_axes_of_inertia(\n real_map = real_map,\n site_cart = site_cart,\n unit_cell = self.unit_cell,\n radius = self.params.map_sampling_radius)\n self._principal_axes_of_inertia[i_seq] = p_a_i\n variance = maptbx.spherical_variance_around_point(\n real_map = real_map,\n unit_cell = self.unit_cell,\n site_cart = site_cart,\n radius = self.params.map_sampling_radius)\n self._map_variances[i_seq] = variance\n elif (i_seq in self.calpha_sel):\n # Also collect some info in average C_alpha 2FoFc peak heights\n self.calpha_mean_two_fofc += real_map.eight_point_interpolation(\n sites_frac[i_seq])\n del real_map\n\n if (self.calpha_mean_two_fofc > 0):\n n_calpha = len(self.calpha_sel)\n assert (n_calpha > 0)\n self.calpha_mean_two_fofc /= n_calpha\n\n # Gather info on carbons' average Fo peak height for use in estimating other\n # sites' atomic weight\n self.carbon_fo_values = None\n if (len(self.carbon_sel) > 0):\n self.carbon_fo_values = flex.double()\n self._map_values[\"mFo\"] = flex.double(sites_frac.size(), 0)\n fo_map = fft_map(self.fmodel.map_coefficients(\n map_type = \"mFo\",\n exclude_free_r_reflections = True,\n fill_missing = True))\n\n for i_seq, site_frac in enumerate(sites_frac):\n resname = 
self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n element = self.pdb_atoms[i_seq].element.strip()\n if (element == \"C\") or ((element == \"O\") and (resname in WATER_RES_NAMES)):\n map_value = fo_map.eight_point_interpolation(site_frac)\n self._map_values[\"mFo\"][i_seq] = map_value\n if (element == \"C\"):\n self.carbon_fo_values.append(map_value)\n del fo_map", "def make_df_an_table(an_string, site_name='DSW', min_moon_dist=MIN_MOON_DISTANCE,\n min_hours=MIN_HOURS_OBSERVABLE):\n an_string = str(an_string) # (precaution in case int passed in)\n an_object = Astronight(an_string, site_name)\n # dark_start, dark_end = an_object.ts_dark.start, an_object.ts_dark.end\n mid_dark = an_object.local_middark_utc\n # dark_no_moon_start, dark_no_moon_end = an_object.ts_dark_no_moon.start, an_object.ts_dark_no_moon.end\n mpfile_dict = make_mpfile_dict()\n\n an_dict_list = [] # results to be deposited here, to make a dataframe later.\n for mp in mpfile_dict.keys():\n mpfile = mpfile_dict[mp]\n # an_dict doesn't need to include defaults for case before or after mpfile ephemeris,\n # because making the dataframe should put in NANs for missing keys anyway (check this later):\n an_dict = {'MPnumber': mpfile.number, 'MPname': mpfile.name, 'Motive': mpfile.motive,\n 'Priority': mpfile.priority, 'Period': mpfile.period}\n # Interpolate within ephemeris (because MP is moving in sky); 2 iterations s/be enough:\n data, status, ts_observable, mp_radec = None, None, None, None # keep stupid IDE happy.\n best_utc = mid_dark # best_utc will = mid-observable time at converged RA,Dec.\n\n # Converge on best RA, Dec, observable timespan (they interact, as MP is moving):\n hours_observable = 0.0 # default to keep IDE happy.\n for i in range(2):\n data = mpfile.eph_from_utc(best_utc)\n if data is None:\n if mpfile.eph_range[1] < an_object.ts_dark.start:\n status = 'too late'\n else:\n status = 'too early'\n break\n status = 'ok'\n mp_radec = RaDec(data['RA'], data['Dec'])\n ts_observable = an_object.ts_observable(mp_radec,\n min_alt=MIN_MP_ALTITUDE,\n min_moon_dist=min_moon_dist) # Timespan object\n hours_observable = ts_observable.seconds / 3600.0\n mid_observable = ts_observable.midpoint # for loop exit\n best_utc = mid_observable # update for loop continuation.\n\n # Mark valid MPs that are observable too briefly:\n if status.lower() == 'ok':\n if hours_observable < min_hours:\n status = 'too brief'\n\n # For MPs observable this night, add one line to table:\n # print(mpfile.name, status)\n an_dict['Status'] = status\n if status.lower() == 'ok':\n an_dict['RA'] = data['RA']\n an_dict['Dec'] = data['Dec']\n an_dict['StartUTC'] = ts_observable.start\n an_dict['EndUTC'] = ts_observable.end\n an_dict['TransitUTC'] = an_object.transit(mp_radec)\n an_dict['MoonDist'] = mp_radec.degrees_from(an_object.moon_radec)\n an_dict['PhaseAngle'] = data['Phase']\n an_dict['V_mag'] = data['V_mag']\n an_dict['ExpTime'] = float(round(float(calc_exp_time(an_dict['V_mag'],\n EXP_TIME_TABLE_PHOTOMETRY))))\n if an_dict['Period'] is not None:\n # Duty cycle is % of time spent observing this MP if one exposure per 1/60 of period.\n an_dict['DutyCyclePct'] = 100.0 * ((an_dict['ExpTime'] + EXP_OVERHEAD) / 60.0) / \\\n an_dict['Period']\n else:\n an_dict['DutyCyclePct'] = None\n if status.lower() == 'ok':\n an_dict['PhotrixPlanning'] = 'IMAGE MP_' + mpfile.number + \\\n ' Clear=' + str(an_dict['ExpTime']) + 'sec(***) ' + \\\n ra_as_hours(an_dict['RA'], seconds_decimal_places=1) + ' ' + \\\n degrees_as_hex(an_dict['Dec'], 
arcseconds_decimal_places=0)\n if an_dict['Period'] is not None:\n an_dict['Coverage'] = make_df_coverage(an_dict['Period'],\n mpfile.obs_jd_ranges,\n (jd_from_datetime_utc(an_dict['StartUTC']),\n jd_from_datetime_utc(an_dict['EndUTC'])))\n an_dict['PhaseCoverage'] = make_df_phase_coverage(an_dict['Period'],\n mpfile.obs_jd_ranges)\n else:\n an_dict['Coverage'] = None\n an_dict_list.append(an_dict)\n if len(an_dict_list) == 0:\n return None\n df_an_table = pd.DataFrame(data=an_dict_list)\n df_an_table.index = df_an_table['MPnumber'].values\n df_an_table = df_an_table.sort_values(by='TransitUTC')\n return df_an_table", "def create_maps(self,data,tod,mjd,coords):\n features = np.log10(self.getFeatures(data))/np.log10(2)\n special_idx = np.where((features==16))[0]\n # This is for getting the stare data on more recent\n # calibration observations.\n point_data = self.get_point_data(data,special_idx)\n \n cel_maps = self.create_single_map(tod,\n coords['ra'],\n coords['dec'],\n self.source_positions['ra'][coords['sky_data_flag']],\n self.source_positions['dec'][coords['sky_data_flag']])\n az_maps = self.create_single_map(tod,\n coords['az'],\n coords['el'],\n self.source_positions['az'][coords['sky_data_flag']],\n self.source_positions['el'][coords['sky_data_flag']])\n cel_maps= self.average_maps(cel_maps)\n az_maps = self.average_maps(az_maps)\n xygrid = np.meshgrid((np.arange(self.Nx)+0.5)*self.dx - self.Nx*self.dx/2.,\n (np.arange(self.Ny)+0.5)*self.dy - self.Ny*self.dy/2.)\n \n \n cel_maps['xygrid']=xygrid\n cel_maps['StareCoords']= {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n az_maps['xygrid']=xygrid\n az_maps['StareCoords'] = {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n return cel_maps,az_maps", "def repeat(start, end, roi_times=None, timeres=2, coords=None, ar=None,\n split_temps=None, em_wlen=None, plotminmax=False, plotstd=False,\n hist_type='plain', loaddata=False):#, output=None):\n #if isinstance(output, str):\n # from matplotlib import use\n # use(output)\n import matplotlib.pyplot as plt\n import matplotlib.dates as mdates\n\n #loaddata = False\n\n print start, end\n start, end = parse(start), parse(end)\n \n s = []\n t = []\n p = []\n \n #if flares == []:\n # return s, t, p\n\n timerange = tr(start, end)\n delta = dt.timedelta(hours=timeres)\n ntimes = int(timerange.seconds()/delta.total_seconds())\n times = [time.start() for time in timerange.split(ntimes)]\n \n ntemps = 141\n tempsovertime = np.zeros((ntemps, ntimes))\n \n means = np.zeros(len(times))\n p95s = np.zeros(len(times))\n loopmeans = np.zeros(len(times))\n if plotminmax:\n maxes = np.zeros(len(times))\n mins = np.zeros(len(times))\n if plotstd:\n stds = np.zeros(len(times))\n loopstds = np.zeros(len(times))\n if em_wlen:\n meanem = np.zeros(len(times))\n if plotminmax:\n maxem = np.zeros(len(times))\n minem = np.zeros(len(times))\n if plotstd:\n stdem = np.zeros(len(times))\n\n for i, date in enumerate(times):\n data_only = True\n try:\n if ar == 'all':\n plotar = None\n else:\n plotar = ar\n results = output_maps(date, plotar, coords, 'data', split_temps,\n subimsize=50, calc_em=em_wlen, data_only=data_only)#True)#, linear=True)\n if isinstance(results, tuple):\n tempmap, emmap = results\n else:\n tempmap = results\n data = tempmap.data\n except DownloadError as de:\n data = np.zeros((512, 512))\n print de.msg\n except:\n print 'KHAAAAAAAN! 
Some part of the temperature-plotting process failed.'\n raise\n data = np.zeros((512, 512))\n if em_wlen:\n emmap = np.zeros((512, 512))\n \n t.append(np.nanmean(data))\n p.append(np.nanmax(data))\n \n data = data.flatten()\n data2 = data.copy()\n data2[data == 0.0] = np.NaN\n data2 = data2[np.isfinite(data)]\n data2.sort()\n temps, bins = np.histogram(data, bins=ntemps, density=False, range=(5.6, 7.0))\n temps = (temps/float(data.size))*100.0\n tempsovertime[:, i] = temps\n\n #loops = data[data >= split_temps]\n #data = data[data < split_temps]\n\n means[i] = np.nanmean(data2)\n try:\n p95s[i] = data2[round(0.95 * len(data2))-1]\n except IndexError:\n p95s[i] = np.NaN\n #loopmeans[i] = np.nanmean(loops)\n if plotminmax:\n maxes[i] = np.nanmax(data)\n mins[i] = np.nanmin(data)\n if em_wlen:\n maxem[i] = np.nanmax(emmap)\n minem[i] = np.nanmin(emmap)\n if plotstd:\n stds[i] = np.nanstd(data)\n if em_wlen:\n stdem[i] = np.nanstd(emmap)\n #loopstds[i] = np.nanstd(loops)\n \n tempsovertime[tempsovertime <= 0.1] = np.nan\n\n xmin, xmax = mdates.datestr2num([str(start), str(end)])\n fig = plt.figure(figsize=(36, 18))\n ax = fig.add_subplot(111, axisbg='k')\n plot_title = 'Temperature distribution of corona\\n{:%Y/%m/%d %H:%M} - {:%Y/%m/%d %H:%M}'.format(start, end)\n if roi_times:\n plot_title += '\\nRegion observed: {:%Y/%m/%d %H:%M} - {:%Y/%m/%d %H:%M}'.format(*roi_times)\n plt.title(plot_title)\n if hist_type == 'plain':\n plt.imshow(tempsovertime[30:106, :], extent=[xmin, xmax, 5.9, 6.65],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime[65:106, :]),\n vmax=np.nanmax(tempsovertime[65:106, :]))\n elif hist_type == 'loops':\n plt.imshow(tempsovertime[65:106, :], extent=[xmin, xmax, 6.25, 6.65],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime[65:106, :]),\n vmax=np.nanmax(tempsovertime[65:106, :]))\n elif hist_type == 'full':\n plt.imshow(tempsovertime, extent=[xmin, xmax, 5.6, 7.0],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime),\n vmax=np.nanmax(tempsovertime))\n plt.tight_layout()\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.colorbar(orientation='horizontal')\n plt.savefig('/media/huw/temp-time_hists/distribution_over_time_{}'.format(ar))\n plt.close()\n\n\n means[np.where(means == 0.0)] = np.nan\n if plotstd:\n stds[np.where(stds == 0.0)] = np.nan\n loopstds[loopstds == 0.0] = np.nan\n\n try:\n tnums = mdates.date2num([ti for ti in times])\n print maxes\n print len(maxes)\n fig = plt.figure(figsize=(18, 14))\n ax = fig.add_subplot(111)\n plt.title('Variation of temperature over time; AR{}'.format(ar), \n fontsize=32)\n plt.plot(tnums, maxes, label='Maximum temperature', color='red')\n plt.axhline(np.nanmean(maxes))\n print tnums\n print len(tnums)\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.legend(loc=4, fontsize=16)\n plt.xlabel('Date', fontsize=24)\n plt.ylabel('log(T)', fontsize=24)\n #plt.savefig('/media/huw/temp_plots/temp_plot_{}_b'.format(ar))\n plt.savefig('/home/drew/Dropbox/ARs/temps_{}_b'.format(ar))\n plt.close()\n\n \"\"\"diff = ((maxes-p95s)/p95s)*100.0\n fig = plt.figure(figsize=(18, 14))\n ax = fig.add_subplot(1, 1, 1)\n plt.title('Percentage difference between max and 95th %-ile; AR{}'.format(ar), \n fontsize=32)\n plt.plot(tnums, diff, color='black')\n plt.scatter(fldates, [np.nanmax(diff)]*len(fldates))\n for flare in flares:\n ax.text(sunpy.time.parse_time(flare['event_peaktime']), np.nanmax(diff)+0.01, 
flare['fl_goescls'][0])\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.xlabel('Date', fontsize=24)\n plt.ylabel('log(T)', fontsize=24)\n #plt.savefig('/media/huw/temp_plots/temp_plot_{}'.format(ar))\n plt.savefig('Dropbox/ARs/diffs_{}'.format(ar))\n plt.close()\"\"\"\n \n except:# ValueError:\n print \"Can't plot the temperature graph because matplotlib is being a whiney douche\"\n print tnums\n raise\n\n return s, t, p, times" ]
[ "0.62284875", "0.5733199", "0.54121506", "0.53654045", "0.5362718", "0.53480995", "0.53380924", "0.5298414", "0.5262806", "0.52612984", "0.52312875", "0.5173023", "0.5139651", "0.5129809", "0.5126058", "0.5113684", "0.51000977", "0.5095595", "0.5085868", "0.5080458", "0.5077105", "0.5057523", "0.50567466", "0.5055007", "0.505499", "0.50542855", "0.50395733", "0.5031074", "0.5021808", "0.50195307" ]
0.59192055
1
Create a text string summarizing how the motion correction was done.
def SummarizeMotionTargets(self): text = '\nSummary of motion-correction: \n' for epi in self.entry_map['epi']: info = self.info[epi] text += self.GetBase(epi, '') base = self.GetBase(info['base_entry'], '') text += ' ->3dvolreg-> %s[%s]' % (base, info['base']) if info['fmap_entry'] is not None: fmap = info['fmap_entry'] text += ' ->assume-registered-> %s' % self.GetBase(fmap, '') anat = self.info[fmap]['anat_ref'] if info['catmats']: text += ' ->3dAllineate-> %s' % \ self.GetBase(anat, '') else: text += ' ->assume-registered-> %s' % self.GetBase(anat, '') else: anat = info['anat_tgt'] text += ' ->assume-registered-> %s' % self.GetBase(anat, '') text += '\nEPIs should be in register with %s\n' % \ self.GetBase(self.anatomical, '') return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription", "def getDebugText(self):\n timeDifference = time.time() - self.time_created\n hours = math.floor(timeDifference / 3600)\n minutes = math.floor((timeDifference % 3600) / 60)\n seconds = math.floor(timeDifference % 3600 % 60)\n\n output = \"\\n\" * 50\n output += \"Time started: %s\\n\" % time.ctime(self.time_created)\n output += \"Time now: %s\\n\" % time.ctime()\n output += \"Time elapsed: %02d:%02d:%02d\\n\" % (hours, minutes, seconds)\n output += (\"=\" * 80) + \"\\n\"\n output += \"Health potions used: %d\\n\" % self.hp_pots_used\n output += \"Health potions per hour: %d\\n\" % (self.hp_pots_used / (\n timeDifference / 3600))\n output += \"Mana potions used: %d\\n\" % self.mana_pots_used\n output += \"Mana potions per hour: %d\\n\" % (self.mana_pots_used / (\n timeDifference / 3600))\n return output", "def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text", "def get_description(self):\n text = \"is a student's t distribution; characterised by its degrees of freedom, which here is\"+str(self.dofs)+\".\"\n return text", "def summarize(self):\n txtSumm = ''\n\n if self.legtype == 'Takeoff':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Landing':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Other':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Observing':\n txtSumm = \"%02d -- %s, RA: %s, Dec: %s, LegDur: %s, ObsDur: %s\" %\\\n (self.legno, self.target, self.ra, self.dec,\n str(self.duration),\n str(self.obsdur))\n txtSumm += \"\\n\"\n if self.nonsid is True:\n txtSumm += \"NONSIDERIAL TARGET -- NAIFID: %d\" % (self.naifid)\n txtSumm += \"\\n\"\n txtSumm += \"(The SOFIA project sincerely hopes you enjoy \"\n txtSumm += \"your observing breaks due to XFORMS crashes)\"\n txtSumm += \"\\n\"\n txtSumm += \"ObsPlan: %s, ObsBlk: %s\" % (self.obsplan, self.obsblk)\n txtSumm += \"\\n\\n\"\n txtSumm += \"Elevation Range: %.1f, %.1f\" % (self.range_elev[0],\n self.range_elev[1])\n txtSumm += \"\\n\\n\"\n txtSumm += \"ROF Range: %.1f, %.1f\" % (self.range_rof[0],\n self.range_rof[1])\n txtSumm += \"\\n\"\n txtSumm += \"ROF Rate Range: %.1f, %.1f %s\" % (self.range_rofrt[0],\n self.range_rofrt[1],\n self.range_rofrtu)\n txtSumm += \"\\n\\n\"\n txtSumm += \"True Heading Range: %.1f, %.1f\" % (self.range_thdg[0],\n self.range_thdg[1])\n txtSumm += \"\\n\"\n txtSumm += \"True Heading Rate Range: %.1f, %.1f %s\" %\\\n (self.range_thdgrt[0],\n self.range_thdgrt[1],\n self.range_thdgrtu)\n txtSumm += \"\\n\"\n txtSumm += \"Moon Angle: %.1f, Moon Illumination: %s\" %\\\n (self.moonangle, self.moonillum)\n\n return txtSumm", "def __str__(self):\n return f'{self.text}: {self.chs}, correct answer: {self.solution}'", "def description_text(self, P=None):\n\n if not P:\n P = self.parameters.values_to_dict()\n\n text = \"Calculated with {hamiltonian}, converged to \"\n # Convergence\n if P[\"convergence\"] == \"normal\":\n text += \"the 'normal' level of 1.0e-04 kcal/mol.\"\n elif P[\"convergence\"] == \"precise\":\n text += \"the 'precise' level of 1.0e-06 kcal/mol.\"\n elif P[\"convergence\"] == 
\"relative\":\n text += \"a factor of {relative} times the normal criterion.\"\n elif P[\"convergence\"] == \"absolute\":\n text += \"converged to {absolute}.\"\n\n if self.parameters[\"uhf\"].is_expr:\n text += (\n \" Whether to use spin-unrestricted SCF (UHF) for closed-shell molecules\"\n \"will be determined by '{uhf}'.\"\n )\n elif self.parameters[\"uhf\"].get():\n text += \" The SCF will be spin-unrestricted (UHF) for all molecules.\"\n else:\n text += (\n \" The SCF will be restricted for closed-shell molecules (RHF) and \"\n \"spin-unrestricted (UHF) for all others.\"\n )\n\n # MOZYME localized molecular orbitals.\n if P[\"MOZYME\"] == \"always\":\n text += (\n \"\\n\\nThe SCF will be solved using localized molecular orbitals \"\n \"(MOZYME), which is faster than the traditional method for larger \"\n \"systems.\"\n )\n used_mozyme = True\n elif P[\"MOZYME\"] == \"for larger systems\":\n text += (\n \"\\n\\nThe SCF will be solved using localized molecular orbitals \"\n \"(MOZYME) for systems with {nMOZYME} atoms or more. This method is \"\n \"faster than the traditional method for larger systems.\"\n )\n used_mozyme = True\n else:\n used_mozyme = False\n\n if used_mozyme:\n follow_up = P[\"MOZYME follow-up\"]\n if \"exact\" in follow_up:\n text += (\n \" The energy given by MOZYME slowly accumulates error due to the \"\n \"increasing non-orthogonality of the localized orbitals after \"\n \"many iterations. A single point energy using the traditional \"\n \"method will be run to get the correct energy.\"\n )\n elif \"new\" in follow_up:\n text += (\n \" The energy given by MOZYME slowly accumulates error due to the \"\n \"increasing non-orthogonality of the localized orbitals after \"\n \"many iterations. A single point energy using fresh localized \"\n \"orbitals will be run to get the correct energy.\"\n )\n elif follow_up == \"none\":\n text += (\n \" The energy given by MOZYME slowly accumulates error due to the \"\n \"increasing non-orthogonality of the localized orbitals after \"\n \"many iterations. No follow-up calculation will be done, so be \"\n \"careful with the final energies produced.\"\n )\n used_mozyme = False\n else:\n logger.error(f\"Don't recognize the MOZYME follow-up: '{follow_up}'\")\n\n # Handle COSMO\n if self.parameters[\"COSMO\"].is_expr:\n text += (\n \"\\n\\n'{COSMO}' will determine whether to use the COSMO solvation \"\n \"model. 
If it is used the parameters will be \"\n )\n elif self.parameters[\"COSMO\"].get():\n text += \"\\n\\nThe COSMO solvation model will be used with \"\n\n if self.parameters[\"COSMO\"].is_expr or self.parameters[\"COSMO\"].get():\n text += (\n \"dielectric constant = {eps}, solvent radius = {rsolve}, \"\n \"{nspa} grid points per atom, and a cutoff of {disex}.\"\n )\n\n # And bond orders\n if P[\"bond orders\"] == \"yes\":\n text += \"\\n\\nThe bond orders will be calculated.\"\n elif P[\"bond orders\"] == \"yes, and apply to structure\":\n text += (\n \"\\n\\nThe bond orders will be calculated and used to set the bonding \"\n \"for the structure.\"\n )\n\n return self.header + \"\\n\" + __(text, **P, indent=4 * \" \").__str__()", "def summarize(self):\n txtStr = \"%s to %s, %d flight legs.\" %\\\n (self.origin, self.destination, self.nlegs)\n txtStr += \"\\nTakeoff at %s\\nLanding at %s\\n\" %\\\n (self.takeoff, self.landing)\n txtStr += \"Flight duration of %s including %s observing time\" %\\\n (str(self.flighttime), self.obstime)\n\n return txtStr", "def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)", "def create_analysis(self):\n text = self.input_main.get(\"1.0\", \"end-1c\")\n if not text:\n return \"\"\n if self.ignore_case_value.get():\n text = text.lower()\n\n char_map = calc.char_mapping(text)\n unique_chars = len(char_map)\n entropy = calc.entropy(text)\n metric_entropy = calc.metric_entropy(text)\n optimal = calc.optimal_bits(text)\n\n info = \"\"\"Length: {}\nUnique chars: {}\nEntropy: {}\nMetric entropy: {}\nOptimal bit usage: {}\"\"\".format(\n len(text),\n unique_chars,\n entropy,\n metric_entropy,\n optimal\n )\n\n table_head = \" Char | Probability | Bits | Occurrences \"\n table_body = \"\\n\".join(\n [\n \" {:<4} | {:>11.7f} | {:>11.7f} | {:>11}\".format(\n char,\n prob, calc.prob_to_info(prob),\n text.count(char)\n )\n for char, prob in char_map\n ]\n )\n table = \"\\n\".join([table_head, table_body])\n\n return \"\\n\\n\".join([info, table])", "def get_summary(self):\n \n text = \"word: {}, total_score: {} \\n\".format(self.clue, self.total_score)\n for card, score in self.sorted_card_score_pairs:\n card_text = \"\\t card.name:{} (team:{}), similarity: {} \\n\".format(card.name, card.color, score)\n text += card_text\n return text", "def getOutput(self):\n text = \"\"\n text += \"*\"*self.getLevel() + \" \"\n if self.isTODO():\n text += \"TODO \"\n if self.isDONE():\n text += \"DONE \"\n text += self.getTitle()\n return text", "def prepareExplainerText(amount, ranges):\n text = \"\\n\"\n for currKey in amount:\n text += f\"{currKey}: {ranges[currKey]} | {amount[currKey]}\\n\"\n text += \"\\n\\n\"\n return text", "def result(self):\n return (\"MRR@\" + str(self.length) + \": \"), (self.pos / self.test)", "def summary_string(self) -> str:", "def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"", "def reconcile_output(self):\n final_string = \"\"\n final_string += \"TEXT: {0}\\n\".format(self._text)\n final_string += \"ID: {0}\\n\".format(self._index)\n final_string += \"Count: {0}\\n\".format(self._count)\n\n final_string += \"=Doc Count Begin=\\n\"\n for doc in 
list(self._doc_count.keys()):\n final_string += \"{0} $!$ {1}\\n\".format(doc, self._doc_count[doc])\n final_string += \"=Doc Count End=\\n\"\n\n final_string += \"=CEs Begin=\\n\"\n for doc in list(self.word_pairs.keys()):\n for ce in set(self.word_pairs[doc]):\n #format: doc $!$ ce \n final_string += \"{0} $!$ {1} $!$ {2}\\n\".format(doc, ce, self.word_pairs[doc].count(ce))\n final_string += \"=CEs End=\\n\"\n\n final_string += \"=Self Tags Begin=\\n\"\n for source in list(self.this_semantic_tags.keys()):\n for tag in set(self.this_semantic_tags[source]):\n final_string += \"{0} $!$ {1} $!$ {2}\\n\".format(source, tag, self.this_semantic_tags[source].count(tag))\n final_string += \"=Self Tags End=\\n\"\n\n final_string += \"=Semantic Begin=\\n\"\n for source in list(self.lexico_semantic.keys()):\n #for some reason, I had each semantic class getting assigned the\n #same overall count?\n #sem_counts = self.getLexicoSemanticCounts(source)\n for ce in self.lexico_semantic[source]:\n for sem_cls in set(self.lexico_semantic[source][ce]):\n c = self.lexico_semantic[source][ce].count(sem_cls)\n final_string += \"{0} $!$ {1} $!$ {2} $!$ {3}\\n\".format(source, ce, sem_cls, c)\n final_string += \"=Semantic End=\\n\"\n final_string += \"$!$\\n\"\n return final_string", "def _get_delta_text_string(self):\n textstring = \"\"\n if (\n self.is_commit_test is True\n ): # include commits if this is an analysis of commit history\n # Write SHA1 commits under examination\n if len(self.delta_fp_string_dict.delta_dict[\"commits\"]) > 0:\n textstring += (\n os.linesep + \"Commit history SHA1 for this analysis:\" + os.linesep\n )\n for sha1_commit in self.delta_fp_string_dict.delta_dict[\"commits\"]:\n textstring += \" \" + sha1_commit + os.linesep\n textstring += os.linesep\n elif (\n self.is_branch_test is True\n ): # include branches if this is a branch v branch analysis\n if len(self.delta_fp_string_dict.delta_dict[\"branches\"]) > 0:\n textstring += os.linesep + \"Branches under analysis:\" + os.linesep\n for branch in self.delta_fp_string_dict.delta_dict[\"branches\"]:\n textstring += \" \" + branch + os.linesep\n textstring += os.linesep\n\n # include added files\n if len(self.delta_fp_string_dict.delta_dict[\"added\"]) > 0:\n for added_file in self.delta_fp_string_dict.delta_dict[\"added\"]:\n add_append_string = \"[A]:\" + added_file + os.linesep\n textstring += add_append_string\n # include deleted files\n if len(self.delta_fp_string_dict.delta_dict[\"deleted\"]) > 0:\n for deleted_file in self.delta_fp_string_dict.delta_dict[\"deleted\"]:\n del_append_string = \"[D]:\" + deleted_file + os.linesep\n textstring += del_append_string\n # include modified files\n if len(self.delta_fp_string_dict.delta_dict[\"modified\"]) > 0:\n for modified_file in self.delta_fp_string_dict.delta_dict[\"modified\"]:\n mod_append_string = \"[M]:\" + modified_file + os.linesep\n textstring += mod_append_string\n\n return textstring", "def get_text(self):\n text_complet = \"\"\n rez_dict = self.__results\n for i in range(0, len(rez_dict[\"text\"])):\n text = rez_dict[\"text\"][i]\n conf = int(rez_dict[\"conf\"][i])\n if conf > self.__min_confidence:\n text_complet += text + \" \"\n return text_complet", "def text(cfg, phase, high=6):\n short = cfg[\"fake\"].sentence(\n nb_words=high, variable_nb_words=True, ext_word_list=None\n )\n return \"{} {}\\n\\n{}\".format(\" \".join(cfg[phase]), short, blurb(cfg))", "def _create_formatted_string(self):\n string = NALSyntax.StatementSyntax.Start.value + \\\n 
self.get_subject_term().get_formatted_string()\n\n string += \" \" + self.get_copula_string() + \" \"\n\n string += self.get_predicate_term().get_formatted_string() + \\\n NALSyntax.StatementSyntax.End.value\n\n return string", "def get_str_metadata(self):\n return \"\\n\".join([\"Guessed by {}\".format(self.guessed_by), \"{} metaphors used\".format(self.metaphors_used)])", "def create_report(self):\n text_string = \"\\n\"\n text_string += \"Donor Name | Total Given | Num Gifts |\\\n Average Gift\\n\\\n ------------------------------------------------------------------\\n\"\n for donor in sorted(self.donors, key=Donor.sort_key, reverse=True):\n text_string += f\"{donor.name:<26} $ {donor.total_donations:>11.2f}\\\n {donor.number_of_donations:>10} $ {donor.average_gift:>11.2f}\\n\"\n\n return text_string", "def gen_analysis_text(num_data, usage_flag, labelled_landmark, landmark_name, error_summary):\n analysis_text = r'<p style=\"color:red;\">Basic information:</p>'\n analysis_text += '<p style=\"color:black;\">Landmark name: {0}.</p>'.format(landmark_name)\n analysis_text += '<p style=\"color:black;\"># cases in total: {0}.</p>'.format(num_data)\n labelled_landmarks_stat = get_landmarks_stat(labelled_landmark)\n \n analysis_text += r'<p style=\"color:black;\"># cases having this landmark (Pos. cases): {0}.</p>'.format(\n len(labelled_landmarks_stat[landmark_name]['pos']))\n analysis_text += r'<p style=\"color:black;\"># cases missing this landmark (Neg. cases): {}.</p>'.format(\n len(labelled_landmarks_stat[landmark_name]['neg']))\n if len(labelled_landmarks_stat[landmark_name]['neg']) > 0:\n missing_cases = copy.deepcopy(labelled_landmarks_stat[landmark_name]['neg'])\n missing_cases.sort()\n analysis_text += r'{}'.format(missing_cases)\n\n if usage_flag == 2:\n tp_cases = error_summary.tp_cases[landmark_name]\n tn_cases = error_summary.tn_cases[landmark_name]\n fp_cases = error_summary.fp_cases[landmark_name]\n fn_cases = error_summary.fn_cases[landmark_name]\n num_pos_cases = len(tp_cases) + len(fn_cases)\n num_neg_cases = len(tn_cases) + len(fp_cases)\n # compute TPR, TNR, FPR, FNR\n TPR = len(tp_cases) / max(1, num_pos_cases) * 100 \\\n if len(tp_cases) != 0 or num_pos_cases != 0 else 100\n TNR = len(tn_cases) / max(1, num_neg_cases) * 100 \\\n if len(tn_cases) != 0 or num_neg_cases != 0 else 100\n FPR = 100 - TNR\n FNR = 100 - TPR\n mean_error = error_summary.mean_error_tp[landmark_name]\n std_error = error_summary.std_error_tp[landmark_name]\n median_error = error_summary.median_error_tp[landmark_name]\n max_error = error_summary.max_error_tp[landmark_name]\n analysis_text += r'<p style=\"color:red;\"> Landmark classification error: </p>'\n analysis_text += r'<p style=\"color:black;\">TP (TPR): {0} ({1:.2f}%)</p>'.format(\n len(tp_cases), TPR)\n analysis_text += r'<p style=\"color:black;\">TN (TNR): {0} ({1:.2f}%)</p>'.format(\n len(tn_cases), TNR)\n analysis_text += r'<p style=\"color:black;\">FP (FPR): {0} ({1:.2f}%)</p>'.format(\n len(fp_cases), FPR)\n analysis_text += r'<p style=\"color:black;\">FN (FNR): {0} ({1:.2f}%)</p>'.format(\n len(fn_cases), FNR)\n analysis_text += r'<p style=\"color:red;\"> Landmark distance error for the {} TP cases (unit: mm): </p>'.format(\n len(tp_cases))\n analysis_text += r'<p style=\"color:black;\">mean (std): {0:.2f} ({1:.2f})</p>'.format(\n mean_error, std_error)\n analysis_text += r'<p style=\"color:black;\">median: {0:.2f}</p>'.format(median_error)\n analysis_text += r'<p style=\"color:black;\">max: {0:.2f}</p>'.format(max_error)\n\n return 
analysis_text", "def get_intro_message() -> str:\n return \"\"\"You are about to begin a new record.\nType the text sample you want to record.\nThis first sample MUST be typed by the real user (no impostor data).\"\"\"", "def description(self) -> str:\r\n descrip = 'The player must aim to put the most possible units of a ' \\\r\n 'given colour c on the outer perimeter of ' \\\r\n 'the board. The ' \\\r\n 'player’s score is the total number of unit cells ' \\\r\n 'of colour ' \\\r\n 'c that are on the perimeter. There is a ' \\\r\n 'premium on corner ' \\\r\n 'cells: they count twice towards the score. '\r\n return descrip", "def add_details(self):\n\n if self.co.algorithm == \"vv\":\n algo = \"Verlocity Verlot\"\n if self.co.algorithm == \"rk4o\":\n algo = \"Runge Kutta Forth Order\"\n if self.co.algorithm == \"herm\":\n algo = \"Hermite Fourth Order\"\n\n self.algorithm_title = self.ax.text(\n 1.01, 0.65, \"Algorithm:\", transform=self.ax.transAxes\n )\n self.algorithm_text = self.ax.text(\n 1.01, 0.58, algo, transform=self.ax.transAxes\n )\n self.timestep_text = self.ax.text(\n 1.01, 0.51, \"dt =\" + str(self.co.tstep), transform=self.ax.transAxes\n )\n self.length_softening_distance = self.ax.text(\n 1.01,\n 0.44,\n r\"$\\epsilon$ = \" + str(self.co.epsilon),\n transform=self.ax.transAxes,\n )", "def text_report(self):\n\n word_count = self.word_count()\n\n print(\"\\nThere are {} words in the text.\".format(word_count))\n mean, median, mode = self.average_word_length()\n\n print(\"\\nMean, median and mode word length is {}, {}, {}.\".format(mean, median, mode))\n\n if word_count < 10:\n print(\"\\nLongest words:\")\n else:\n print(\"\\n10 longest words:\")\n for s in self.longest_words():\n print(s)\n\n print(\"\\nMost common words:\")\n for s in self.common_words():\n print(\"{} x {}\".format(s[1], s[0]))\n\n longest_grams = []\n\n # find n_longest n-grams\n n_longest = 10\n # strongly doubt that there will be n-grams longer than 50\n for i in range(min(50, word_count), 1, -1):\n if len(longest_grams) >= n_longest:\n break\n grams = self.find_ngrams(i)\n grams_list = sorted(grams, key=grams.get, reverse=True)\n\n for g in grams_list:\n if grams[g] > 4:\n # do not want to include n-grams which are substrings of longer n-grams\n substring = False\n for s in longest_grams:\n if g in s[1]:\n substring = True\n break\n if not substring:\n longest_grams.append([grams[g], g])\n\n print(\"\\nLongest n-grams:\")\n for g in longest_grams:\n print(\"{} x {}\".format(g[0], g[1]))\n print('\\n')", "def GetDescription(cls):\n return textwrap.dedent('''\n This trace step includes a diagram of the Ego long. acceleration in the report.\n ''').strip()", "def __str__(self):\n return \"{}\\n{}\\n{}\\n{}\".format(self.header,self.sequence,self.line3,self.quality)" ]
[ "0.6384936", "0.634679", "0.63305056", "0.63101876", "0.62891483", "0.62351215", "0.62246543", "0.6216952", "0.61375284", "0.6118015", "0.6031679", "0.597089", "0.59577096", "0.59560966", "0.5955112", "0.59385556", "0.5877444", "0.58648694", "0.58440596", "0.58404136", "0.58381337", "0.5812276", "0.58087564", "0.5801023", "0.57973653", "0.57815874", "0.5734633", "0.57278526", "0.57041997", "0.56971264" ]
0.69408065
0
Find the correct ref.dat file for each pfile.
def _GetRefdat(self): for rfile in self.refdats.keys(): # Get times for ref.dat files with a time-stamp. words = rfile.replace('.','_').split('_') if len(words) == 6 and words[-2].count(':') == 20: # This file was time-stamped by the sequence. Get the # date and time. file name format: # ref_Sep_9_2007_11:28:32.dat rtime[rfile] = hms_to_secs(words[-2]) for pfile in self.pfiles: min_difftime = 1.e20 self.info[pfile]['refdat'] = None for rfile in self.refdats.keys(): if rfile[:3] == 'ref' and 'dat' in rfile: # This is a reference data file. First see if the orientation is # appended. If the file has neither a time-stamp nor a plane and # there is more than one ref.dat, the epi reconstruction will # be aborted. rinfo = {} ref_file = None if 'sag' in rfile and self.info[pfile]['plane'] == 'sagittal': # self.info[pfile]['refdat'] = rfile ref_file = rfile break elif 'cor' in rfile and self.info[pfile]['plane'] == 'coronal': # self.info[pfile]['refdat'] = rfile ref_file = rfile break elif 'axial' in rfile and self.info[pfile]['plane'] == 'axial': # self.info[pfile]['refdat'] = rfile ref_file = rfile break elif len(self.refdats.keys()) == 1: # Use the only one if that is all there is. ref_file = rfile epi_time = hms_to_secs(self.info[pfile]['acqtime'].split()[-2]) if epi_time - rtime[rfile] < min_difftime and \ rftime[rfile] > epi_time: # Use the reference file that acquired nearest to the EPI # but before it. min_difftime = epi_time - rtime[rfile] # self.info[pfile]['refdat'] = rfile ref_file = rfile if ref_file: # Found a candidate. if not self.info[pfile]['refdat']: # Haven't found one yet, use it. self.info[pfile]['refdat'] = ref_file else: # Found two. Choose one in the same directory. oldpath = os.path.dirname(self.info[pfile]['refdat']) newpath = os.path.dirname(ref_file) pfile_path = os.path.dirname(pfile) if oldpath == newpath: # Same path, use the old one. self.info[pfile]['refdat'] = ref_file elif newpath == pfile_path: self.info[pfile]['refdat'] = ref_file # else Do nothing, use existing choice. elif not os.path.exists(rfile): self.info[pfile]['refdat'] = None elif os.stat(rfile).st_size > 0: # This path is taken if no info is encoded in the file name. # Don't use empty ref.dat files. self.info[pfile]['refdat'] = rfile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}", "def get_input_file(self, *args, refsep='$', docopy=True):\n # filename = self.get_data(*args, docopy=docopy)\n filename = args[1]\n ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE',\n 'DGEOFILE', 'MDRIZTAB'])\n print(\"Looking for REF_FILES: {}\".format(ref_files))\n\n for ref_file in ref_files:\n if ref_file.strip() == '':\n continue\n if refsep not in ref_file: # Local file\n refname = self.get_data('customRef', ref_file)\n else: # Download from FTP, if applicable\n refname = os.path.join(ref_file)\n if self.use_ftp_crds:\n download_crds(refname, self.timeout)\n return filename", "def getFileReferences():\n refNodes = pm.ls(rf=True)\n fileRefs = [r.referenceFile() for r in refNodes]\n return fileRefs", "def _find_ref_fname(fname, ref_fname):\n curr_dir = \"\"\n next_dir = os.path.dirname(os.path.abspath(fname))\n while next_dir != curr_dir:\n curr_dir = next_dir\n rcfile = os.path.join(curr_dir, ref_fname)\n if os.path.exists(rcfile):\n return rcfile\n next_dir = os.path.dirname(curr_dir)\n return \"\"", "def step040():\n logger.logMessage('Begin: matching work files')\n sKey = ''\n mKey = ''\n def readFile(f):\n line = f.readline().rstrip()\n if line == '':\n key = 'ZZZZZZZZZZZZZZZZZZZZZZZZZ'\n return None,key\n else:\n sp = line.split(';')\n key = '{0:25s}'.format(sp[1])[0:19]\n return sp,key\n\n m = open(dbDumpFile,'r')\n s = open(sortedCandidatesFile,'r')\n numrecs = 0\n with open(matchFile,'w') as match:\n mFields,mKey = readFile(m)\n sFields,sKey = readFile(s)\n while mFields != None or sFields != None:\n if sKey == mKey:\n match.write('{0:014d};{1:25s};{2:32s};{3:31s}\\n'.format(int(mFields[0]),mKey,sFields[2],sFields[3]))\n numrecs += 1\n if numrecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records matched\".format(numrecs))\n sFields,sKey = readFile(s)\n mFields,mKey = readFile(m)\n elif sKey < mKey:\n sFields,sKey = readFile(s)\n else:\n logger.logMessage(level='WARNING',message='Record not matched: {0}'.format(mFields))\n mFields,mKey = readFile(m)\n logger.logMessage(\"Total matched: {0:d}\".format(numrecs))\n\n m.close()\n s.close()\n logger.logMessage('End : matching work files')", "def find_FEfiles(self):\n \n\t\t# take each item in the list of files to eve.\n for i in self.files_to_find:\n filename1 = i[0] \n file1_dye = i[1]\n filename2 = i[2]\n file2_dye = i[3]\n out_file_prefix=i[4]\n\n # set filename as none to help identify when no match has been found below\n file1_filename = None\n file2_filename = None\n\n # search for a FE file which matches the filename pattern\n for afile in os.listdir(self.chosenfolder):\n # file 1\n if fnmatch.fnmatch(afile, filename1):\n file1_filename = afile\n \n # file 2\n if fnmatch.fnmatch(afile, filename2):\n file2_filename = afile\n \n # if both files have been identified add this to a new list\n if file1_filename and file2_filename:\n self.list_of_files.append((file1_filename, file1_dye, file2_filename, file2_dye,out_file_prefix))\n\t\t\t# if either file could not be identified report this.\n else:\n raise ValueError(\"no match for \" + filename1 + \" and \" + filename2)", "def main():\n \n lookupslocation = 'C:\\\\Users\\\\gwilliams\\\\Desktop\\\\Python Experiments\\\\work projects\\\\FaresIndexSourceData\\\\regulated_fares_data\\\\'\n destination = 'C:\\\\Users\\\\gwilliams\\\\Desktop\\\\Python Experiments\\\\work projects\\\\FaresIndexSourceData\\\\regulated_fares_data\\\\comparison 
output\\\\'\n lookupfileslist, count = getdata(lookupslocation)\n\n print(f\"there are {count} files found.\")\n\n newlookup = lookupfileslist[0]\n oldlookup = lookupfileslist[1]\n\n #join new to old // old to new\n new_uniquevalues = pd.merge(left=newlookup,right=oldlookup,how='left',\n left_on=['orig','dest','route','ticket'],right_on=['orig','dest','route','ticket'])\n\n old_uniquevalues = pd.merge(left=newlookup,right=oldlookup,how='right',\n left_on=['orig','dest','route','ticket'],right_on=['orig','dest','route','ticket'])\n\n print(\"These are values unique to new lookup\") \n new_uniquevalues = new_uniquevalues[new_uniquevalues.ticketa.isnull()==True]\n exportfile(new_uniquevalues,destination,'unique_new_values',1)\n\n print(\"These are values unique to old lookup\")\n old_uniquevalues = old_uniquevalues[old_uniquevalues.new_flag.isnull()==True]\n exportfile(old_uniquevalues,destination,'unique_old_values',1)", "def readdata(self, reflist , comment = '#' , regexp = None , substr = None, filename = True):\n self.kpunten = []\n datalist = []\n prefixlist = []\n if os.path.isfile(str(reflist)):\n reflist = [reflist] #if we work with only one file this wraps it automatically in right format\n for ref in reflist:\n print('start with the collection of data from file %s' %ref)\n plotf = open(ref, 'r')\n if not filename:\n prefixlist.append( os.path.dirname(ref) + '/')\n else:\n prefixlist.append(re.sub('\\.dat$' , '' , ref))\n try:\n if regexp != None:\n raise ValueError\n dataf = np.loadtxt(plotf,comments = comment)\n print 'we readed data in with np.loadtxt'\n except:\n print('reading in data with numpy loadtxt failed or use reg exp to extract information')\n dataf = np.array([])\n kpuntenf = []\n plotf.seek(0) #go back to beginning of file\n for line in plotf:\n if regexp is not None:\n analyse = re.search(regexp,line)\n if analyse:\n kpuntenf.append((analyse.group(1), len(dataf)-1 ))\n print 'we found the following matches: %s' % analyse.group(0)\n if substr != None: \n line = re.sub(substr, '' , line)\n if line[0] != comment:\n #print line\n pline = np.array(map(float,line.split()))\n if len(dataf) <= 1:\n dataf = pline\n else:\n try:\n dataf = np.vstack((dataf,pline))\n except:\n continue\n self.kpunten.append(kpuntenf)\n datalist.append(dataf)\n\n plotf.close()\n self.datarg = datalist\n self.prefix = prefixlist\n self.reader = dr.ReaderOutput(reflist[0]) #Some plotting functions need a bit more information this info is extracted from the header of the files\n self.reader.depvar['depvar'] += ' (a.u.)'", "def merge_tables():\r\n filename = \"ppxf_results_best.dat\"\r\n s1 = np.genfromtxt(filename, usecols=(0,), dtype=None).tolist()\r\n sref = s1[:]\r\n sref.sort()\r\n x, y = get_positions(sref).T\r\n r = np.sqrt(x * x + y * y)\r\n pa = np.rad2deg(np.arctan2(x, y))\r\n pa[pa < 0.] += 360.\r\n data1 = np.loadtxt(filename, usecols=np.arange(1, 11))\r\n ##########################################################################\r\n # Account for difference in resolution\r\n # Not used anymore because the resolution is now matched in pPXF\r\n # fwhm_dif = (2.5 - 2.1) * c / 5500. 
/ 2.3548\r\n # data1[:,2] = np.sqrt(data1[:,2]**2 - fwhm_dif**2)\r\n ##########################################################################\r\n data1 = match_data(s1, sref, data1)\r\n results = np.column_stack((sref, x, y, r, pa, data1))\r\n header = ['FILE', \"X[kpc]\", \"Y[kpc]\",\r\n \"R[kpc]\", \"PA\",\r\n 'V', 'dV', 'S', 'dS', 'h3', 'dh3',\r\n 'h4', 'dh4', 'chi/DOF', 'S/N']\r\n with open(outtable, \"w\") as f:\r\n for i, field in enumerate(header):\r\n print \"# {0} : {1}\\n\".format(i, field)\r\n f.write(\"# {0} : {1}\\n\".format(i, field))\r\n np.savetxt(f, results, fmt=\"%s\")\r\n return", "def FindFile(self, fd):\n hashes = self._HashFile(fd)\n\n urns_to_check = []\n\n for hash_type, hash_digest in hashes.ListFields():\n hash_digest = str(hash_digest)\n hash_type = hash_type.name\n\n fingerprint_type = \"generic\"\n if hash_type.startswith(\"pecoff_\"):\n fingerprint_type = \"pecoff\"\n hash_type = hash_type[len(\"pecoff_\"):]\n if hash_type not in self.HASH_TYPES[fingerprint_type]:\n continue\n\n file_store_urn = self.PATH.Add(fingerprint_type).Add(\n hash_type).Add(hash_digest)\n\n urns_to_check.append(file_store_urn)\n\n return [data[\"urn\"] for data in aff4.FACTORY.Stat(urns_to_check,\n token=self.token)]", "def phys_match():\n # Get list of physiological files\n ppg_files = glob(phys_dir+'PPGData*')\n resp_files = glob(phys_dir+'RESPData*')\n ecg_files = glob(phys_dir+'ECG2Data*')\n # Match to runs\n for rn in dcm_dict.keys():\n # Initiate dictionary entries\n dcm_dict[rn]['ppg_file'] = 'File missing'\n dcm_dict[rn]['resp_file'] = 'File missing'\n dcm_dict[rn]['ecg_file'] = 'File missing'\n # Match time stamp\n # Using only hour and minute due to second mismatch\n # Need to fix\n time_stamp = dcm_dict[rn]['end_time'].strftime('%m%d%Y%H_%M')\n for ppg in ppg_files:\n if time_stamp in ppg:\n dcm_dict[rn]['ppg_file'] = ppg\n for resp in resp_files:\n if time_stamp in resp:\n dcm_dict[rn]['resp_file'] = resp\n for ecg in ecg_files:\n if time_stamp in resp:\n dcm_dict[rn]['ecg_file'] = ecg", "def _get_refpaths(data_dict, reference_file_types, observatory):\n if not reference_file_types: # [] interpreted as *all types*.\n return {}\n with crds_cache_locking.get_cache_lock():\n bestrefs = crds.getreferences(\n data_dict, reftypes=reference_file_types, observatory=observatory)\n refpaths = {filetype: filepath if \"N/A\" not in filepath.upper() else \"N/A\"\n for (filetype, filepath) in bestrefs.items()}\n return refpaths", "def match(p_file, s_file, matched_p_file, matched_s_file):\n\n log.info('Matching p and s arrivals')\n\n p_arr = pd.read_csv(p_file, header=None, names=column_names, sep=' ')\n s_arr = pd.read_csv(s_file, header=None, names=column_names, sep=' ')\n\n blocks = pd.merge(p_arr[['source_block', 'station_block']],\n s_arr[['source_block', 'station_block']],\n how='inner',\n on=['source_block', 'station_block'])\n matched_P = pd.merge(p_arr, blocks, how='inner',\n on=['source_block', 'station_block'])[column_names]\n matched_S = pd.merge(s_arr, blocks, how='inner',\n on=['source_block', 'station_block'])[column_names]\n matched_P.to_csv(matched_p_file, index=False, header=False, sep=' ')\n matched_S.to_csv(matched_s_file, index=False, header=False, sep=' ')", "def find_reference_files():\n for root, _, files in os.walk(\"./tests/references/\"):\n for basename in fnmatch.filter(files, \"*.tex\"):\n yield os.path.join(root, basename)", "def _parse_reference_fofn( reference_fofn, target_locus ):\n log.info(\"Parsing reference sequence FOFN\")\n with open( 
reference_fofn, 'r' ) as handle:\n for line in handle:\n filename, locus = line.strip().split()\n print locus, target_locus\n if locus == target_locus:\n return filename\n msg = 'No fasta file for target locus found!'\n log.error( msg )\n raise ValueError( msg )", "def get_tool_version_files():\n similar_files = defaultdict(list)\n for path in Runtime_Datasets.RAW_FILE_PATHS:\n filename = get_file_name(path)\n filename = filename.rsplit('_', 1)[0]\n similar_files[filename].append(path)\n\n Runtime_Datasets.RAW_FILE_PATHS = similar_files", "def get_nsite_DMRfind(inputf,output,samples,path_to_allc=\"\",mc_type=[\"C\"],num_procs=1,use_mc_status=True,min_cov=0):\n #dictionary of sample_name -> file handle\n allc_files = {}\n allc_lines = {}\n allc_fields = {}\n allc_prevbyte = {} #sample_name -> prevbyte (started from) in the file\n with open(inputf,'r') as f, open(output,'w') as g:\n line = f.readline()\n line = line.rstrip(\"\\n\")\n fields = line.split(\"\\t\")\n prefix_len = len(fields) #number of fields in original file\n mc_type = expand_nucleotide_code(mc_type)\n g.write(\"\\t\".join(fields[:prefix_len])+\"\\t\"+\"\\t\".join([\"nsite_\"+sample for sample in samples])+\"\\n\")\n prev_chrom = \"\"\n prev_end = \"\"\n dmr_lines=[]\n methylation_levels = {}\n for line in f:\n line = line.rstrip(\"\\n\")\n dmr_lines.append(line)\n if num_procs == 1:\n for sample in samples:\n methylation_levels[sample]=get_nsite_DMRfind_worker(dmr_lines,mc_type,sample,path_to_allc,output,min_cov,use_mc_status=False)\n else:\n pool = Pool(num_procs)\n results = {}\n for sample in samples:\n results[sample]=pool.apply_async(get_nsite_DMRfind_worker,(dmr_lines,mc_type,sample,path_to_allc,output,min_cov),{\"use_mc_status\":False})\n pool.close()\n pool.join()\n for sample in results:\n methylation_levels[sample]=results[sample].get()\n temp_files = {}\n for sample in samples:\n temp_files[sample]=open(output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\",'r')\n\n for index,line in enumerate(dmr_lines):\n g.write(line)\n for sample in samples:\n #g.write(\"\\t\"+methylation_levels[sample][index])\n g.write(\"\\t\"+temp_files[sample].readline().rstrip(\"\\n\"))\n g.write(\"\\n\")\n for sample in samples:\n temp_files[sample].close()\n subprocess.check_call(shlex.split(\"rm \"+output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\"))", "def lookup_ifproc_file(obsnum, path='/data_lmt/ifproc/', debug=False):\n paths = [path]\n\n if 'ifproc' not in path:\n paths += ['/data_lmt/ifproc/']\n if 'lmtttpm' not in path:\n paths += ['/data_lmt/lmttpm/']\n if 'tel' not in path:\n paths += ['/data_lmt/tel/']\n\n if debug:\n print(paths)\n\n for path in paths:\n filenames = glob.glob(os.path.join(path, '*_%06d_*.nc' % obsnum))\n if len(filenames) > 0:\n if debug:\n print('found %s' % (filenames[0]))\n return filenames[0]\n return ''\n #filename = ''\n #for file in os.listdir(path):\n # if fnmatch.fnmatch(file,'*_%06d_*.nc'%(obsnum)):\n # print('found %s'%(file))\n # filename = path+file\n #if filename == '':\n #print('lookup_ifproc_file: no file for obsnum ', obsnum)\n #if 'lmttpm' not in path:\n # print('look in lmttpm')\n # return lookup_ifproc_file(obsnum,path='/data_lmt/lmttpm/')\n #return(filename)", "def diff_files(self):\n pdup = []\n # Print out files that are only found in the DB\n if self.comparison_info['dbonly']:\n print(\"Files only found in the database --------- \")\n for fname in sorted(self.comparison_info['dbonly']):\n fdb = self.files_from_db[fname]\n print(f\"\\t{fdb['path']}/{fname}\")\n\n # 
print out files that are only found on disk\n if self.comparison_info['diskonly']:\n print(\"\\nFiles only found on disk --------- \")\n for fname in sorted(self.comparison_info['diskonly']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fdisk['relpath']}/{fname}{addon}\")\n if self.comparison_info['pathdup']:\n print(\"\\n The following files had multiple paths on disk (path filesize):\")\n listing = {}\n for fname in self.comparison_info['pathdup']:\n pdup.append(fname)\n listing[self.comparison_info['pathdup']['relpath']] = self.comparison_info['pathdup']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different paths on disk and in the DB\n if self.comparison_info['path']:\n print(\"\\nPath mismatch (file name, db path, disk path) --------- \")\n for fname in sorted(self.comparison_info['path']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname}\\t{fdb['path']}\\t{fdisk['relpath']}{addon}\")\n if self.comparison_info['duplicates']:\n print(\" The following files have multiple disk paths on disk (path filesize):\")\n for fname in self.comparison_info['duplicates']:\n pdup.append(fname)\n listing[self.comparison_info['duplicates']['relpath']] = self.comparison_info['duplicates']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different file sizes on disk and in the DB\n if self.comparison_info['filesize']:\n print(\"\\nFilesize mismatch (File name, size in DB, size on disk) --------- \")\n for fname in sorted(self.comparison_info['filesize']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['filesize']} {fdisk['filesize']}\")\n\n # Print files that have different md5sum on disk and in DB\n if self.md5sum and 'md5sum' in self.comparison_info and self.comparison_info['md5sum']:\n print(\"\\nmd5sum mismatch (File name, sum in DB, sum on disk) --------- \")\n for fname in sorted(self.comparison_info['md5sum']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['md5sum']} {fdisk['md5sum']}\")\n\n # Print out files that have multiple paths on disk\n if len(self.duplicates) > len(pdup):\n print(\"\\nThe following files have multiple disk paths on disk (path filesize):\")\n for dup in sorted(self.duplicates):\n if dup not in pdup:\n listing = {}\n for fls in self.duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_db and self.files_from_db[dup]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")\n\n # Print out files that have multiple endtries in the DB\n if self.db_duplicates:\n print(\"\\nThe following files have multiple entries in the database (path filesize):\")\n for 
dup in sorted(self.db_duplicates):\n listing = {}\n for fls in self.db_duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_disk and self.files_from_disk[dup]['path'] == pth:\n addon = \" (Disk Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")", "def test_013_find_files(self):\n HEADING()\n db = self.db\n\n db.connect()\n\n # Clear all jobs currently in the database to ensure a correct final assertion\n db.clear()\n\n # Add the jobs outlined in the YAML file\n db.add_from_yaml(\"etc/jobs.yaml\")\n inputs, outputs = db.find_jobs_with_file(\"in1.txt\")\n\n # Assert that the lengths of the inputs and outputs arrays are correct\n count_fgrep = len(Shell.fgrep(\"in1.txt\", \"etc/jobs.yaml\").strip().split(\"\\n\"))\n assert(len(inputs) == count_fgrep)", "def check_file_references(name):\n references = []\n for root, dirs, files in walk(Path(\".\")):\n for file_name in files:\n if is_notebook(file_name):\n nb_markdown_cells = markdown_cells(os.path.join(root, file_name))\n for cell in nb_markdown_cells:\n for line in cell:\n if name in line:\n references.append(file_name)\n else:\n with open(os.path.join(root, file_name), encoding=\"utf8\", errors='ignore') as non_nb_file:\n if name in non_nb_file.read():\n references.append(file_name)\n return references", "def add_references_to_papers(infile, dir):\n papers = json.load(open(infile))\n for paper in papers:\n for file in os.listdir(dir):\n if file.split(\".txt\")[0] == paper['doi']: # Must find the correct file to parse\n filename = TEXT_DIR+file\n refs =extract_references_from_txt(filename) #Uses the text files to find references\n paper['references']=refs\n return papers", "def XPLMFindDataRef(inDataRefName):\n return int", "def get_refpaths_from_filename(filename, reference_file_types, observatory=None):\n from .. 
import datamodels\n with datamodels.open(filename) as model:\n refpaths = get_multiple_reference_paths(model, reference_file_types, observatory)\n return refpaths", "def test_get_output_filepaths(self):\r\n\r\n actual_fna_fp, actual_log_fp = get_output_filepaths(\".\",\r\n '/home/tests/seqs.fna')\r\n\r\n expected_fna_fp = \"./seqs_rev_primer_truncated.fna\"\r\n expected_log_fp = \"./rev_primer_truncation.log\"\r\n\r\n self.assertEqual(actual_fna_fp, expected_fna_fp)\r\n self.assertEqual(actual_log_fp, expected_log_fp)", "def get_reffile(self, refs, detector):\n for key in refs:\n if detector in key:\n return refs[key]\n self.logger.error(\"WARNING: no file found for detector {} in {}\"\n .format(detector, refs))", "def openfile(self,files):\n for f in files:\n if f in self.fmap:\n continue\n try:\n fd=open(f,'r');\n self.files.append(fd)\n self.fmap[f]=fd\n if len(self.handle)<2:\n self.handle.append(len(self.files)-1)\n self.fname.append(f)\n self.total+=[0]\n self.inst+=[{}]\n self.excl+=[{}]\n self.incl+=[{}]\n self.caller_callee+=[{}]\n self.loadfile(fd)\n except IOError:\n pass\n print('%s not exist!!'%(f))", "def identify_primary_reference_datasets(conn, log):\n\n primary_ref = {}\n\n primary_ref['refimg_id_ip'] = phot_db.find_primary_reference_image_for_field(conn)\n\n query = 'SELECT facility, filter, software FROM reference_images WHERE refimg_id=\"'+str(primary_ref['refimg_id_ip'])+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n\n primary_ref['facility_id'] = t['facility'][0]\n primary_ref['software_id'] = t['software'][0]\n\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"ip\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref['ip'] = t['filter_id'][0]\n\n for f in ['rp', 'gp']:\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"'+f+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref[f] = t['filter_id'][0]\n\n query = 'SELECT refimg_id FROM reference_images WHERE facility=\"'+str(primary_ref['facility_id'])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND filter=\"'+str(t['filter_id'][0])+'\"'\n qs = phot_db.query_to_astropy_table(conn, query, args=())\n\n if len(qs) > 0:\n primary_ref['refimg_id_'+f] = qs['refimg_id'][0]\n else:\n log.info('WARNING: Database contains no primary reference image data in filter '+f)\n\n log.info('Identified the primary reference datasets for this field as:')\n for key, value in primary_ref.items():\n log.info(str(key)+' = '+str(value))\n\n return primary_ref", "def pavs (dirName,pat,dx,dy):\r\n ntotpat=0\r\n\r\n tabf=np.zeros((dx,dy),np.uint8)\r\n _tabsroi=np.zeros((dx,dy,3),np.uint8)\r\n _tabscan = np.zeros((dx,dy),np.int16)\r\n\r\n (top,tail)=os.path.split(dirName)\r\n print 'pav :',tail,'pattern :',pat\r\n patpickle=[]\r\n nampadir=os.path.join(patchpath,pat)\r\n nampadirl=os.path.join(nampadir,locabg)\r\n if not os.path.exists(nampadir):\r\n os.mkdir(nampadir)\r\n os.mkdir(nampadirl)\r\n\r\n pathpicklepat=os.path.join(picklepathdir,pat)\r\n# print pathpicklepat\r\n pathpicklepatl=os.path.join(pathpicklepat,locabg)\r\n patchpicklenamepatient=tail+'_'+patchpicklename\r\n\r\n pathpicklepatfile=os.path.join(pathpicklepatl,patchpicklenamepatient)\r\n if not os.path.exists(pathpicklepat):\r\n os.mkdir(pathpicklepat)\r\n if not os.path.exists(pathpicklepatl):\r\n os.mkdir(pathpicklepatl)\r\n if os.path.exists(pathpicklepatfile):\r\n os.remove(pathpicklepatfile)\r\n\r\n for scannumb in range (0,dy):\r\n tabp = 
np.zeros((dx, dy), dtype=np.uint8)\r\n tabf=np.copy(tabroipat3d[pat][scannumb])\r\n\r\n tabfc=np.copy(tabf)\r\n nbp=0\r\n if tabf.max()>0:\r\n vis=contour2(tabf,pat,dx,dy)\r\n if vis.sum()>0:\r\n _tabsroi = np.copy(tabsroi3d[scannumb])\r\n imn=cv2.add(vis,_tabsroi)\r\n imn=tagview(imn,pat,0,20)\r\n tabsroi3d[scannumb]=imn\r\n imn = cv2.cvtColor(imn, cv2.COLOR_BGR2RGB)\r\n\r\n sroifile='tr_'+str(scannumb)+'.'+typeroi\r\n filenamesroi=os.path.join(sroidir,sroifile)\r\n cv2.imwrite(filenamesroi,imn)\r\n\r\n np.putmask(tabf,tabf>0,1)\r\n\r\n atabf = np.nonzero(tabf)\r\n\r\n xmin=atabf[1].min()\r\n xmax=atabf[1].max()\r\n ymin=atabf[0].min()\r\n ymax=atabf[0].max()\r\n\r\n\r\n _tabscan=tabscan3d[scannumb]\r\n\r\n i=xmin\r\n while i <= xmax:\r\n j=ymin\r\n while j<=ymax:\r\n tabpatch=tabf[j:j+dimpavy,i:i+dimpavx]\r\n\r\n area= tabpatch.sum()\r\n targ=float(area)/pxy\r\n\r\n if targ >thrpatch:\r\n imgray = _tabscan[j:j+dimpavy,i:i+dimpavx]\r\n imagemax= cv2.countNonZero(imgray)\r\n min_val, max_val, min_loc,max_loc = cv2.minMaxLoc(imgray)\r\n\r\n if imagemax > 0 and max_val - min_val>2:\r\n nbp+=1\r\n patpickle.append(imgray)\r\n x=0\r\n #we draw the rectange\r\n while x < dimpavx:\r\n y=0\r\n while y < dimpavy:\r\n tabp[y+j][x+i]=150\r\n if x == 0 or x == dimpavx-1 :\r\n y+=1\r\n else:\r\n y+=dimpavy-1\r\n x+=1\r\n #we cancel the source\r\n tabf[j:j+dimpavy,i:i+dimpavx]=0\r\n j+=dimpavy-1\r\n j+=1\r\n i+=1\r\n\r\n if nbp>0:\r\n tabfc =tabfc+tabp\r\n ntotpat=ntotpat+nbp\r\n if scannumb not in listsliceok:\r\n listsliceok.append(scannumb)\r\n stw=tail+'_slice_'+str(scannumb)+'_'+pat+'_'+locabg+'_'+str(nbp)\r\n stww=stw+'.txt'\r\n flw=os.path.join(jpegpath,stww)\r\n mfl=open(flw,\"w\")\r\n mfl.write('#number of patches: '+str(nbp)+'\\n')\r\n mfl.close()\r\n stww=stw+'.'+typej\r\n flw=os.path.join(jpegpath,stww)\r\n scipy.misc.imsave(flw, tabfc)\r\n pickle.dump(patpickle, open(pathpicklepatfile, \"wb\"),protocol=-1)\r\n\r\n return ntotpat", "def lfp_extract(files):\r\n \r\n if 'lfpdata' in locals():\r\n del lfpdata\r\n \r\n for i, file in enumerate(files):\r\n \r\n ### load data\r\n matdat = sio.loadmat(file, variable_names = ['lfpsegs', 'lfpdata', 'fs', 'chnAreas'], \r\n struct_as_record = False, squeeze_me = True) \r\n \r\n \r\n \r\n ### extract the noused channels, only calculate once\r\n if i == 0:\r\n \r\n # chnAreas\r\n chnAreas = matdat['chnAreas'].tolist()\r\n \r\n # fs: sample rate\r\n fs = matdat['fs'] \r\n \r\n \r\n\r\n ### dealing lfp data\r\n \r\n # lfp (np.ndarray): nareas * ntemp * ntrials or ntemp * nareas * ntrials\r\n if 'lfpdata' in matdat.keys():\r\n lfpdata_1file = matdat['lfpdata']\r\n elif 'lfpsegs' in matdat.keys():\r\n lfpdata_1file = matdat['lfpsegs']\r\n\r\n n1, n2, n3 = lfpdata_1file.shape\r\n if n1 > n2: # ntemp * nareas * ntrials\r\n lfpdata_1file = np.transpose(lfpdata_1file, (1, 0, 2))\r\n \r\n # concatenate to lfpdata for all files\r\n if 'lfpdata' not in locals():\r\n lfpdata = lfpdata_1file\r\n else:\r\n lfpdata = np.concatenate((lfpdata, lfpdata_1file), axis = 2)\r\n \r\n \r\n return lfpdata, chnAreas, fs" ]
[ "0.5827411", "0.55954766", "0.5543815", "0.5515996", "0.5477217", "0.5447455", "0.5424407", "0.54009223", "0.5372652", "0.5370227", "0.5364059", "0.5320857", "0.5294063", "0.52870816", "0.5261717", "0.5260605", "0.5242285", "0.52146846", "0.51936823", "0.51909316", "0.51843077", "0.51814646", "0.51709306", "0.5165626", "0.5162636", "0.514549", "0.51454705", "0.5145386", "0.5136396", "0.51355034" ]
0.6289985
0
Assign names to each epi file based on information in the template.
def AssignEpiNames(self): # Sort each run in the series by its acquisition time. epi_sort = self.epi_times.keys() epi_sort.sort() # Rewrite pfiles as an ordered list of p-files to be reconstructed. for idx in xrange(len(epi_sort)): entry = self.epi_times[epi_sort[idx]] info = self.info[entry] if info['data_filetype'] == 'ge_data': self.pfiles_recon.append(entry) info['run'] = '%0d' % (self.n_epi) self.n_epi = self.n_epi + 1 plane = info['plane'] if not self.epinames.has_key(plane): plane = 'any' n_epi = self.epinames[plane]['n_epi'] if n_epi > len(self.epinames[plane]['names'])-1: if self.epinames.has_key('any') and \ n_epi < len(self.epinames['any']): plane = 'any' n_epi = self.epinames[plane]['n_epi'] else: self.DumpInfo() errstr = 'Not enough EPI names in template file' raise RuntimeError(errstr) # epiname = self.epinames[plane]['names'][n_epi] filebase = os.path.basename(self.epinames[plane]['names'][n_epi]) epi_mf_outdir = os.path.dirname(\ self.epinames[plane]['names'][n_epi]) epi_base = self.epinames[plane]['subdir'][n_epi] tmp_outdir = '%s/%s' % (self.tmpdir, epi_base) # Get output directory for raw epis. if self.no_motcorr: epi_r_outdir = epi_mf_outdir elif self.keep_epi_raw: epi_r_outdir = self.epi_scratch_space else: epi_r_outdir = tmp_outdir # Get output directory for motion-corrected epis. if self.keep_epi_mot: epi_m_outdir = self.epi_scratch_space else: epi_m_outdir = tmp_outdir info['outdir'] = epi_mf_outdir if n_epi < len(self.epinames[plane]['names']): epiname = self.epinames[plane]['names'][n_epi] info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase) else: info['imgfile'] = '%s/s%0d_epi_run%0d' % \ (epi_r_outdir, n_epi, idx+1) self.epinames[plane]['n_epi'] += 1 info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase) info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase) info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase) if self.no_motcorr: info['imgfile_m'] = None info['imgfile_mf'] = None info['imgfile_final'] = info['imgfile'] else: info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase) if self.no_fmapcorr or info['fmap_entry'] is None: info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase) info['imgfile_mf'] = None info['imgfile_final'] = info['imgfile_m'] else: info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase) info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase) info['imgfile_final'] = info['imgfile_mf'] info['skip'] = self.skip info['motion_ref_frame'] = self.tmplt['motion_ref_frame'] info['motion_interp'] = self.tmplt['epi_motion_interp'] if not info['motion_interp'].startswith('-'): info['motion_interp'] = '-%s' % info['motion_interp'] info['filetype'] = self.tmplt['epi_file_format'] info['valid'] = True self.info[entry] = info if not self.no_motcorr: epi_base = os.path.basename(info['imgfile_m']) info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base) info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def generate_filename(self):\n file_pattern = os.path.join(self.path, \"TCGA-*\")\n for f in glob(file_pattern):\n organ = get_organ(f)\n for raw_f in glob(os.path.join(f, \"*.tif\")):\n gt_f = raw_f.replace(\".tif\", \".xml\")\n yield raw_f, gt_f, organ", "def make_output_names(self):\n yaml_names = []\n fits_names = []\n\n if self.use_nonstsci_names:\n for i in range(len(self.info['Module'])):\n act = str(self.info['act_id'][i]).zfill(2)\n if self.info['Instrument'][i].lower() == 'niriss':\n det = 'NIS'\n elif self.info['Instrument'][i].lower() == 'fgs':\n det = 'FGS'\n else:\n det = self.info['detector'][i]\n mode = self.info['Mode'][i]\n dither = str(self.info['dither'][i]).zfill(2)\n\n yaml_names.append(os.path.abspath(os.path.join(self.output_dir, 'Act{}_{}_{}_Dither{}.yaml'\n .format(act, det, mode, dither))))\n fits_names.append('Act{}_{}_{}_Dither{}_uncal.fits'.format(act, det, mode, dither))\n\n else:\n for i in range(len(self.info['Module'])):\n if self.info['Instrument'][i].upper() == 'NIRCAM':\n fulldetector = 'nrc{}'.format(self.info['detector'][i].lower())\n else:\n fulldetector = self.info['detector'][i].lower()\n outfilebase = self.create_output_name(self.info, index=i)\n outfile = \"{}{}{}\".format(outfilebase, fulldetector, '_uncal.fits')\n yamlout = \"{}{}{}\".format(outfilebase, fulldetector, '.yaml')\n\n yaml_names.append(yamlout)\n fits_names.append(outfile)\n\n self.info['yamlfile'] = yaml_names\n self.info['outputfits'] = fits_names\n # Table([self.info['yamlfile']]).pprint()", "def _initNames(self):\n self.outselect = os.path.join(self.workpath, 'FT1_selected'+self.suffix+'.fits')\n self.outmktime = os.path.join(self.workpath, 'FT1_filtered'+self.suffix+'.fits')\n self.outltcube = os.path.join(self.workpath, 'LtCube'+self.suffix+'.fits')\n self.outbincub = os.path.join(self.workpath, 'BinCube'+self.suffix+'.fits')\n self.outbinmap = os.path.join(self.workpath, 'CMAP'+self.suffix+'.fits')\n self.outbinexp = os.path.join(self.workpath, 'BinExpMap'+self.suffix+'.fits')\n self.outexpmap = os.path.join(self.workpath, 'ExpMap'+self.suffix+'.fits')\n self.outsrcmap = os.path.join(self.workpath, 'SrcMaps'+self.suffix+'.fits')\n self.outgtlike = os.path.join(self.workpath, 'Results'+self.suffix+'.dat')\n self.outmodel = os.path.join(self.workpath, 'OutModel'+self.suffix+'.xml')\n self.outapert = os.path.join(self.workpath, 'LC_ApPhoto'+self.suffix+'.fits')\n self.outgtmod = os.path.join(self.workpath, 'GtModel'+self.suffix+'.fits')\n self.outresid = os.path.join(self.workpath, 'Resid'+self.suffix+'.fits')\n self.outresig = os.path.join(self.workpath, 'ResSigma'+self.suffix+'.fits')\n self.outtsmap = os.path.join(self.workpath, 'TSMmap'+self.suffix+'.fits')\n return\n # self.outfind = self.dir + self.src + 
'_FindSrc'+self.suffix+'.txt'", "def create_base_templates(outdir, templateEnv):\n for file in ME_TEMPLATES:\n filename = os.path.join(outdir, ME_FILENAME.format(file))\n template = templateEnv.get_template(file + '.go.jinja')\n\n with open(filename, 'w') as f:\n output = template.render(copyright=COPYRIGHT,\n generator_warning=GENERATOR_WARNING,\n package_name=PACKAGE_NAME)\n f.write(output)\n pass", "def test_pnictogen():\n for template in templates:\n template_prefix, extension = os.path.splitext(template)\n for xyz_file in example_xyz_files:\n input_prefix, xyz_file_extension = os.path.splitext(xyz_file)\n\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(xyz_file, xyz_file_extension[1:])\n )\n written_files = pnictogen(mol, input_prefix, template, extension[1:])\n\n assert_equals(type(written_files), list)\n for written_file in written_files:\n assert_equals(type(written_file), str)\n\n written_files2 = pnictogen(mol, input_prefix, template)\n assert_equals(written_files, written_files2)\n\n # Allow use of template in the parent directory\n with cd(\"pnictogen/repo\"):\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(\"../../data/water-dimer.xyz\", \"xyz\")\n )\n written_files = pnictogen(mol, \"../../data/water-dimer\", \"ADF.in\", \"in\")\n\n assert_equals(written_files, [\"../../data/water-dimer.in\"])\n\n main([\"-g\", \"/tmp/hello.world.ORCA.inp\"])\n mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/co.xyz\", \"xyz\"))\n written_files = pnictogen(mol, \"data/co\", \"/tmp/hello.world.ORCA.inp\", foo=\"bar\")\n\n assert_equals(written_files, [\"data/co.inp\"])", "def get_template_names(self): \n product = self.get_object()\n names = ['%s/detail-for-upc-%s.html' % (self.template_folder, product.upc), \n '%s/detail-for-class-%s.html' % (self.template_folder, product.item_class.name.lower()),\n '%s/detail.html' % (self.template_folder)]\n return names", "def file_creator(title_list):\n for file_name in title_list: #title names are retrieved out of genID.txt\n with open (\"nuc_variant_calls/\"+file_name.strip()+\".var\",'w') as x:\n x.write(\"Feature type\\tAlignment length\\tIdentical nucleotides\\tIndel count\\n\") #Table headers.", "def __rename_images(self):\n for idx, image in enumerate(self._values):\n image.partname = '/ppt/media/image%d%s' % (idx+1, image.ext)", "def createExtnNodes(self):\n for parent, dirs, files in os.walk(self.destndir):\n for fname in files:\n filename = os.path.join(parent, fname)\n if os.path.isfile(filename):\n direntry=parent\n #direntry=parent.replace(self.destndir,'',len(self.destndir))\n #direntry = os.path.basename(os.path.abspath(parent))\n self.appendSrcType(direntry, fname)", "def filename(i):\n rand_name = os.path.join(os.getcwd(), \"input-%d.txt\" % i)\n ref_name = os.path.join(os.getcwd(), \"input-%d.ref\" % i)\n return rand_name, ref_name", "def create_files_from_templates(self, model_attributes):\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/%s_%s.py\" % (model_attributes['app_label'], folder_name,\n model_attributes['model_name_slug'], folder_name)\n template_path = \"django_baker/%s\" % (folder_name)\n self.create_file_from_template(file_path, template_path, model_attributes)\n for file_name in [\"base\", \"list\", \"detail\", \"create\", \"update\", \"delete\"]:\n file_path = \"%s/templates/%s/%s_%s.html\" % (model_attributes['app_label'], model_attributes['app_label'],\n model_attributes['model_name_slug'], file_name)\n template_path = \"django_baker/%s.html\" % (file_name)\n 
self.create_file_from_template(file_path, template_path, model_attributes)", "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)", "def _EpiInfo(self, info, path):\n\n epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK", "def task_process_department_files():\n for dept in Department.list():\n for file_name, file in dept.files.items():\n yield {\n 'name': f'{dept}:{file_name}',\n 'file_dep': file.dependencies +\n [file.raw_path, util.path.CONFIG_PATH],\n 'targets': [file.processed_path],\n 'actions': [file.process],\n 'clean': True,\n }", "def _prepare_file(self, item_name, page_instructions):\n if item_name not in self.prepared_instructions:\n self.prepared_instructions[item_name] = []\n\n for instruction in getattr(page_instructions, item_name):\n item = copy.copy(instruction)\n\n if 'url' in instruction:\n 
item['location'] = instruction['url']\n\n else:\n template_name = context = process_func = None\n\n if 'process' in instruction:\n process_func = self._get_processing_function(\n instruction.get('process'))\n\n template_name = instruction.get('static', False) or \\\n instruction.get('inline', False)\n\n assert template_name, (\n 'You must provide either \"static\" or \"inline\" properties '\n 'that point to a file, provided object was %r'\n % instruction)\n\n if 'inline' in instruction:\n context = self.context\n else:\n context = None\n\n source, is_cached = self._get_media_source(\n template_name, process_func, context)\n\n if 'css' in item_name and self.make_css_urls_absolute \\\n and not is_cached:\n source = self._fix_css_urls(instruction, source)\n\n if 'static' in instruction:\n location, filename = self._copy_to_media(\n template_name, source)\n item['location'] = location\n elif 'inline' in instruction:\n item['source'] = source\n\n if 'include' in instruction and not \\\n instruction['include']:\n if 'inline' in instruction:\n raise AttributeError('You have specified inline and '\n 'include: false, these really don\\'t make sense '\n 'together')\n continue\n\n self.prepared_instructions[item_name].append(item)", "def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname", "def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' 
% tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()", "def process_tempita(fromfile):\n if not fromfile.endswith('.in'):\n raise ValueError(\"Unexpected extension: %s\" % fromfile)\n\n from_filename = tempita.Template.from_filename\n template = from_filename(fromfile,\n encoding=sys.getdefaultencoding()) \n\n content = template.substitute()\n\n outfile = os.path.splitext(fromfile)[0]\n with open(outfile, 'w') as f:\n f.write(content)", "def get_names():\n\n #Initialize entities dictionary\n entities = {'entity': 'source_file'}\n\n # Construct the raw_directory path\n project_root = os.environ['PYTHONPATH']\n raw_directory = '{}/data/raw/'.format(project_root)\n \n for file in os.listdir(raw_directory):\n if file.endswith('.json'):\n \n # Construct the full file path\n full_path = '{}{}'.format(raw_directory, file)\n \n # Open each JSON file\n with open(full_path, 'r') as source_file:\n data = source_file.read()\n parsed_data = json.loads(data)\n \n # Iterate through the dictionary parsed_data\n for key in parsed_data:\n if 'SocialTag' in key:\n name = parsed_data[key]['name']\n entities.update({name: file})\n\n return entities", "def prepare_anno(infolder, outfolder, mode=\"split\"):\n\tprint(\"Starting...\")\n\t\n\tinpath = os.path.join(infolder, \"*.xml\")\n\tfilecounter = 0\n\t\n\t# check output folders\n\tif not os.path.exists(outfolder):\n\t\tos.makedirs(outfolder)\n\t\t\n\tout_tei = os.path.join(outfolder, \"temp\")\n\tout_txt = os.path.join(outfolder, \"txt\")\n\t\n\tif not os.path.exists(out_tei):\n\t\tos.makedirs(out_tei)\n\tif not os.path.exists(out_txt):\n\t\tos.makedirs(out_txt)\n\t\t\n\t\n\tfor filepath in glob.glob(inpath):\n\t\tfilecounter+= 1\n\t\tfn = os.path.basename(filepath)[:-4]\n\t\toutfile_x = fn + \".xml\"\n\t\t\n\t\tdoc = etree.parse(filepath)\n\t\t\n\t\tif mode == \"split-1\":\n\t\t\ttransform = etree.XSLT(xslt_TEIwrapper_1)\n\t\telse:\n\t\t\ttransform = etree.XSLT(xslt_TEIwrapper)\n\n\t\tresult_tree = transform(doc)\n\t\tresult = str(result_tree)\n\t\t\n\t\t# create TEI wrapper for future annotation results\n\t\twith 
open(os.path.join(outfolder, \"temp\", outfile_x), \"w\") as output:\n\t\t\toutput.write(result)\n\t\t\t\n\t\t# create one full text file per chapter (or for the whole text)\n\t\ttei = {'tei':'http://www.tei-c.org/ns/1.0'}\n\t\tcligs_id = doc.xpath(\"//tei:idno[@type='cligs']/text()\", namespaces=tei)\n\t\tif mode == \"split-1\":\n\t\t\tresults = doc.xpath(\"//tei:text/tei:body\", namespaces=tei)\n\t\telse:\n\t\t\tresults = doc.xpath(\"//tei:div[ancestor::tei:body][not(descendant::tei:div[not(ancestor::tei:floatingText)])][not(ancestor::tei:floatingText)]\", namespaces=tei)\n\t\t\n\t\tif isinstance(cligs_id, list):\n\t\t\tcligs_id = cligs_id[0]\n\t\telif isinstance(cligs_id, str) == False:\n\t\t\traise ValueError(\"This type (\" + str(type(cligs_id)) + \") is not supported for cligs_id. Must be list or string.\")\n\t\t\n\t\tfor i,r in enumerate(results):\n\t\t\ttransform = etree.XSLT(xslt_extractDIVs)\n\t\t\tresult_tree = transform(r)\n\t\t\tresult = str(result_tree)\n\t\t\t\n\t\t\toutfile = cligs_id + \"_d\" + str(i + 1) + \".txt\"\n\t\t\t\n\t\t\twith open(os.path.join(outfolder, \"txt\", outfile), \"w\") as output:\n\t\t\t\toutput.write(result)\n\t\n\tprint(\"Done. \" + str(filecounter) + \" files treated.\")", "def _generate_src():\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))", "def make_template(filenames):\n result = {}\n for fn in filenames:\n with open(fn) as f:\n conf = yaml.load(f)\n expand_horizons(result, conf)\n return result", "def _SetAnatNames(self, anat_tgt):\n# Define links to structural image in each output directory.\n for entry in self.entry_map['epi'] + self.entry_map['fmap'] + \\\n self.entry_map['dti'] + self.entry_map['asl']:\n self.info[entry]['anat_link'] = anat_tgt\n\n# Name the normalization source image T1High. Number the rest.\n anat_entries = self.entry_map['anat'][:]\n anat_entries.remove(anat_tgt)\n n_t1high = 1\n for entry in anat_entries:\n if self.info[entry]['type'] == 'T1High':\n# High res T1-weighted, not normalization target. 
Rename it.\n fname = 'T1High_%d' % n_t1high\n fullname = '%s/%s' % (self.info[entry]['outdir'], fname)\n self.info[entry]['imgfile'] = fullname\n self.info[entry]['imgfile_skstrip'] = '%s_skstrip' % fullname\n self.info[entry]['matfile'] = '%s_matfile.aff12.1D' % fullname\n self.info[anat_tgt]['norm_src'] = False\n n_t1high += 1\n fname = 'T1High'\n fullname = '%s/%s' % (self.info[anat_tgt]['outdir'], fname)\n self.info[anat_tgt]['imgfile'] = fullname\n self.info[anat_tgt]['imgfile_skstrip'] = '%s_skstrip' % fullname\n self.info[anat_tgt]['matfile'] = '%s_matfile.aff12.1D' % fullname\n self.info[anat_tgt]['norm_src'] = True\n\n self.anatomical = '%s%s' % (self.info[anat_tgt]['imgfile'], \\\n self.info[anat_tgt]['suffix'])\n# The target for motin correction is the source for spatial normalization.\n self.norm_src = anat_tgt", "def jinja_files(self, val: Pattern):\n self[\"jinja_files\"] = str(val)", "def _read_output_files(self):\n self.manage = {} # Empty the dictionary matching phrases\n self.manage['spin'] = (re.compile(' *net spin of'), self._read_spin)\n self.manage['nelect'] = (re.compile(' *number of electrons'), self._read_nelect)\n self.manage['cellcontents'] = (re.compile(' *Unit Cell'), self._read_cellcontents)\n self.manage['pspots'] = (re.compile(' *Files used for pseudopotentials:'), self._read_pspot)\n self.manage['masses'] = (re.compile(' *Mass of species in AMU'), self._read_masses)\n self.manage['kpoints'] = (re.compile(' *Number of kpoints used'), self._read_kpoints)\n self.manage['kpoint_grid'] = (re.compile(' *MP grid size for SCF'), self._read_kpoint_grid)\n self.manage['finalenergy'] = (re.compile(' *Final energy, E'), self._read_energies)\n self.manage['finalenergy2'] = (re.compile('Final energy ='), self._read_energies2)\n self.manage['finalenergy3'] = (re.compile('Dispersion corrected final energy'), self._read_energies3)\n self.manage['energy_cutoff'] = (re.compile(' *plane wave basis set cut'), self._read_energy_cutoff)\n self.manage['nbands'] = (re.compile(' *number of bands'), self._read_nbands)\n self.manage['pressure'] = (re.compile(' *\\* *Pressure: '), self._read_external_pressure)\n self.manage['opticalDielectric'] = (re.compile(' *Optical Permittivity'), self._read_dielectric)\n self.manage['bornCharges'] = (re.compile(' *Born Effective Charges'), self._read_born_charges)\n # For the .phonon file\n self.manage['frequency'] = (re.compile(' q-pt= 1 0.000000 0.000000 0.000000 1.0000000000 *$'), self._read_frequencies)\n self.manage['nbranches'] = (re.compile(' Number of branches'), self._read_nbranches)\n for f in self._outputfiles:\n self._read_output_file(f)\n return", "def setESFiles(self, eSourceDir = None, verbose = False):\n\n print('\\n***Setting electronic structure files')\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc':\n # Check and set electronic structure file for packaging.\n if '***Missing' in self.nbDetails[key]['jobInfo'][2]:\n self.nbDetails[key]['elecStructure'] = None\n else:\n if eSourceDir is not None:\n # Copy electronic structure files to package using supplied path\n fileName = Path(self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\"))\n self.nbDetails[key]['elecStructure'] = Path(eSourceDir, fileName.name).as_posix()\n\n else:\n # Copy electronic structure files to package, based on full path from original job\n self.nbDetails[key]['elecStructure'] = self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\")\n\n checkList = self.checkFiles(self.nbDetails[key]['elecStructure'])\n\n # If file is 
missing, set to \"missing\"\n if not checkList[0]:\n self.nbDetails[key]['elecStructure'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n\n # If file is present, check also for corresponding files\n else:\n # Assuming above is molden file, check also for corresponding Gamess file\n gFile = Path(self.nbDetails[key]['elecStructure']).with_suffix('.log')\n checkList = self.checkFiles(gFile)\n if checkList[0]:\n # self.nbDetails[key]['elecStructure'].append(gFile.as_posix()) # Set here to append... hopefully works OK with arch update code...\n self.nbDetails[key]['elecStructureGamess'] = gFile.as_posix() # Set here as separate item\n else:\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {gFile.as_posix()}\"\n #\n\n if verbose:\n print(f\"Job {key}: {self.nbDetails[key]['title']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructure']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructureGamess']}\")", "def postpare_anno(infolder, outfolder, mode=\"fl\"):\n\tprint(\"Starting...\")\n\t\n\tif not os.path.exists(infolder):\n\t\traise ValueError(\"The input folder could not be found.\")\n\t\t\n\tin_temp = os.path.join(infolder, \"temp\")\n\tin_anno = os.path.join(infolder, \"annotated_temp\")\n\t\n\tif not os.path.exists(in_temp):\n\t\traise ValueError(\"The folder 'temp' could not be found inside the input folder.\")\n\tif not os.path.exists(in_anno):\n\t\traise ValueError(\"The folder 'annotated_temp' could not be found inside the input folder.\")\n\tif not os.path.exists(outfolder):\n\t\tos.makedirs(outfolder)\n\t\t\n\tfilecounter = 0\t\n\n\t# fetch annotated snippets for each TEI template file\n\tfor filepath in glob.glob(os.path.join(in_temp, \"*.xml\")):\n\t\tprint(\"doing file \" + filepath)\n\t\tfilecounter+= 1\n\t\tfn = os.path.basename(filepath)\n\t\tannofolder = os.path.join(Path(os.path.join(infolder, \"annotated_temp\")).as_uri(), \"\")\n\t\t# which annotation mode are we in?\n\t\tannomode = mode\n\t\t\n\t\tparser = etree.XMLParser(encoding=\"UTF-8\")\n\t\tparser.resolvers.add(FileResolver())\n\t\t\n\t\tdoc = etree.parse(filepath, parser)\n\t\txslt_root = etree.parse(io.StringIO(xslt_joinDIVs), parser)\n\t\t\n\t\ttransform = etree.XSLT(xslt_root)\n\t\t\n\t\tresult_tree = transform(doc, annofolder= \"'\" + annofolder + \"'\", mode= \"'\" + annomode + \"'\")\n\t\tresult = str(result_tree)\n\t\t\n\t\t# save the results\n\t\twith open(os.path.join(outfolder, fn), \"w\") as output:\n\t\t\toutput.write(result)\n\t\n\tprint(\"Done. 
\" + str(filecounter) + \" files treated.\")", "def _extract_template_events(self):\n\t\ttry:\n\t\t\ttable = self.hdf5file[fastq_paths[self.version]['template'] % self.group]\n\t\t\tself.template_events = [Event(x) for x in table['Events'][()]]\n\t\texcept Exception, e:\n\t\t\tself.template_events = []", "def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag = True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == \"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. 
adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print \"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = \"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n 
finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()" ]
[ "0.5765641", "0.5753263", "0.5717375", "0.5547444", "0.55394816", "0.54884017", "0.54623926", "0.53843206", "0.5353627", "0.531927", "0.52712166", "0.5261715", "0.523186", "0.52143687", "0.51853824", "0.51815045", "0.51764005", "0.5170443", "0.5149673", "0.51465887", "0.5144239", "0.5137167", "0.51357424", "0.5131627", "0.512647", "0.51041424", "0.5102202", "0.5100827", "0.50993335", "0.5083337" ]
0.72918445
0
Dump the info object to a yaml file.
def DumpInfo(self): if self.logdir is None: return self.dumpfile = '%s/preprocess_info.yaml' % (self.logdir) try: f = open(self.dumpfile,'w') f.write(yaml.dump(self.info,default_flow_style=False, indent=4)) f.close() except IOError: self.errors = True errstr = 'Error accessing %s' % self.dumpfile raise IOError(errstr) self.LogErrors(errstr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, file=sys.stdout):\n d = self.to_dict()\n if d:\n yaml.dump([d], file, default_flow_style=False)", "def __exit__(self, *_):\n with self._info_yaml_file_path.open(\"w\") as info:\n self._yml.dump(self._info, info)", "def write(self):\n self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4))", "def write(self):\n print yaml.dump(self._config, default_flow_style=False),", "def dump(self, yaml_file):\n\n with open(yaml_file, 'w') as fp:\n yaml.dump(self.__dict__, fp)", "def save(self, filename):\n with open(filename, 'w') as f:\n yaml.dump(self.to_dict(), f, sort_keys=False)", "def UnDumpInfo(self):\n filename = '%s/preprocess_info.yaml' % self.logdir\n f = open(filename,'r')\n self.info = yaml.load(f.read())\n f.close()", "def to_yaml(obj: ConfiguredBaseModel, file: str):\n\n fh = open(file, \"w\") if file else sys.stdout\n\n if isinstance(obj, Entity):\n yaml.dump(obj.dict(), fh, indent=4)\n elif isinstance(obj, Results) or isinstance(obj, HistoPheno) or isinstance(obj, AssociationCountList):\n yaml.dump([item.dict() for item in obj.items], fh, indent=4)\n else:\n raise TypeError(FMT_INPUT_ERROR_MSG)\n\n if file:\n console.print(f\"\\nOutput written to {file}\\n\")\n fh.close()\n\n return", "def dump(self, config_file = 'config.yaml'):\n\n with open(config_file, 'w') as fp:\n yaml.dump(self.__dict__, fp)", "def to_yaml(cls,dumper,self):\n #self.__modelData['ids'] = self.__mapObj.ids\n self.__modelData['ids'] = ','.join(map(str,self.__mapObj.ids))\n\n ##GENERATE Overview\n old_size = self.__size\n self.__mapObj.size = PREVIEW_SIZE\n typ,dat,width,height = processOverview(self.__mapObj.png)\n self.__modelData['overview_typ'] = typ\n self.__modelData['overview_dat'] = dat\n self.__modelData['overview_width'] = width\n self.__modelData['overview_height'] = height\n self.__mapObj.size = old_size\n #END Overview\n\n node = dumper.represent_mapping(cls.yaml_tag,self.__modelData)\n self.SetModified(False)\n return node", "def save(self, filename=None):\n name = filename or self.filename\n with open(name, \"w\") as stream:\n yaml.dump(self.data, stream, default_flow_style=False)", "def dump_yaml(self, data, output):\n yaml.indent(mapping=MAPPING, sequence=SEQUENCE, offset=OFFSET)\n yaml.dump(data, output)", "def save_info(self):\n json.dump(self.info, open(os.path.join(self.dstore_dir, \"info.json\"), \"w\"),\n sort_keys=True, indent=4, ensure_ascii=False)", "def dump(self) -> None:\n ...", "def test_to_yaml(self) -> None:\n entry = Entry(\"Cao_2019\", self.EXAMPLE_ENTRY_DICT)\n yaml_str = YAMLParser().dump(entry)\n with open(self.EXAMPLE_YAML_FILE, \"r\", encoding=\"utf-8\") as file:\n assert yaml_str == file.read()", "def save_to_yaml(self, path=None):\n\n if not path:\n path = \".\".join([self.name.value, \"yaml\"])\n\n planet_dict = {}\n for a in sorted(self.attributes):\n exo_param = getattr(self, a)\n param_dict = exo_param.__dict__\n param_dict = {k: str(v)\n for k, v in param_dict.items()\n if v and len(str(v)) > 0}\n planet_dict[a] = param_dict\n\n with open(path, 'w') as yamlfile:\n yaml.dump(planet_dict, yamlfile, default_flow_style=False)", "def DumpYaml(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n text = yaml.safe_dump(data)\n \n return text", "def save(self, filepath):\n writer = json.dump if Config.isjson(filepath) else yaml.dump\n with open(filepath, 'w') as f:\n writer(dict(self), f)", "def __str__(self):\n if self.data is None:\n return \"\"\n\n return 
yaml.dump(self.data, default_flow_style=False, indent=2)", "def yaml_inventory(self):\n inventory_file = 'inventory_file'\n with open(inventory_file, 'w') as invfile:\n yaml.dump(self.inventory_dict, invfile, default_flow_style=False, sort_keys=False)", "def save_to_yml_file(self):\n yml_filename = self.get_yml_filename()\n\n if os.path.exists(yml_filename) and not self.force:\n logger.warning(\n f\"[red]File {yml_filename} already exists, not writing. To override add --force.[/red]\"\n )\n else:\n if self.force:\n logger.info(\n f\"[yellow]Force flag is used. Overriding {yml_filename} if it exists.[/yellow]\"\n )\n if self.metadata:\n self.metadata.save_dict_as_yaml_integration_file(yml_filename)", "def toYAML(cls, obj):\n if isinstance(obj, dict):\n return yaml.dump(obj, default_flow_style=False)\n else:\n return yaml.dump_all(obj, default_flow_style=False)", "def to_yaml(cls, dumper, data):\n\t\tdict_rep = {'location':data._location, 'startFrame':data._startFrame,\n\t\t\t\t\t'endFrame':data._endFrame, 'camera':data._camera}\n\n\t\tprint(dict_rep)\n\n\t\tnode = dumper.represent_mapping(cls.yaml_tag, dict_rep)\n\t\treturn node", "def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)", "async def dump(self, data: dict, file: IO):", "def dump(filename: Path) -> None:\n import yaml\n\n dumped_str = yaml.dump_all(\n [data_dict],\n Dumper=RegressionYamlDumper,\n default_flow_style=False,\n allow_unicode=True,\n indent=2,\n encoding=\"utf-8\",\n )\n with filename.open(\"wb\") as f:\n f.write(dumped_str)", "def save_yaml_to_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n d = i['dict']\n\n try:\n # If using just dump and keys are in unicode,\n # pyyaml adds warning and makes produced yaml unparsable\n s = yaml.safe_dump(d)\n except Exception as e:\n return {'return': 1, 'error': 'problem converting dict to YAML ('+format(e)+')'}\n\n return save_text_file({'text_file': fn, 'string': s})", "def yaml(self):\n raise NotImplementedError", "def save_dict_as_yaml_integration_file(self, output_file: str):\n logger.debug(f\"Writing collected metadata to {output_file}.\")\n\n write_yml(output_file, self.metadata_dict)\n logger.info(\"[green]Finished successfully.[/green]\")", "def dump(self, config):\n raise NotImplementedError" ]
[ "0.7140269", "0.6745914", "0.67231625", "0.6591301", "0.6522555", "0.6498975", "0.64982736", "0.6444522", "0.63701254", "0.62643826", "0.62501603", "0.61706054", "0.60966444", "0.60667735", "0.6060812", "0.6049239", "0.6019757", "0.60015565", "0.60012335", "0.59733576", "0.5957535", "0.5930331", "0.5920716", "0.59146774", "0.58603007", "0.58540636", "0.5849694", "0.58436817", "0.5843065", "0.58338845" ]
0.7883224
0
Convert anatomical images from dicom or ifiles to briks or niftis.
def ConvertAnat(self): if self.verbose: print 'Convert T1 and T2 images...' for entry in self.info: info = self.info[entry] if self.info[entry]['imgfile'] is None: continue if self.info[entry]['type'] in self.anat_types: key = self.info[entry]['type'] imgfile = self.info[entry]['imgfile'] cmd = 'convert_file %s %s %s %s' % (self.flip_opts, entry, \ imgfile, self.info[entry]['filetype']) checkfile = '%s%s' % (imgfile, self.info[entry]['suffix']) self.CheckExec(cmd, [checkfile]) if self.info[entry]['norm_src'] and self.skull_strip: cmd = "3dSkullStrip -input %s -prefix %s" % \ (checkfile, self.info[entry]['imgfile_skstrip']) checkfile = '%s+orig.BRIK' % \ (self.info[entry]['imgfile_skstrip']) self.CheckExec(cmd, [checkfile])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(input_folder, output_images_folder, output_files_folder, bb_file,\n archive_folder, name_mapping):\n\n output_images_folder = Path(output_images_folder)\n output_files_folder = Path(output_files_folder)\n archive_folder = Path(archive_folder)\n output_images_folder.mkdir(exist_ok=True)\n archive_folder.mkdir(exist_ok=True)\n logger.info(\"Converting Dicom to Nifty - START\")\n converter = NiftiConverter(\n padding=\"whole_image\",\n resampling_spacing=-1,\n list_labels=[\"GTVt\"],\n cores=10,\n )\n _ = converter(input_folder, output_folder=output_images_folder)\n\n logger.info(\"Converting Dicom to Nifty - END\")\n logger.info(\"Removing extra VOI - START\")\n move_extra_vois(output_images_folder, archive_folder)\n logger.info(\"Removing extra VOI - END\")\n logger.info(\"Renaming files- START\")\n correct_names(output_images_folder, name_mapping)\n logger.info(\"Renaming files- END\")\n logger.info(\"Cleaning the VOIs - START\")\n clean_vois(output_images_folder)\n logger.info(\"Cleaning the VOIs - END\")\n\n logger.info(\"Computing the bounding boxes - START\")\n bb_df = compute_bbs(output_images_folder)\n bb_df.to_csv(bb_file)\n logger.info(\"Computing the bounding boxes - END\")", "def read_isbi2013_2shell():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'isbi2013')\n fraw = pjoin(folder, 'phantom64.nii.gz')\n fbval = pjoin(folder, 'phantom64.bval')\n fbvec = pjoin(folder, 'phantom64.bvec')\n\n md5_dict = {'data': '42911a70f232321cf246315192d69c42',\n 'bval': '90e8cf66e0f4d9737a3b3c0da24df5ea',\n 'bvec': '4b7aa2757a1ccab140667b76e8075cb1'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def transform_images(img1,img2):", "def _get_image_blob(roidb):\n num_images = len(roidb)\n\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = io.imread(roidb[i]['image'], plugin='tifffile')\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n im, im_scale = blob_utils.prep_im_for_blob(im, roidb[i], 'train')\n im_scales.append(im_scale[0])\n processed_ims.append(im[0])\n\n # Create a blob to hold the input images [n, c, s, h, w]\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales", "def _get_image_blobs(self, roidb, scale_inds):\n num_images = len(roidb)\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = cv2.imread(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n im, im_scale = self._get_image_blob(im, scale_inds[i], False)\n im_scales.append(im_scale)\n processed_ims.append(im)\n \n # Create a blob to hold the input images\n blob = self.im_list_to_blob(processed_ims)\n \n return blob, im_scales", "def nifti2dicom(seg_nifti, bk_nifti, ref_dicom_dir, save_dir, description, mode_RGB=False, zoom_num=4, watermarks=True): \n #Load nifti, here is segmentation and background\n seg_image = sitk.ReadImage(seg_nifti)\n seg_image = sitk.GetArrayFromImage(seg_image)\n seg_image = seg_image.astype(np.uint8)\n \n # print(nifti_image.shape)\n bk_image = sitk.ReadImage(bk_nifti)\n bk_image = sitk.GetArrayFromImage(bk_image)\n\n #Get Volume report from the seg_image, cubic ml, and the 95% CI:\n v_nonenhancing = round(seg_image[seg_image==1].sum()/1000,1)\n ci_nonenhancing = round(v_nonenhancing*0.2,1)\n v_enhancing = 
round(seg_image[seg_image==4].sum()/1000,1)\n ci_enhancing = round(v_enhancing*0.3,1)\n v_edema = round(seg_image[seg_image==2].sum()/1000,1)\n ci_edema = round(v_edema*0.1,1)\n\n #Loading the reference dicom, in order to get the headers of each slice. \n series_IDs = sitk.ImageSeriesReader.GetGDCMSeriesIDs(ref_dicom_dir)\n if not series_IDs:\n print(\"ERROR: given directory \\\"\"+data_directory+\"\\\" does not contain a DICOM series.\")\n sys.exit(1)\n\n series_file_names = sitk.ImageSeriesReader.GetGDCMSeriesFileNames(ref_dicom_dir, series_IDs[0])\n\n series_reader = sitk.ImageSeriesReader()\n series_reader.SetFileNames(series_file_names)\n\n # Configure the reader to load all of the DICOM tags (public+private):\n # By default tags are not loaded (saves time).\n # By default if tags are loaded, the private tags are not loaded.\n # We explicitly configure the reader to load tags, including the private ones.\n series_reader.MetaDataDictionaryArrayUpdateOn()\n series_reader.LoadPrivateTagsOn()\n ref_image = series_reader.Execute()\n \n #set reader for slice \n reader = sitk.ImageFileReader()\n reader.LoadPrivateTagsOn()\n \n writer = sitk.ImageFileWriter()\n # Use the study/series/frame of reference information given in the meta-data\n # dictionary and not the automatically generated information from the file IO\n writer.KeepOriginalImageUIDOn()\n\n # Copy some of the tags and add the relevant tags indicating the change.\n # For the series instance UID (0020|000e), each of the components is a number, cannot start\n # with zero, and separated by a '.' We create a unique series ID using the date and time. tags of interest:\n \n castFilter = sitk.CastImageFilter()\n castFilter.SetOutputPixelType(sitk.sitkInt16)\n ORG_ROOT=\"1.3.12.2\"\n #create SeriesInstanceUID and StudyInstanceUID\n SeriesInstanceUID = generateUID(org_root=ORG_ROOT)\n StudyInstanceUID = generateUID(org_root=ORG_ROOT)\n #create a prefix for the accession number\n acc='BTS'+series_reader.GetMetaData(0,\"0008|0050\")\n #changing spacing\n reader.SetFileName(series_file_names[0])\n reader.ReadImageInformation()\n\n if mode_RGB:\n customized_tag_values = [(\"0008|103e\", description),\n (\"0020|000e\", SeriesInstanceUID),\n (\"0008|0050\", acc),\n (\"0020|000d\", StudyInstanceUID), \n (\"0028|0004\", 'RGB'),\n (\"0028|0002\", \"3\")]\n else:\n customized_tag_values = [(\"0008|103e\", description),\n (\"0020|000e\", SeriesInstanceUID), \n (\"0008|0050\", acc),\n (\"0020|000d\", StudyInstanceUID)] \n\n os.makedirs(save_dir, exist_ok = True)\n\n #for nifti, the main axis is the first one, while for dicoms it is the last one\n for i in range(ref_image.GetDepth()):\n #zoom 2 times, todo need to figure out which axis to zoom, post is the 3rd\n #pre assume the first axis is the slice numbers\n bk_slice = ndimage.zoom(bk_image[i,:,:], zoom_num, order=0)\n seg_slice = ndimage.zoom(seg_image[i,:,:], zoom_num, order=0)\n \n #Due to the DICOM saving coordinate system is different with nifti,i.e mirrored, it is easier to flip array\n bk_slice = np.flip(bk_slice, (0, 1)) \n seg_slice = np.flip(seg_slice, (0, 1)) \n\n #get contours\n seg_idx = get_contours(seg_slice)\n \n #add watermarks\n if watermarks:\n canvas_tmp = np.zeros(list(bk_slice.shape), dtype=np.uint8)\n font = cv2.FONT_HERSHEY_PLAIN\n cv2.putText(canvas_tmp,'FOR RESEARCH ONLY;REFER TO OFFICIAL REPORT FOR DETAILS',(10,30), \n font,2,255,1)\n cv2.putText(canvas_tmp,'(This tool is intended for evaluation of gliomas, and results may be unreliable for other pathologies)',(90,50), \n 
font,1,255,1) \n #add Legend and volumes \n cv2.putText(canvas_tmp, 'Legend Volume(+/-95% CI)',(10,900), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Edema {v_edema}+/-{ci_edema} mL',(30,920), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Enhancing {v_enhancing}+/-{ci_enhancing} mL',(30,940), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Non- {v_nonenhancing}+/-{ci_nonenhancing} mL',(30,960), font,0.8,255,1)\n cv2.putText(canvas_tmp,'Enhancing', (30,975), font,0.8,255,1)\n cv2.putText(canvas_tmp,'(The error is based on testing of algorithm performance vs. manual segmentation)', (150,1000), font,1,255,1)\n\n \n \n #burning segmentation contour into slices\n cv2.line(seg_idx, (10,915), (20,915), 2, 2)\n cv2.line(seg_idx, (10,935), (20,935), 4, 2)\n cv2.line(seg_idx, (10,955), (20,955), 1, 2)\n \n if mode_RGB:\n #burning the watermarks\n bk_slice[canvas_tmp==255]=bk_slice.max()\n #convert dicom from nomogram to RGB\n bk_slice = toRGB(bk_slice)\n #colorize the bk_slice according to seg_idx\n bk_slice[0,:,:,0][seg_idx==1] = 255\n bk_slice[0,:,:,1][seg_idx==4] = 255\n bk_slice[0,:,:,2][seg_idx==2] = 255 \n else:\n #grey the ori_image_slice according to seg_idx\n bk_slice[canvas_tmp==255]=bk_slice.max()//2\n bk_slice[seg_idx==1] = bk_slice.max()*2//50\n bk_slice[seg_idx==2] = bk_slice.max()*1//50\n bk_slice[seg_idx==4] = bk_slice.max()*3//50\n\n converted_slice = sitk.GetImageFromArray(bk_slice)\n reader.SetFileName(series_file_names[i])\n reader.ReadImageInformation()\n spacing_new = [i/zoom_num for i in reader.GetSpacing()[:-1]] + [reader.GetSpacing()[-1]]\n \n #generate SOPInstanceUID\n SOPInstanceUID = generateUID(org_root=ORG_ROOT)\n series_tag_values = [(k, reader.GetMetaData(k)) for k in reader.GetMetaDataKeys()] + customized_tag_values + [(\"0008|0018\", SOPInstanceUID)]\n# print(series_tag_values)\n if '_seg_' in description:\n converted_slice = converted_slice \n \n # Tags shared by the series.\n for tag, value in series_tag_values:\n converted_slice.SetMetaData(tag, value)\n \n # especially set spacing tags\n # Image Position (Patient)\n converted_slice.SetMetaData(\"0020|0013\", str(i)) # Instance Number\n converted_slice.SetSpacing(spacing_new)\n \n # Write to the output directory and add the extension dcm, to force writing in DICOM format \n writer.SetFileName(os.path.join(save_dir, str(i)+'.dcm'))\n writer.Execute(converted_slice)", "def getimgs():", "def ToIco( self, forced_bpp_conversion, imagepaths, icopaths ):\n global flag_bit\n \n flag_bit = forced_bpp_conversion\n all_log_mess = []\n \n ## Checks icopaths.\n if not icopaths:\n log_err = 'Output: file/s missing\\n'\n return log_err\n else:\n if isinstance(icopaths, list):\n for path in icopaths:\n if path.lower().endswith('.ico'):\n idType = 1\n elif path.lower().endswith('.cur'):\n idType = 2\n else:\n log_err = 'Output: file \"%s\" with wrong file extension' %path\n return log_err\n else:\n log_err = 'Output: file/s not in a list\\n'\n return log_err\n \n ## Checks imagepaths.\n if not imagepaths:\n log_err = 'Input: file/s missing\\n'\n return log_err\n for ii, paths in enumerate(imagepaths):\n if isinstance(paths, list):\n if not paths:\n log_err = 'Input: file/s missing\\n'\n return log_err\n elif idType == 2 and len(paths) > 1:\n log_err = \"Input: can't create multi-size .cur\\n\"\n return log_err\n else:\n for path in paths:\n if not isfile(path):\n log_err = 'Input: file \"%s\" not exists\\n' %path\n return log_err\n else:\n log_err = 'Input: entry #%s is not a list\\n' %ii\n return log_err\n \n ## Do process.\n for path 
in zip(imagepaths, icopaths):\n all_log_mess.append(self.Build( path[0], path[1], idType ))\n \n return all_log_mess", "def convert(self):\n \n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n if len(vrtlist)!=0:\n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d mosaicked images were successfully converted to %d %s files.' % (dataCount, len(vrtlist), tifCount, str(self.outformat)))\n \n \n if len(vrtlist)==0: \n \n hdflist = sorted(glob.glob(self.fullPath + '/*.hdf'))\n for i in range(len(hdflist)):\n ms = pymodis.convertmodis_gdal.createMosaicGDAL(hdfnames = [hdflist[i]], subset = self.subset, outformat = 'GTiff')\n ms.run(str(hdflist[i].split('.h')[0]) + 'mos.tif')\n ms.write_vrt(output = str(hdflist[i].split('.h')[0]), separate = True)\n\n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n \n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/full*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d HDF files were successfully converted to %d %s files.' 
% (dataCount, len(hdflist), tifCount, str(self.outformat)))", "def pdftoimages(input_dir,output_dir): \n dirListing = os.listdir(input_dir)\n files = []\n imagespath = output_dir\n for item in dirListing:\n files.append(item)\n n = len(files)\n for num in range(n):\n doc = fitz.open(input_dir+\"/\"+files[num])\n for img in doc.getPageImageList(0):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n pix1 = None \n pix=None\n break", "def forward(self, rpn_rois, roidb, im_info):\n im_scales = im_info.data.numpy()[:, 2]\n\n # get_fast_rcnn_blob_names()\n output_blob_names = ['rois', \n 'labels_int32', 'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights',\n 'mask_rois', 'roi_has_mask_int32', 'masks_int32']\n \n # For historical consistency with the original Faster R-CNN\n # implementation we are *not* filtering crowd proposals.\n # This choice should be investigated in the future (it likely does\n # not matter).\n # Note: crowd_thresh=0 will ignore _filter_crowd_proposals\n self.add_proposals(roidb, rpn_rois, im_scales, crowd_thresh=0)\n blobs = {k: [] for k in output_blob_names}\n self.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n\n return blobs", "def transform_images(symbol_dict,\n gray=True,\n gauss_filter=-1,\n bilat_filter=-1,\n global_thresh=-1,\n adapt_thresh_mean=-1,\n adapt_thresh_gauss=-1,\n otsus=-1,\n laplacian=False,\n canny=-1,\n rescale_global_mean=False,\n resize=-1):\n \n for s in symbol_dict.values():\n for symb_img in s:\n if gray:\n gray_img = cv2.cvtColor(symb_img.img, cv2.COLOR_BGR2GRAY)\n symb_img.img = gray_img\n if gauss_filter != -1:\n blur_img = cv2.GaussianBlur(symb_img.img,\n (gauss_filter, gauss_filter),\n 0)\n symb_img.img = blur_img\n if bilat_filter != -1:\n bilat_img = cv2.bilateralFilter(symb_img.img,\n bilat_filter[0],\n bilat_filter[1],\n bilat_filter[2])\n symb_img.img - bilat_img\n if global_thresh != -1:\n ret, thresh_img = cv2.threshold(symb_img.img,\n global_thresh, 255,\n cv2.THRESH_BINARY)\n symb_img.img = thresh_img\n if adapt_thresh_mean != -1:\n thresh_img = cv2.adaptiveThreshold(symb_img.img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \\\n cv2.THRESH_BINARY, adapt_thresh_mean, 2)\n symb_img.img = thresh_img\n if adapt_thresh_gauss != -1:\n thresh_img = cv2.adaptiveThreshold(symb_img.img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \\\n cv2.THRESH_BINARY, adapt_thresh_gauss, 2)\n symb_img.img = thresh_img\n if otsus != -1:\n ret, thresh_img = cv2.threshold(\n symb_img.img, otsus, 255,\n cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n symb_img.img = thresh_img\n if laplacian:\n lap_img = cv2.Laplacian(symb_img.img, cv2.CV_64F)\n symb_img.img = lap_img\n if canny != -1:\n canny_img = cv2.Canny(symb_img.img, canny[0], canny[1])\n symb_img.img = canny_img\n # TODO: is normalizing before resizing correct?\n if rescale_global_mean:\n scaled_img = symb_img.img / 255.0\n symb_img.img = scaled_img - np.mean(scaled_img)\n if resize != -1:\n old_size = symb_img.img.shape[:2]\n\n delta_w = max(old_size) - old_size[1]\n delta_h = max(old_size) - old_size[0]\n top, bottom = delta_h // 2, delta_h - (delta_h // 2)\n left, right = delta_w // 2, delta_w - (delta_w // 2)\n\n color = [0, 0, 0]\n symb_img.img = cv2.copyMakeBorder(symb_img.img,\n top,\n bottom,\n left,\n right,\n cv2.BORDER_CONSTANT,\n value=color)\n\n symb_img.img = 
cv2.resize(symb_img.img, (resize, resize))", "def LIDC_to_niftis(extraction_results_dataframe, spacing=[1.0, 1.0, 1.0], debug=False):\n loop = map(\n lambda t: t[1][[\"extraction_location\", \"annotation_file\"]].values,\n extraction_results_dataframe.iterrows(),\n )\n progbar = tqdm.tqdm(\n loop, total=extraction_results_dataframe.shape[0], desc=\"Converting to NiFTIs...\"\n )\n converted_dicoms = Parallel(n_jobs=1, prefer=\"processes\")(\n delayed(convert_to_niftis)(*t, spacing=spacing) for t in progbar\n )\n initial_shape = extraction_results_dataframe.shape[0]\n extraction_results_dataframe = extraction_results_dataframe[converted_dicoms]\n final_shape = extraction_results_dataframe.shape[0]\n print(f\"{final_shape}/{initial_shape} DICOMs folders successfully converted.\")\n\n # Update config file\n config_file = get_config_file_path(dataset_name=\"fed_lidc_idri\", debug=debug)\n write_value_in_config(config_file, \"preprocessing_complete\", True)\n\n return extraction_results_dataframe", "def dicom_to_nrrd(self, dicom_root_dir, nrrd_files_dir):\n TEMP_FILE = '/Users/chunwei/Downloads/_TEMP'\n SYSTEM_COMMAND = 'gdcmconv -w {0} {1}'\n\n for i, subject_folder in enumerate(glob.glob(dicom_root_dir + '/*')):\n nrrd_file = nrrd_files_dir + '/'\\\n + re.search(self.KEY_WORD_FLODER, subject_folder).group()\\\n + '_%02d.nrrd' % (i + 1)\n print 'Processing ' + nrrd_file\n\n if not os.path.exists(nrrd_files_dir):\n os.makedirs(nrrd_files_dir)\n\n data_3d = None\n\n dicom_files = glob.glob(subject_folder + '/*')\n for j, dicom_file in enumerate(dicom_files):\n # prompt\n ratio = 100 * float(j)/float(len(dicom_files))\n sys.stdout.write('\\r%d%%' % ratio)\n sys.stdout.flush()\n\n # uncompress the dicom image\n command = SYSTEM_COMMAND.format(dicom_file, TEMP_FILE)\n call(command.split(), shell=False)\n\n # concatenate dicom image layer by layer\n ds = dicom.read_file(TEMP_FILE)\n data = ds.pixel_array\n data_3d = self.concatenate_layers(data_3d, data) # bottom up\n\n # get nrrd options\n options = self.load_dicom_options(TEMP_FILE, len(dicom_file))\n\n # transpose the data\n data_3d = numpy.swapaxes(data_3d, 0, 1)\n data_3d = data_3d[:, :, ::-1]\n\n # write the stack files in nrrd format\n nrrd.write(nrrd_file, data_3d, options)\n\n print", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])", "def to_nifti(self,folder_path: str):\n data_path = settings.STORAGE_DIR\n path = folder_path \n nifti=series.get_series_object(path) \n nifti_str=str(nifti)\n nifti_str=nifti_str[1:44]\n if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT': \n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')\n if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':\n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n nifti.set_ExportType('suv')\n image_md5 = 
hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')", "def get_images(algorithm=None):\n if algorithm == \"RMA\":\n #Ims = {}\n dates = []\n #Ims = np.load(\"Set_images_RMA.npy\").item()\n #dates = np.load(\"Dates_RMA.npy\")\n for i in range(n_im):\n i += i_o # Empieza en la posicion 10\n data = RMA.main(\"dset_\"+str(i)+\".hdf5\")\n #Ims[10+i] = data['Sf_n']\n dates.append(data['date'])\n np.save(os.getcwd()+\"/Results/Output_RMA/Im_\"+str(i)+\".npy\",data['Sf_n'])\n #np.save(\"Set_images_RMA\",Ims) # Para guardar el set de imagenes\n np.save(\"Parameters_RMA\",data)\n np.save(\"Dates_RMA\",np.array(dates))\n\n elif algorithm == \"BP\":\n #Ims = {}\n dates = []\n #Ims = np.load(\"Set_images_BP.npy\").item()\n #dates = np.load(\"Dates_BP.npy\")\n for i in range(n_im): #(4991):\n i += i_o # Empieza en la posicion 10\n data = BP.main(\"dset_\"+str(i)+\".hdf5\") \n #Ims[i] = data['Im']\n dates.append(data['date'])\n np.save(os.getcwd()+\"/Results/Output_BP/Im_\"+str(i)+\".npy\",data['Im']) # Imagenes de todo el dataset\n np.save(\"Parameters_BP\",data) # Parametros geometricos como dimensiones y grilla de la imagen\n np.save(\"Dates_BP\",np.array(dates)) # Fechas de las iamgenes tomadas de todo el dset\n\n return 'Ok'", "def _get_images(self):\n raw_outputs = self.interface.get_data(self.target_charge,\n self.charge_deviation,\n n_samples=self.n_samples)\n\n # apply roi to images\n roi_images = []\n for i in range(self.n_samples):\n roi_images += [apply_roi(raw_outputs['raw_images'][i], raw_outputs['ROI'])]\n\n # process and identify blobs in image\n min_size = 100\n outputs = {}\n for ele in self.output_keys:\n outputs[ele] = []\n\n for i in range(len(roi_images)):\n processed_image_data = image_processing.process_and_fit(roi_images[i],\n min_size)\n\n for ele in self.output_keys:\n if ele == 'image_check':\n outputs[ele] += [image_processing.check_image(processed_image_data['binary_image'],\n processed_image_data['smoothed_image'])]\n elif ele == 'processed_images':\n outputs[ele] += [processed_image_data['smoothed_image']]\n else:\n outputs[ele] += [processed_image_data[ele]]\n\n for ele in self.output_keys:\n outputs[ele] = np.array(outputs[ele])\n\n # add in raw data\n outputs.update(raw_outputs)\n\n # if we need to, get averaged results\n if self.average_measurements:\n avg_keys = ['rms_x', 'rms_y', 'CX', 'CY', 'n_blobs', 'FWHMX', 'FWHMY', 'centroid_offset']\n for key in avg_keys:\n outputs[key] = np.nanmean(outputs[key])\n\n return outputs", "def masterflat(input_file):\n #set original directory\n original_path = os.getcwd()\n data_path = input_file['data_path']\n save_path = input_file['save_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n flat = glob.glob('flat*.fits')\n print 'Loading flat images \\nTotal of flat files = ',len(flat),'\\nFiles = \\n'\n print flat\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n os.system('cp flat*.fits '+save_path)\n #creating the names of flat with bias subctracted\n bflat = []\n for i in flat:\n bflat.append('B'+i)\n print '\\n Names os flat images with bias subtracted: \\n \\n',bflat\n #change for save_path directory\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superflat.fits') == True:\n os.system('rm superflat.fits')\n #verify if exits previous bflat*.fits 
files and remove then.\n for i in bflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n print '\\nCreating superflat .... \\n'\n #create the list of flat images and bflat images\n #flat = string.join(flat,',')\n #bflat = string.join(bflat,',')\n print '\\n Subtracting bias from flat images and creating bflat images.... \\n'\n #iraf.imarith()\n for i in range(len(flat)):\n iraf.imarith(flat[i],'-','superbias.fits',bflat[i])\n #print statistics from bflat*.fits images\n iraf.imstat(bflat[i])\n print '\\n .... done \\n'\n #clean previos flat*.fits files\n print '\\n Clean flat*.fits images .... \\n'\n os.system('rm flat*.fits')\n print '\\n .... done. \\n'\n #normalizing each flat\n print '\\nNormalizing each flat ....\\n'\n #checking if mean from numpy is the same from your bflat images using imstat\n #take the mean of each bflat image\n bflat_mean = np.zeros(len(bflat))\n for i in range(len(bflat)):\n image = fits.getdata(bflat[i])\n image = np.array(image,dtype='Float64')\n bflat_mean[i] = round(np.mean(image))\n image = 0 #clean image allocate to this variable\n print 'The mean of each bflat image, respectivaly ...'\n print bflat_mean\n #creating the names of bflat images after the normalization:\n abflat = []\n for i in bflat:\n abflat.append('A'+i)\n print '\\n Names os bflat images with bias subtracted and normalizad: \\n \\n',abflat\n #verify if exist previous ABflat*.fits images and remove then.\n for i in abflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n for i in range(len(abflat)):\n iraf.imarith(bflat[i],'/',bflat_mean[i],abflat[i])\n print '\\n.... done!\\n'\n # print '\\n Cleaning bflat*.fits images ....\\n'\n # os.system('rm Bflat*.fits')\n print '\\n.... done.\\n'\n print 'Statistics of the abflat*.fits images .... \\n'\n for i in range(len(abflat)):\n iraf.imstat(abflat[i])\n print '\\n Combining abflat images ....\\n'\n\n # ablist = string.join(abflat,',')\n # iraf.imcombine(ablist,'superflat.fits')\n #change how import flat files\n #usning the abflat list of flat files We will create a pandas python dataframe\n ablist = DataFrame(abflat)\n ablist.columns=['flat_files']\n ablist.to_csv('flat_list',index_label=False,index=False,header=False)\n #combine all flat images\n iraf.imcombine('@flat_list','superflat.fits')\n iraf.imstat('superflat.fits')\n print '\\n .... done. \\n'\n # print '\\nCleaning ABflat*.fits images ....\\n'\n # os.system('rm ABflat*.fits')\n print '\\n.... done!'\n #Verify if the image was created:\n output = glob.glob('superflat*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #last mensage\n print '\\n MASTERFLAT.FITS created! \\n'\n print '\\n END of Data Reduction for create a masterflat.fits file. \\n'\n #obtain the value of return\n if output == 1:\n print '!!! 
ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output", "def selfies2image(s):\n mol = MolFromSmiles(sf.decoder(s), sanitize=True)\n return Draw.MolToImage(mol)", "def convert_to_nifti(log, brain):\n log.info('Doing convert_to_nifti')\n cmdargs = split('3dAFNItoNIFTI {}'.format(brain))\n proc = Popen(cmdargs, stdout=PIPE, stderr=STDOUT)\n log.info(proc.stdout.read())", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X = []\n y = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y.append(label)\n X.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X,y", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def convert_image(rel_path_in, rel_path_out):\n #Lade Bild mit Originalmaske im Grayscale-Modus\n img = cv2.imread(rel_path_in, cv2.IMREAD_GRAYSCALE)\n #Jetzt steht in img ein 2D-Array/Matrix mit jedem Graufstufen-Wert der Pixel\n #Skaliere Pixelwerte runter\n for zeilen_index in range(0,img.__len__()):\n for spalten_index in range(0, img[zeilen_index].__len__()):\n #Hole Pixel-Wert an aktueller Stelle\n wert = img[zeilen_index][spalten_index]\n #Falls Wert != 0 (also Pixel gehoert nicht zum Hintergrund)\n if wert != 0: # != 0 statt == 255, da auch z.B. 253er Werte in den Masken existieren... 
(vielleicht durch Konvertierung in anderes Format?)\n #Markiere den Pixel mit 1 statt 255\n img[zeilen_index][spalten_index]=1\n #print(img)\n #*NACHDEM* alle Pixel skaliert wurden, zeichne Umrandung der Objekte\n umrandung_zeichnen(img)\n #change_color(img, 0, 255)\n #change_color(img, 1, 0)\n #print(img)\n #Schreibe Ergebnis-Bild in uebergebene Datei\n cv2.imwrite(rel_path_out, img)", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def test_BinaryDilation_interface(tmpdir):\n\n data = np.zeros((80, 80, 80), dtype=\"uint8\")\n data[30:-30, 35:-35, 20:-20] = 1\n\n nb.Nifti1Image(data, np.eye(4), None).to_filename(\"mask.nii.gz\")\n\n out1 = (\n BinaryDilation(\n in_mask=str(Path(\"mask.nii.gz\").absolute()),\n radius=4,\n )\n .run()\n .outputs.out_mask\n )\n shutil.move(out1, \"large_radius.nii.gz\")\n\n out2 = (\n BinaryDilation(\n in_mask=str(Path(\"mask.nii.gz\").absolute()),\n radius=1,\n )\n .run()\n .outputs.out_mask\n )\n shutil.move(out2, \"small_radius.nii.gz\")\n\n out_final = (\n BinarySubtraction(\n in_base=str(Path(\"large_radius.nii.gz\").absolute()),\n in_subtract=str(Path(\"small_radius.nii.gz\").absolute()),\n )\n .run()\n .outputs.out_mask\n )\n\n out_data = np.asanyarray(nb.load(out_final).dataobj, dtype=\"uint8\")\n\n assert np.all(out_data[data] == 0)", "def get_bayer_images(self) -> typing.List[np.ndarray]:\n return [rbg_to_bayer_bg(c.get_image()) for c in self.cameras]", "def to_nii(self, outbase, spirec='spirec', saveInOut=False):\n if self.image_data is None:\n self.recon(spirec)\n\n image_tlhc = np.array([self.header.image.tlhc_R, self.header.image.tlhc_A, self.header.image.tlhc_S])\n image_trhc = np.array([self.header.image.trhc_R, self.header.image.trhc_A, self.header.image.trhc_S])\n image_brhc = np.array([self.header.image.brhc_R, self.header.image.brhc_A, self.header.image.brhc_S])\n #image_cent = np.array([self.header.image.ctr_R, self.header.image.ctr_A, self.header.image.ctr_S])\n\n row_vec = (image_trhc-image_tlhc)/np.sqrt(np.dot(image_trhc-image_tlhc, image_trhc-image_tlhc))\n col_vec = -(image_trhc-image_brhc)/np.sqrt(np.dot(image_trhc-image_brhc, image_trhc-image_brhc))\n # The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll\n # need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them\n # such that row_vec points to the right and col_vec points up.\n # Not sure if we need to negate the slice_norm. 
From the NIFTI-1 header:\n # The third column of R will be either the cross-product of the first 2 columns or\n # its negative. It is possible to infer the sign of the 3rd column by examining\n # the coordinates in DICOM attribute (0020,0032) \"Image Position (Patient)\" for\n # successive slices. However, this method occasionally fails for reasons that I\n # (RW Cox) do not understand.\n\n # can also get slice_norm from: slice_norm = np.cross(row_vec, col_vec)\n slice_norm = np.array([self.header.image.norm_R, self.header.image.norm_A, self.header.image.norm_S])\n slice_fov = np.abs(self.header.series.start_loc - self.header.series.end_loc)\n\n # This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?\n # And is it related to wheather I have to negate the slice_norm?\n # Tuned this empirically by comparing spiral and EPI data with the sam Rx.\n # Everything seems reasonable, except the test for axial orientation (start_ras==S|I).\n # I have no idea why I need that! But the flipping only seems necessary for axials, not\n # coronals or the few obliques I've tested.\n # FIXME: haven't tested sagittals! (to test for spiral: 'sprt' in self.psd_name.lower())\n if (self.header.series.start_ras=='S' or self.header.series.start_ras=='I') and self.header.series.start_loc > self.header.series.end_loc:\n pos = image_tlhc - slice_norm*slice_fov\n # FIXME: since we are reversing the slice order here, should we change the slice_order field below?\n self.image_data = self.image_data[:,:,::-1,]\n if self.fm_data is not None:\n self.fm_data = self.fm_data[:,:,::-1,]\n else:\n pos = image_tlhc\n\n if self.num_bands > 1:\n pos = pos - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0\n\n qto_xyz = np.zeros((4,4))\n qto_xyz[0,0] = row_vec[0]\n qto_xyz[0,1] = col_vec[0]\n qto_xyz[0,2] = slice_norm[0]\n\n qto_xyz[1,0] = row_vec[1]\n qto_xyz[1,1] = col_vec[1]\n qto_xyz[1,2] = slice_norm[1]\n\n qto_xyz[2,0] = row_vec[2]\n qto_xyz[2,1] = col_vec[2]\n qto_xyz[2,2] = slice_norm[2]\n\n qto_xyz[:,3] = np.append(pos, 1).T\n qto_xyz[0:3,0:3] = np.dot(qto_xyz[0:3,0:3], np.diag(self.mm_per_vox))\n\n nii_header = nibabel.Nifti1Header()\n nii_header.set_xyzt_units('mm', 'sec')\n nii_header.set_qform(qto_xyz, 'scanner')\n nii_header.set_sform(qto_xyz, 'scanner')\n\n nii_header['slice_start'] = 0\n nii_header['slice_end'] = self.num_slices - 1\n # nifti slice order codes: 0 = unknown, 1 = sequential incrementing, 2 = seq. dec., 3 = alternating inc., 4 = alt. dec.\n slice_order = 0\n nii_header['slice_duration'] = self.tr * 1000 / self.num_slices\n # FIXME: check that this is correct.\n if self.header.series.se_sortorder == 0:\n slice_order = 1 # or 2?\n elif self.header.series.se_sortorder == 1:\n slice_order = 3 # or 4?\n nii_header['slice_code'] = slice_order\n\n # Note: the freq/phase dir isn't meaningful for spiral trajectories.\n if self.header.image.freq_dir==1:\n nii_header.set_dim_info(freq=1, phase=0, slice=2)\n else:\n nii_header.set_dim_info(freq=0, phase=1, slice=2)\n\n # FIXME: There must be a cleaner way to set the TR! 
Maybe bug Matthew about it.\n nii_header.structarr['pixdim'][4] = self.tr\n nii_header.set_slice_duration(nii_header.structarr['pixdim'][4] / self.num_slices)\n nii_header.structarr['cal_max'] = self.image_data.max()\n nii_header.structarr['cal_min'] = self.image_data.min()\n\n if self.num_echoes == 1:\n nifti = nibabel.Nifti1Image(self.image_data, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n elif self.num_echoes == 2:\n if saveInOut:\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,0], None, nii_header)\n nibabel.save(nifti, outbase + '_in.nii.gz')\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,1], None, nii_header)\n nibabel.save(nifti, outbase + '_out.nii.gz')\n # FIXME: Do a more robust test for spiralio!\n # Assume spiralio, so do a weighted average of the two echos.\n # FIXME: should do a quick motion correction here\n w_in = np.mean(self.image_data[:,:,:,:,0], 3)\n w_out = np.mean(self.image_data[:,:,:,:,1], 3)\n inout_sum = w_in + w_out\n w_in = w_in / inout_sum\n w_out = w_out / inout_sum\n avg = np.zeros(self.image_data.shape[0:4])\n for tp in range(self.image_data.shape[3]):\n avg[:,:,:,tp] = w_in*self.image_data[:,:,:,tp,0] + w_out*self.image_data[:,:,:,tp,1]\n nifti = nibabel.Nifti1Image(avg, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n else:\n for echo in range(self.num_echoes):\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,echo], None, nii_header)\n nibabel.save(nifti, outbase + '_echo%02d.nii.gz' % echo)\n\n if self.fm_data is not None:\n nii_header.structarr['cal_max'] = self.fm_data.max()\n nii_header.structarr['cal_min'] = self.fm_data.min()\n nifti = nibabel.Nifti1Image(self.fm_data, None, nii_header)\n nibabel.save(nifti, outbase + '_B0.nii.gz')", "def preprocess_nico(path: Path) -> None:\n for superclass in (\"animals\", \"vehicles\"):\n superclass_dir = path / superclass\n for class_dir in superclass_dir.glob(\"*\"):\n for context_dir in class_dir.glob(\"*\"):\n images_paths: list[Path] = []\n for ext in (\"jpg\", \"jpeg\", \"png\", \"gif\"):\n images_paths.extend(context_dir.glob(f\"**/*.{ext}\"))\n for counter, image_path in enumerate(images_paths):\n try:\n image = Image.open(image_path)\n if image.format == \"GIF\":\n image = image.convert(\"RGBA\")\n # Convert from gif to jpeg by extracting the first frame\n new_image = _gif_to_jpeg(image)\n new_image_path = image_path.with_suffix(\".jpg\")\n # Delete the original gif\n image_path.unlink()\n new_image.save(new_image_path, \"JPEG\")\n assert new_image_path.exists()\n image_path = new_image_path\n\n concept = image_path.parent.parent.stem\n context = image_path.parent.stem\n new_name = (\n image_path.parent\n / f\"{concept}_{context}_{counter:04}{image_path.suffix}\".replace(\n \" \", \"_\"\n )\n )\n image_path.rename(new_name)\n # Image is corrupted - delete it\n except UnidentifiedImageError:\n image_path.unlink()", "def create_brainmask(registered_images, truncate_intensity=(.01, .99), verbose=True, antsxnet_cache_directory=None):\n\n preprocessed_image = ants.image_clone(registered_images)\n if antsxnet_cache_directory is None:\n antsxnet_cache_directory = \"ANTsXNet\"\n\n # Truncate intensity\n if truncate_intensity is not None:\n quantiles = (preprocessed_image.quantile(truncate_intensity[0]),\n preprocessed_image.quantile(truncate_intensity[1]))\n if verbose:\n print(\"Preprocessing: truncate intensities ( low =\", quantiles[0], \", high =\", quantiles[1], \").\")\n\n preprocessed_image[preprocessed_image < quantiles[0]] = quantiles[0]\n 
preprocessed_image[preprocessed_image > quantiles[1]] = quantiles[1]\n\n # Brain extraction\n if verbose:\n print(\"Preprocessing: brain extraction.\")\n probability_mask = antspynet.brain_extraction(preprocessed_image,\n antsxnet_cache_directory=antsxnet_cache_directory,\n verbose=verbose)\n mask = ants.threshold_image(probability_mask, 0.5, 1, 1, 0)\n\n return preprocessed_image, mask" ]
[ "0.5878483", "0.5656656", "0.5646914", "0.5597715", "0.5594963", "0.55353117", "0.5508064", "0.54495406", "0.54446286", "0.5424856", "0.5355759", "0.5332224", "0.5326884", "0.53148586", "0.52936196", "0.5291717", "0.52617556", "0.5258072", "0.5257662", "0.525628", "0.52407", "0.52356297", "0.5216156", "0.5215271", "0.518692", "0.5172915", "0.516383", "0.51601756", "0.51597375", "0.51367646" ]
0.5975682
0
Create the fieldmap(s) and the corresponding magnitude images.
def MakeFieldmaps(self): if self.verbose: print 'Compute fieldmaps.' for entry in self.info: if self.info[entry]['type'] == 'fmap': if self.info[entry]['imgfile'] == None: # Fieldmap data not found. return # Make a magnitude image for use in checking registration. cmd = 'convert_file -f0 -m0 %s %s nii' % \ (entry, self.info[entry]['magfile']) self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii']) # Make fieldmap. Use separate loop in case make_fmap aborts. for entry in self.info: if self.info[entry]['type'] == 'fmap': fmapname = self.info[entry]['imgfile'] if not os.path.exists('%s.nii' % fmapname) or self.redo: # Couldn't find or existing fmap, compute a new one. if self.verbose: extra_args = '-v' else: extra_args = '' if self.info[entry]['correct_fmap_phase'] == 'force': extra_args += ' --force-slicecorr' elif self.info[entry]['correct_fmap_phase'] == 'omit': extra_args += ' --omit-slicecorr' cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname) # error = self.ExecCmd(cmd, halt_on_error=False) if self.no_fmapcorr: halt_on_error = False else: halt_on_error = True error = self.CheckExec(cmd, ['%s.nii' % fmapname], \ halt_on_error=halt_on_error) if error: self.info[entry]['valid'] = False del self.fmaps[entry]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n self.info[self.norm_src]['suffix']\n source = info['magfile'] + info['suffix']\n matfile = info['matfile']\n fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \\\n '-source %s -cost mi -warp shift_rotate'\n cmd = fmt % (info['matfile'], target, source)\n self.CheckExec(cmd, [info['matfile']])\n\n# Convert to unitary matrix (remove scaling component.)\n cmd = 'cat_matvec -ONELINE %s -P > %s' % \\\n (info['matfile'], info['matfile_unitary'])\n self.CheckExec(cmd, [info['matfile_unitary']])\n\n# Rotate the magnitude image to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['magfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['magfile'] + info['suffix'])\n self.CheckExec(cmd, [info['magfile_r']+info['suffix']])\n\n# Rotate the fieldmap to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['imgfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['imgfile'] + info['suffix'])\n self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])", "def calcMagneticFieldMap(self):\n # Normalised b-field (note lower case)\n self.solenoid.calcMagneticFieldMap()\n self.b = lambda z: self.solenoid.B_interp(z) * -e / (2 * m * c)\n self.calc_level = CALC_B_MAP", "def _get_magnitudes(self):\n\n self.logging.debug('Get magnitudes ' )\n\n self.mags = {}\n\n steps = ['dbopen netmag', 'dbsubset orid != NULL']\n\n fields = ['orid', 'magid', 'magnitude', 'magtype',\n 'auth', 'uncertainty', 'lddate']\n\n for v in extract_from_db(self.db, steps, fields):\n orid = v.pop('orid')\n self.logging.debug('new mag for orid:%s' % orid)\n\n try:\n v['strmag'] = '%0.1f %s' % ( float(v['magnitude']), v['magtype'] )\n except:\n v['strmag'] = '-'\n\n if not orid in self.mags:\n self.mags[ orid ] = {}\n\n self.mags[ orid ][ v['magid'] ] = v", "def _SetFmapInfo(self):\n for epi in self.pfiles + self.epirt_paths:\n self.info[epi]['fmapname'] = None\n self.info[epi]['fmap_entry'] = None\n for entry in self.entry_map['fmap']:\n fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix']\n if self.info[entry]['plane'] == self.info[epi]['plane']:\n# Use the fieldmap acquired at the same plane.\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n else:\n# for fmap in self.fmaps.keys():\n for entry in self.entry_map['fmap']:\n# No fmap at same orientation, look for fmaps in other planes.\n# There won't be more than one, so it isn't much of a choice.\n fmap_name = self.info[entry]['imgfile'] + \\\n self.info[entry]['suffix']\n if self.info[entry]['plane'] == 'sagittal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'axial':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'coronal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'oblique':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n self.info[epi]['plane'] = 'oblique'\n break", "def _fractalize(self, f, compMap):\n\n from PIL import Image\n\n def toImage(cmObject):\n \"\"\"cmObject is the ComplexMap instance\"\"\"\n size = 
self.gridsize, self.gridsize\n cm = cmObject()\n master = []\n for item in cm:\n master.extend(item)\n\n #Apply default Mandelbrot Set Function\n master = map(f, master)\n\n col1 = (0,0,102,0)\n col2 = (255,204,51,0)\n\n def select_color(x):\n if x == 1: return col1\n else: return col2\n\n master = map(select_color, master)\n \n image = Image.new(\"RGBA\", size, (0,0,0,0))\n image.putdata(master)\n return image\n\n image_width = 0\n image_height = 0\n image_list = []\n #Unpack row\n for (y, row) in enumerate(compMap):\n image_row = []\n\n #Unpack columns\n for item in row:\n #Unpack the individual\n image_row.append(toImage(item))\n\n width = len(image_row) * self.gridsize\n height = self.gridsize\n row_holder_image = Image.new(\"RGBA\", (width, height), (0,0,0,0)) \n\n for (n, image) in enumerate(image_row):\n row_holder_image.paste(image, ((n*self.gridsize),0))\n\n image_list.append(row_holder_image)\n \n image_width = width\n image_height = len(image_list) * self.gridsize\n\n image_whole = Image.new(\"RGBA\", (image_width, image_height), (0,0,0,0))\n for (n, image) in enumerate(image_list):\n image_whole.paste(image, (0, (n*self.gridsize)))\n image_whole.save(\"fractal.jpg\", \"JPEG\")\n\n return", "def update_maps(self):\n if self.fmodel is None:\n return\n def fft_map(map_coeffs, resolution_factor = 0.25):\n return map_coeffs.fft_map(resolution_factor = resolution_factor,\n ).apply_sigma_scaling().real_map_unpadded()\n map_types = [\"2mFo-DFc\", \"mFo-DFc\"]\n map_keys = [\"2mFo-DFc\", \"mFo-DFc\"]\n if (self.fmodel.f_obs().anomalous_flag()):\n if (self.params.anom_map_type == \"phaser\"):\n map_types.append(\"llg\")\n elif (self.params.anom_map_type == \"residual\"):\n map_types.append(\"anom_residual\")\n else :\n map_types.append(\"anom\")\n map_keys.append(\"anom\")\n if (self.use_svm):\n map_types.append(\"mFo\")\n map_keys.append(\"mFo\")\n # To save memory, we sample atomic positions immediately and throw out\n # the actual maps (instead of keeping up to 3 in memory)\n sites_frac = self.xray_structure.sites_frac()\n sites_cart = self.xray_structure.sites_cart()\n self._principal_axes_of_inertia = [ None ] * len(sites_frac)\n self._map_variances = [ None ] * len(sites_frac)\n self._map_gaussian_fits = {}\n self.calpha_mean_two_fofc = 0\n for map_type, map_key in zip(map_types, map_keys):\n real_map = self.get_map(map_type)\n if (real_map is not None):\n # Gather values for map peaks at each site\n self._map_values[map_key] = flex.double(sites_frac.size(), 0)\n self._map_gaussian_fits[map_key] = [ None ] * len(sites_frac)\n for i_seq, site_frac in enumerate(sites_frac):\n atom = self.pdb_atoms[i_seq]\n resname = atom.fetch_labels().resname.strip().upper()\n if (resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED or\n atom.segid.strip().upper() in [\"ION\"]):\n value = real_map.eight_point_interpolation(site_frac)\n self._map_values[map_key][i_seq] = value\n if (self.use_svm):\n gaussian_fit = utils.fit_gaussian(\n unit_cell=self.unit_cell,\n site_cart=atom.xyz,\n real_map=real_map)\n self._map_gaussian_fits[map_key][i_seq] = gaussian_fit\n\n if map_type in [\"2mFo-DFc\"]:\n # Gather values on map variance and principal axes of interia\n from cctbx import maptbx\n for i_seq, site_cart in enumerate(sites_cart):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n if resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED:\n # XXX not totally confident about how I'm weighting this...\n p_a_i = maptbx.principal_axes_of_inertia(\n real_map = real_map,\n site_cart = site_cart,\n 
unit_cell = self.unit_cell,\n radius = self.params.map_sampling_radius)\n self._principal_axes_of_inertia[i_seq] = p_a_i\n variance = maptbx.spherical_variance_around_point(\n real_map = real_map,\n unit_cell = self.unit_cell,\n site_cart = site_cart,\n radius = self.params.map_sampling_radius)\n self._map_variances[i_seq] = variance\n elif (i_seq in self.calpha_sel):\n # Also collect some info in average C_alpha 2FoFc peak heights\n self.calpha_mean_two_fofc += real_map.eight_point_interpolation(\n sites_frac[i_seq])\n del real_map\n\n if (self.calpha_mean_two_fofc > 0):\n n_calpha = len(self.calpha_sel)\n assert (n_calpha > 0)\n self.calpha_mean_two_fofc /= n_calpha\n\n # Gather info on carbons' average Fo peak height for use in estimating other\n # sites' atomic weight\n self.carbon_fo_values = None\n if (len(self.carbon_sel) > 0):\n self.carbon_fo_values = flex.double()\n self._map_values[\"mFo\"] = flex.double(sites_frac.size(), 0)\n fo_map = fft_map(self.fmodel.map_coefficients(\n map_type = \"mFo\",\n exclude_free_r_reflections = True,\n fill_missing = True))\n\n for i_seq, site_frac in enumerate(sites_frac):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n element = self.pdb_atoms[i_seq].element.strip()\n if (element == \"C\") or ((element == \"O\") and (resname in WATER_RES_NAMES)):\n map_value = fo_map.eight_point_interpolation(site_frac)\n self._map_values[\"mFo\"][i_seq] = map_value\n if (element == \"C\"):\n self.carbon_fo_values.append(map_value)\n del fo_map", "def _make_image_info_hst(self, flistname):\n\n flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n #fname=line.strip()\n flist.append(fname)\n magzp_list.append(magzp)\n\n magzp = np.array(magzp_list)\n\n nimage = len(flist)\n\n path_len = max([len(f) for f in flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f.replace('sci.fits','wht.fits')\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. 
Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def recon(self, spirec):\n tmpdir = tempfile.mkdtemp()\n basename = 'recon'\n basepath = os.path.join(tmpdir, basename)\n pfilename = os.path.abspath(self.pfilename)\n\n # run spirec to get the mag file and the fieldmap file\n cmd = spirec + ' -l --rotate -90 --magfile --savefmap2 --b0navigator -r ' + pfilename + ' -t ' + basename\n self.log and self.log.debug(cmd)\n sp.call(shlex.split(cmd), cwd=tmpdir, stdout=open('/dev/null', 'w'))\n\n self.image_data = np.fromfile(file=basepath+'.mag_float', dtype=np.float32).reshape([self.size_x,self.size_y,self.num_timepoints,self.num_echoes,self.num_slices],order='F').transpose((0,1,4,2,3))\n if os.path.exists(basepath+'.B0freq2') and os.path.getsize(basepath+'.B0freq2')>0:\n self.fm_data = np.fromfile(file=basepath+'.B0freq2', dtype=np.float32).reshape([self.size_x,self.size_y,self.num_echoes,self.num_slices],order='F').transpose((0,1,3,2))\n shutil.rmtree(tmpdir)", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def get_sigma_map(start_x = 0,field_height=100,field_width=100,viewing_distance=12.0,screen_pixel_size=0.282,debug=False):\n start_x_pixels = np.round(get_pixels_at_degrees(degrees=start_x,viewing_distance=viewing_distance,screen_pixel_size=screen_pixel_size))\n optical_nodal_distance = 17.0 # mm from lens to fovea\n viewing_distance_inches = viewing_distance\n viewing_distance = viewing_distance * 25.4 # mm\n center_y, center_x = 0,0\n x_coords = (start_x_pixels + np.arange(-field_width/2.0,field_width/2,1))*screen_pixel_size\n y_coords = np.arange(-field_height/2.0,field_height/2,1)*screen_pixel_size\n x,y = np.meshgrid(x_coords,y_coords)\n coords = 
np.vstack((y.ravel(),x.ravel())).T\n\n image_dist = cdist(np.matrix([center_y,center_x]),coords)\n fovea_dist = (np.pi/180.0)*optical_nodal_distance*get_degrees_at_pixels(pixels=image_dist/screen_pixel_size,viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n midget_dendritic_field_diameter_micrometers = 8.64 * np.power(fovea_dist,1.04) # midget from Dacey and Peterson, 1994\n midget_dendritic_field_diameter_millimeters = midget_dendritic_field_diameter_micrometers/1000.0\n midget_projected_field_diameter_on_image = get_pixels_at_degrees(degrees=start_x+np.degrees(np.arctan((midget_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size) - get_pixels_at_degrees(degrees=start_x-np.degrees(np.arctan((midget_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n\n midget_sigma_map = midget_projected_field_diameter_on_image / 6.0 # ensures 99.7% of dendrites are connected to field diameter\n midget_sigma_map = midget_sigma_map.reshape((field_height,field_width))\n\n parasol_dendritic_field_diameter_micrometers = 70.2 * np.power(fovea_dist,0.65) # parasol from Dacey and Peterson, 1994\n parasol_dendritic_field_diameter_millimeters = parasol_dendritic_field_diameter_micrometers/1000.0\n parasol_projected_field_diameter_on_image = get_pixels_at_degrees(degrees=start_x+np.degrees(np.arctan((parasol_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size) - get_pixels_at_degrees(degrees=start_x-np.degrees(np.arctan((parasol_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n parasol_sigma_map = parasol_projected_field_diameter_on_image / 6.0 # ensures 99.7% of dendrites are connected to field diameter\n parasol_sigma_map = parasol_sigma_map.reshape((field_height,field_width))\n\n return midget_sigma_map,parasol_sigma_map", "def __build_map(self):\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n self.map.append(columns)", "def render_map(self):\n # first we create a blank image, on which we will draw the base map\n width = self.image_size[0]\n height = self.image_size[1]\n # ex: size of the image 1080 height, 1920 width, 3 channels of colour\n base_map = np.zeros((height, width, 3), np.uint8)\n base_map[:, :] = self.background_color\n\n # we draw each shape of the dictionary on the blank image\n for shape_id in self.shape_dict_filt:\n shape = self.shape_dict_filt[shape_id]\n points = shape.points\n pts = np.array(points, np.int32)\n cv2.polylines(base_map, [pts], True, shape.color_line,\n shape.line_thick, cv2.LINE_AA)\n\n self.map_file = base_map", "def _standard_mapping(self):\n mapping_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/mapping.mat'))\n self.camvidMap = mapping_raw['camvidMap'] * 255\n self.cityscapesMap = mapping_raw['cityscapesMap'] * 255", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def field_map(ar_field, ar_coorx, ar_coory, X, picture_out, title, flip=0):\n max_val=max(ar_field)\n\n xmin=min(ar_coorx);xmax=max(ar_coorx)\n 
ymin=min(ar_coory);ymax=max(ar_coory)\n step=X\n nx=(xmax-xmin)/step+1\n ny=(ymax-ymin)/step+1\n\n ar_indx=np.array((ar_coorx-xmin)/step,int)\n ar_indy=np.array((ar_coory-ymin)/step,int)\n\n ar_map=np.ones((ny,nx))*-99.9\n ar_map[ar_indy,ar_indx]=ar_field\n\n if flip==1:\n ar_map=np.flipud(ar_map)\n\n ar_map2 = ma.masked_where(ar_map <0, ar_map)\n\n\n ut.check_file_exist(picture_out)\n\n pl.clf()\n pl.imshow(ar_map2, interpolation='Nearest',\n origin='lower', vmax=max_val,vmin=0)\n\n pl.title(title)\n pl.colorbar()\n pl.savefig(picture_out)", "def compose_fieldmap(rf1, rf2):\n offset1, size1, step1 = rf1\n offset2, size2, step2 = rf2\n\n size = tuple((size2c - 1) * step1c + size1c\n for size1c, step1c, size2c in zip(size1, step1, size2))\n offset = tuple(offset2c * step1c + offset1c\n for offset2c, step1c, offset1c in zip(offset2, step1, offset1))\n step = tuple(step2c * step1c\n for step1c, step2c in zip(step1, step2))\n return (offset, size, step)", "def _createMap(self,dimensions, density):\n compMap = []\n xmin, xmax = dimensions[0], dimensions[1]\n imin, imax = dimensions[2], dimensions[3]\n\n #Ideally the hsteps and the vsteps are the same\n hsteps = int((xmax - xmin)/density)\n vsteps = int((imax - imin)/density)\n\n for im in range(vsteps):\n compMap.append([])\n for x in range(hsteps):\n myComplexPair = complex(xmin + (density * x), imin + (density * im))\n compMap[im].append(myComplexPair)\n compMap.reverse()\n return compMap", "def plot_maps(self, mode=0, target=1, gfilter=0):\r\n\r\n mpl.figure(1)\r\n\r\n mpl.imshow(self.avgimg, cmap=matplotlib.cm.gray, interpolation=None) # scipy.ndimage.gaussian_filter(ampmap, filter, order=0, mode='reflect'), cmap=matplotlib.cm.gray)\r\n\r\n mpl.colorbar()\r\n\r\n mpl.title('Average image')\r\n\r\n print ('target, mode: ', target, mode)\r\n\r\n max1 = np.amax(self.amplitudeImage1)\r\n\r\n if target > 1:\r\n\r\n max1 = np.amax([max1, np.amax(self.amplitudeImage2)])\r\n\r\n max1 = 10.0*int(max1/10.0)\r\n\r\n mpl.figure(2)\r\n\r\n mpl.subplot(2,2,4)\r\n\r\n ipy0, posl, coll = self.plot_averaged_amplitude()\r\n\r\n\r\n\r\n mpl.subplot(2,2,1)\r\n\r\n self.plot_amplitude_map(self.amplitudeImage1, max1, 'Amplitude Map1', filter=gfilter)\r\n\r\n mpl.subplot(2,2,3)\r\n\r\n self.plot_phase_map(self.phaseImage1, 'Phase Map1', filter=gfilter)\r\n\r\n for i, px in enumerate(posl):\r\n\r\n mpl.plot(px, self.ipy+ipy0, 'o-', markersize=5.0, markerfacecolor = coll[i], markeredgecolor='w')\r\n\r\n if target > 1:\r\n\r\n mpl.subplot(2,2,4)\r\n\r\n self.plot_phase_map(self.phaseImage1, 'Phase Map1', filter=gfilter)\r\n\r\n mpl.subplot(2,2,2)\r\n\r\n self.plot_fft()\r\n\r\n \r\n\r\n mpl.figure(3)\r\n\r\n mpl.title('Phase across center horizontally')\r\n\r\n # extract middle line\r\n\r\n sh = self.phaseImage1.shape\r\n\r\n iy0 = int(sh[1]/2)\r\n\r\n mpl.plot(self.phaseImage1[iy0, :], 'ko-')\r\n\r\n return\r\n\r\n \r\n\r\n if mode == 0:\r\n\r\n mpl.subplot(2,3,3)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(ta.n_times, self.DF[:,5,5].view(ndarray))\r\n\r\n #mpl.plot(self.n_times, D[:,i*55+20, 60])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('Waveforms')\r\n\r\n\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(ta.n_times, self.DF[:,5,5].view(ndarray))\r\n\r\n #mpl.plot(self.DF[:,i*55+20, 60])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('FFTs')\r\n\r\n\r\n\r\n if mode == 1 and target > 1:\r\n\r\n \r\n\r\n mpl.subplot(2,3,2)\r\n\r\n mpl.title('Amplitude Map2')\r\n\r\n #scipy.ndimage.gaussian_filter(self.amplitudeImage2, 2, 
order=0, output=self.amplitudeImage2, mode='reflect')\r\n\r\n imga2 = mpl.imshow(scipy.ndimage.gaussian_filter(self.amplitudeImage2, gfilter, order=0, mode='reflect'))\r\n\r\n imga2.set_clim = (0.0, max1)\r\n\r\n mpl.colorbar()\r\n\r\n mpl.subplot(2,3,5)\r\n\r\n imgp2 = mpl.imshow(scipy.ndimage.gaussian_filter(self.phaseImage2, gfilter, order=0, mode='reflect'), cmap=matplotlib.cm.hsv)\r\n\r\n mpl.colorbar()\r\n\r\n imgp2.set_clim=(-np.pi/2.0, np.pi/2.0)\r\n\r\n mpl.title('Phase Map2')\r\n\r\n # doubled phase map\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n #scipy.ndimage.gaussian_filter(self.phaseImage2, 2, order=0, output=self.phaseImage2, mode='reflect')\r\n\r\n np1 = scipy.ndimage.gaussian_filter(self.phaseImage1, gfilter, order=0, mode='reflect')\r\n\r\n np2 = scipy.ndimage.gaussian_filter(self.phaseImage2, gfilter, order=0, mode='reflect')\r\n\r\n dphase = np1 + np2\r\n\r\n #dphase = self.phaseImage1 - self.phaseImage2\r\n\r\n \r\n\r\n #scipy.ndimage.gaussian_filter(dphase, 2, order=0, output=dphase, mode='reflect')\r\n\r\n imgpdouble = mpl.imshow(dphase, cmap=matplotlib.cm.hsv)\r\n\r\n mpl.title('2x Phi map')\r\n\r\n mpl.colorbar()\r\n\r\n imgpdouble.set_clim=(-np.pi, np.pi)\r\n\r\n\r\n\r\n if mode == 2 or mode == 1:\r\n\r\n if self.phasex == []:\r\n\r\n self.phasex = np.random.randint(0, high=self.DF.shape[1], size=self.DF.shape[1])\r\n\r\n self.phasey = np.random.randint(0, high=self.DF.shape[2], size=self.DF.shape[2])\r\n\r\n\r\n\r\n mpl.subplot(2,3,3)\r\n\r\n sh = self.DF.shape\r\n\r\n spr = sh[2]/self.nPhases\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n Dm = self.avgimg[i*spr,i*spr] # diagonal run\r\n\r\n mpl.plot(self.n_times, 100.0*(self.DF[:,self.phasex[i], self.phasey[i]]/Dm))\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('Waveforms')\r\n\r\n\r\n\r\n if mode == 2:\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n sh = self.DF.shape\r\n\r\n x0 = int(sh[1]/2)\r\n\r\n y0 = int(sh[2]/2)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(self.DF[1:,x0,y0])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('FFTs')", "def __init__(self, mapfile, camera=None, light=None,\r\n width=100.0, depth=100.0, height=10.0,\r\n divx=0, divy=0, ntiles=1.0, name=\"\",\r\n x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,\r\n sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0, smooth=True, cubic=False):\r\n super(ElevationMap, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\r\n sx, sy, sz, cx, cy, cz)\r\n if divx > 200 or divy > 200:\r\n print(\"... Map size can't be bigger than 200x200 divisions\")\r\n divx = 200\r\n divy = 200\r\n if issubclass(type(mapfile), type(\"\")): #HORRIBLE. 
Only way to cope with python2v3\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n if VERBOSE:\r\n print(\"Loading height map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n else:\r\n im = mapfile #allow image files to be passed as mapfile\r\n ix, iy = im.size\r\n if (ix > 200 and divx == 0) or (divx > 0):\r\n if divx == 0:\r\n divx = 200\r\n divy = 200\r\n im = im.resize((divx, divy), Image.ANTIALIAS)\r\n ix, iy = im.size\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n self.pixels = im.load()\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.ix = ix\r\n self.iy = iy\r\n self.ttype = GL_TRIANGLE_STRIP\r\n\r\n if VERBOSE:\r\n print(\"Creating Elevation Map ...\", ix, iy)\r\n\r\n wh = width * 0.5\r\n hh = depth * 0.5\r\n ws = width / ix\r\n hs = depth / iy\r\n ht = height / 255.0\r\n tx = 1.0*ntiles / ix\r\n ty = 1.0*ntiles / iy\r\n\r\n verts = []\r\n norms = []\r\n tex_coords = []\r\n idx = []\r\n\r\n for y in xrange(0, iy):\r\n for x in xrange(0, ix):\r\n hgt = (self.pixels[x, y])*ht\r\n this_x = -wh + x*ws\r\n this_z = -hh + y*hs\r\n if cubic:\r\n \"\"\" this is a bit experimental. It tries to make the map either zero\r\n or height high. Vertices are moved 'under' adjacent ones if there is\r\n a step to make vertical walls. Goes wrong in places - mainly because\r\n it doesn't check diagonals\r\n \"\"\"\r\n if hgt > height / 2:\r\n hgt = height\r\n else:\r\n hgt = 0.0\r\n if hgt == 0 and y > 0 and y < iy-1 and x > 0 and x < ix-1:\r\n if self.pixels[x-1, y] > 127:\r\n this_x = -wh + (x-1)*ws\r\n elif self.pixels[x+1, y] > 127:\r\n this_x = -wh + (x+1)*ws\r\n elif self.pixels[x, y-1] > 127:\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x, y+1] > 127:\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x-1, y-1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x-1, y+1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x+1, y-1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x+1, y+1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y+1)*hs\r\n verts.append((this_x, hgt, this_z))\r\n tex_coords.append(((ix-x) * tx,(iy-y) * ty))\r\n\r\n s = 0\r\n #create one long triangle_strip by alternating X directions\r\n for y in range(0, iy-1):\r\n for x in range(0, ix-1):\r\n i = (y * ix)+x\r\n idx.append((i, i+ix, i+ix+1))\r\n idx.append((i+ix+1, i+1, i))\r\n s += 2\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))", "def getMagneticFieldMap(self):\n return self.solenoid.B_interp(self.z_array)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = 
int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def process(self, step_guess_orientation=True, step_advanced_alignement=True,\n step_gen_worldfiles=True, step_load_worldfiles=True,\n step_gen_vrts=True, step_load_vrts=True,\n step_load_debug=True ):\n\n QgsMessageLog.logMessage(\"1/ Instantiating all images...\", \"QuickDroneMap\", 0)\n for root, dirs, files in os.walk(self.folder):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".JPG\"):\n image_path = os.path.join(root, file)\n image = Image(self, image_path)\n self.images.append(image)\n self.images = self.images[70:90]\n # for i in [301,300,329]: # 3 images, transform fails on all of them\n # for i in [397,398,364]: # 3 images, transform fails on one of them\n # for i in [377,380,381]: # 3 images, transform works on all of them\n # path = \"C:\\\\Users\\\\Olivier\\\\Dropbox\\\\Affaires\\\\SPC\\\\Sources\\\\quickdronemap\\\\test\\\\data\\\\DJI_{0:04d}.JPG\".format(i)\n # self.images.append(Image(self, path))\n\n QgsMessageLog.logMessage(\"2/ Assigning ids\", \"QuickDroneMap\", 0)\n for i, image in enumerate(self.images):\n image.id = i\n\n\n QgsMessageLog.logMessage(\"2/ Loading image attributes and parsing exif tags...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.set_attributes()\n\n if step_guess_orientation:\n QgsMessageLog.logMessage(\"3/ Building image sequences...\", \"QuickDroneMap\", 0)\n sorted_images = sorted(self.images, key=lambda x: x.timestamp)\n for i in range(len(sorted_images)):\n\n prev_image = sorted_images[i-1] if i>0 else None\n image = sorted_images[i]\n next_image = sorted_images[i+1] if i<len(sorted_images)-1 else None\n\n if prev_image is None or next_image is None:\n continue\n\n angle_p_i = 
math.atan2(image.point.x()-prev_image.point.x(),-image.point.y()+prev_image.point.y())\n angle_i_n = math.atan2(next_image.point.x()-image.point.x(),-next_image.point.y()+image.point.y())\n\n # Checking if the three images are aligned (if not, we're probably at an angle)\n dA = absolute_angle_difference(angle_p_i, angle_i_n)\n if dA > ANGLE_THRESHOLD:\n continue\n\n # Checking if the three images are near enough timewise, if not, it could be separate flights\n dT1 = image.timestamp - prev_image.timestamp\n dT2 = next_image.timestamp - image.timestamp\n if dT1 > TIME_THRESHOLD or dT2 > TIME_THRESHOLD:\n continue\n\n prev_image.next_image = image\n image.prev_image = prev_image\n image.next_image = next_image\n next_image.prev_image = image\n\n QgsMessageLog.logMessage(\"4/ Deriving orientation from image sequence\", \"QuickDroneMap\", 0)\n for image in self.images:\n # if the direction wasn't set in the Exif tags, we derive it from the image sequences\n if image.direction is None:\n img_a = image.prev_image or image \n img_b = image.next_image or image\n image.angle = math.atan2(img_b.point.x()-img_a.point.x(),-img_b.point.y()+img_a.point.y())\n\n if step_advanced_alignement:\n QgsMessageLog.logMessage(\"5/ Building image neighbourhood graph...\", \"QuickDroneMap\", 0)\n from scipy.spatial import Delaunay\n points = [(i.point.x(),i.point.y()) for i in self.images]\n triangulation = Delaunay(points)\n\n done = [[False for _i2 in self.images] for _i1 in self.images]\n for tri in triangulation.simplices:\n i1,i2,i3 = tri\n if not done[i1][i2]:\n e = Edge(self.images[i1], self.images[i2])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i2].edges.append(e)\n done[i1][i2] = True\n if not done[i1][i3]:\n e = Edge(self.images[i1], self.images[i3])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i3].edges.append(e)\n done[i1][i3] = True\n if not done[i2][i3]:\n e = Edge(self.images[i2], self.images[i3])\n self.edges.append(e)\n self.images[i2].edges.append(e)\n self.images[i3].edges.append(e)\n done[i2][i3] = True\n\n QgsMessageLog.logMessage(\"6/ Computing similarities\", \"QuickDroneMap\", 0)\n for i, edge in enumerate(self.edges):\n QgsMessageLog.logMessage(\"Done {} out of {}\".format(i,len(self.edges)), \"QuickDroneMap\", 0)\n QApplication.processEvents()\n edge.compute_transform()\n\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # QgsMessageLog.logMessage(\"Initial fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n\n # print(\"TESTING QUALITY OF SIMILARITY (disable optimization to do this)\")\n # done = []\n # edges_to_delete = []\n # for edge in self.edges:\n # QApplication.processEvents()\n\n # if edge.imageA in done or edge.imageB in done:\n # edges_to_delete.append(edge)\n # continue\n\n # done.append(edge.imageA)\n # done.append(edge.imageB)\n\n # d_angle = edge.angle\n # edge.imageB.angle = edge.imageA.angle + d_angle\n\n # f_scale = edge.scale\n # edge.imageB.scale = edge.imageA.scale * f_scale\n\n # d_point = QgsPointXY(edge.tvec[0],edge.tvec[1])\n # d_point = d_point.rotated(edge.imageA.angle)\n # d_point *= edge.imageA.pixel_size/DOWNSCALING_FACTOR\n # edge.imageB.point = edge.imageA.point + d_point\n # for edge in edges_to_delete:\n # self.edges.remove(edge)\n\n\n # print(\"AFTER PROTOTYPE PLACEMENT\")\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # self.calculate_fitness(initial_guess_np)\n\n\n QgsMessageLog.logMessage(\"7/ Optimizing\", \"QuickDroneMap\", 0)\n 
QApplication.processEvents()\n\n initial_guess_np, bounds = self.get_initial_values_and_bounds() \n # res_1 = least_squares(calculate_fitness, initial_guess_np, bounds=([b[0] for b in bounds],[b[1] for b in bounds]))\n res_1 = minimize(self.calculate_fitness, initial_guess_np, bounds=bounds)\n\n for image in self.images:\n px = res_1.x[image.id*4+0]\n py = res_1.x[image.id*4+1]\n pa = res_1.x[image.id*4+2]\n ps = res_1.x[image.id*4+3]\n image.point = QgsPointXY(px, py)\n image.angle = pa\n image.psize = ps\n\n initial_guess_np, _ = self.get_initial_values_and_bounds()\n QgsMessageLog.logMessage(\"After optimization fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n \n QgsMessageLog.logMessage(\"8/ Computing all transforms...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.update_transform()\n\n if step_gen_worldfiles:\n QgsMessageLog.logMessage(\"9a/ Creating and loading worldfiles\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_worldfile()\n if step_load_worldfiles:\n image.load_worldfile(self.iface)\n\n if step_gen_vrts:\n QgsMessageLog.logMessage(\"9b/ Creating and loading vrts\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_vrt()\n if step_load_vrts:\n image.load_vrt(self.iface)\n\n if step_load_debug:\n QgsMessageLog.logMessage(\"10/ Creating debug jsons files\", \"QuickDroneMap\", 0)\n edg_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 32628}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.point.x(), edge.imageA.point.y()],[edge.imageB.point.x(), edge.imageB.point.y()]]\n props = {k:v for (k,v) in vars(edge).items()}\n props['angle_a'] = edge.imageA.angle\n props['angle_b'] = edge.imageB.angle\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n edg_data['features'].append(feature)\n \n edg_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(edg_data, edg_file, default=lambda o: str(o))\n edg_file.close()\n layer = self.iface.addVectorLayer(edg_file.name,\"[DEBUG] Edges\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_edges_style.qml'))\n \n graph_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 4326}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.lon, edge.imageA.lat],[edge.imageB.lon, edge.imageB.lat]]\n props = {k:v for (k,v) in vars(edge).items()}\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n graph_data['features'].append(feature)\n\n graph_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(graph_data, graph_file, default=lambda o: str(o))\n graph_file.close()\n layer = self.iface.addVectorLayer(graph_file.name,\"[DEBUG] Graph\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_graph_style.qml'))", "def add_field(self, img_dict):\n for k in img_dict.keys():\n assert k in self.bands, \"Celeste model doesn't support band %s\"%k\n self.field_list.append(Field(img_dict))", "def _createMap(self):\n width = self.map_size[0] * self.chunk_size\n height = self.map_size[1] * self.chunk_size\n map_array = np.zeros((height, width), dtype=float)\n chunks = {}\n clist = []\n for i in range(0, 
self.map_size[0]*self.map_size[1]):\n chunks[i+1] = Chunk(self)\n chunk_array = np.asarray(list(chunks.keys()))\n chunk_array.resize(self.map_size[0], self.map_size[1])\n return map_array, chunk_array, chunks", "def generate_winstonlutz_multi_bb_multi_field(\n simulator: Simulator,\n field_layer: type[Layer],\n dir_out: str,\n field_offsets: list[list[float]],\n bb_offsets: list[list[float]] | list[dict[str, float]],\n field_size_mm: tuple[float, float] = (20, 20),\n final_layers: list[Layer] | None = None,\n bb_size_mm: float = 5,\n image_axes: ((int, int, int), ...) = (\n (0, 0, 0),\n (90, 0, 0),\n (180, 0, 0),\n (270, 0, 0),\n ),\n gantry_tilt: float = 0,\n gantry_sag: float = 0,\n clean_dir: bool = True,\n jitter_mm: float = 0,\n align_to_pixels: bool = True,\n) -> list[str]:\n if not osp.isdir(dir_out):\n os.mkdir(dir_out)\n if clean_dir:\n for pdir, _, files in os.walk(dir_out):\n [os.remove(osp.join(pdir, f)) for f in files]\n file_names = []\n for gantry, coll, couch in image_axes:\n sim_single = copy.copy(simulator)\n for field_offset in field_offsets:\n offset_mm_left = field_offset[0] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_up = field_offset[1] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_in = -field_offset[2] + random.uniform(\n -jitter_mm, jitter_mm\n ) # negative because pixels increase as we go out, so to go in we subtract\n long_offset = bb_projection_long(\n offset_in=offset_mm_in,\n offset_up=offset_mm_up,\n offset_left=offset_mm_left,\n sad=1000,\n gantry=gantry,\n )\n gplane_offset = bb_projection_gantry_plane(\n offset_left=offset_mm_left,\n offset_up=offset_mm_up,\n sad=1000,\n gantry=gantry,\n )\n long_offset += gantry_tilt * cos(gantry)\n gplane_offset += gantry_sag * sin(gantry)\n if align_to_pixels:\n long_offset = pixel_align(sim_single.pixel_size, long_offset)\n gplane_offset = pixel_align(sim_single.pixel_size, gplane_offset)\n sim_single.add_layer(\n field_layer(\n field_size_mm=field_size_mm,\n cax_offset_mm=(long_offset, gplane_offset),\n )\n )\n for offset in bb_offsets:\n if isinstance(offset, dict):\n offset_mm_left = offset[\"offset_left_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n offset_mm_up = offset[\"offset_up_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n offset_mm_in = -offset[\"offset_in_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n else:\n offset_mm_left = offset[0] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_up = offset[1] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_in = -offset[2] + random.uniform(\n -jitter_mm, jitter_mm\n ) # negative because pixels increase as we go out, so to go in we subtract\n\n long_offset = bb_projection_long(\n offset_in=offset_mm_in,\n offset_up=offset_mm_up,\n offset_left=offset_mm_left,\n sad=1000,\n gantry=gantry,\n )\n gplane_offset = bb_projection_gantry_plane(\n offset_left=offset_mm_left,\n offset_up=offset_mm_up,\n sad=1000,\n gantry=gantry,\n )\n if align_to_pixels:\n long_offset = pixel_align(sim_single.pixel_size, long_offset)\n gplane_offset = pixel_align(sim_single.pixel_size, gplane_offset)\n sim_single.add_layer(\n PerfectBBLayer(\n cax_offset_mm=(\n long_offset,\n gplane_offset,\n ),\n bb_size_mm=bb_size_mm,\n )\n )\n if final_layers is not None:\n for layer in final_layers:\n sim_single.add_layer(layer)\n file_name = f\"WL G={gantry}, C={coll}, P={couch}; Field={field_size_mm}mm (shifts={field_offsets}); BB={bb_size_mm}mm @ left={offset_mm_left:.2f}, in={offset_mm_in:.2f}, up={offset_mm_up:.2f}; Gantry tilt={gantry_tilt}, Gantry 
sag={gantry_sag}.dcm\"\n sim_single.generate_dicom(\n osp.join(dir_out, file_name),\n gantry_angle=gantry,\n coll_angle=coll,\n table_angle=couch,\n )\n file_names.append(file_name)\n return file_names", "def analysis_dFF_map(self):\r\n\r\n \r\n\r\n print ('Starting dF/F analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # smoothwin = int(self.imageData.shape[1]/8.)\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n \r\n\r\n mpl.figure(99)\r\n\r\n mpl.imshow(avgimg, vmin=0, vmax=np.max(np.max(avgimg, axis=0), axis=0))\r\n\r\n # self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n # self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n imgdatasm = scipy.ndimage.filters.gaussian_filter(self.imageData,[0,2,2],order=0,output=None,mode='reflect',cval=0.0,truncate=4.0)\r\n # field correction: smooth the average image, subtract it from the imagedata, then add back the mean value\r\n avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n #self.imageData = (self.imageData-avgimgsm)+ self.meanimagevalue\r\n\r\n mpl.figure(98)\r\n mpl.imshow(avgimgsm,vmin=0, vmax=np.max(np.max(avgimgsm, axis=0), axis=0))\r\n mpl.figure(97)\r\n mpl.imshow(np.mean(imgdatasm,axis=0))\r\n self.n_times = self.timebase\r\n\r\n periodsize = int(self.period*self.framerate)\r\n print('periodsize: ',periodsize)\r\n\r\n # windowsize = int(self.freqperiod*self.framerate) # window size for every response\r\n\r\n # r = range(0, self.imageData.shape[0], windowsize)\r\n\r\n sig = np.reshape(imgdatasm, (self.nrepetitions, periodsize, \r\n\r\n self.imageData.shape[1], self.imageData.shape[2]), order='C')\r\n\r\n delresp=np.zeros([19,256,256])\r\n repback = np.mean(sig[:,1:4,:,:],axis=1)\r\n resp = np.mean(sig[:,5:9,:,:],axis=1)\r\n for counter in range(19):\r\n delresp[counter,:,:]=(resp[counter,:,:]-repback[counter,:,:])/repback[counter,:,:]\r\n quot=np.mean(delresp,axis=0)\r\n quot=-quot\r\n print ('shape of quot: ', np.shape(quot))\r\n # quot=(resp-repback)/repback\r\n # quot[quot>0]=0\r\n # quot=-1000*quot\r\n\r\n mpl.figure(7)\r\n mpl.imshow(quot,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n\r\n quotsm = scipy.ndimage.filters.gaussian_filter(quot, 3, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n mpl.figure(8)\r\n mpl.imshow(quotsm,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n \r\n # bl = np.mean(sig[:, range(0, sig.shape[1], windowsize), :, :], axis=0)\r\n\r\n # bl = scipy.ndimage.filters.gaussian_filter(bl, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n\r\n\r\n # print (' windowsize: ', windowsize)\r\n\r\n # print (' periodsize: ', periodsize)\r\n # mc = matplotlib.cm\r\n\r\n # only use sequential maps here\r\n\r\n # clist = [mc.Reds, mc.YlOrBr, mc.Oranges, mc.Greens, mc.GnBu, mc.Blues, mc.RdPu, mc.Purples,mc.Reds,mc.Greens,mc.Blues,mc.Reds,mc.Reds,mc.Reds,mc.Reds]\r\n # clist2 = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet', 'black','red','purple','green','blue','red','red','red','red']\r\n\r\n cs = {}\r\n\r\n # sigd = np.zeros((bl.shape[0], sig.shape[2], sig.shape[3]))\r\n# \r\n # localmax = {}\r\n\r\n # sigmax = 0.\r\n# \r\n # kernel = np.ones((5, 5))\r\n\r\n # psf 
= kernel / np.sum(kernel)\r\n\r\n # compute dF/F, and get maximum over all frequencies\r\n\r\n print (' sig shape: ', sig.shape)\r\n\r\n # print (' bl shape: ', bl.shape)\r\n\r\n # smax = np.zeros(bl.shape[0])\r\n\r\n # for i in range(bl.shape[0]):\r\n\r\n # sigd[i] = (np.mean(np.max(sig[:,range(i*windowsize, i*windowsize+windowsize),:,:], axis=0), axis=0) - bl[i,:,:])/bl[i,:,:]\r\n\r\n # sigd[i] = sigd[i]**2.0\r\n\r\n # smooth\r\n\r\n #sigd[i] = scipy.ndimage.filters.gaussian_filter(sigd[i], 1., order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # deconvolve\r\n\r\n # sigd[i] = restoration.richardson_lucy(sigd[i], psf, 5)\r\n\r\n# sm = sigd[i].max().max()\r\n\r\n# if sm > sigmax:\r\n\r\n# sigmax = sm\r\n\r\n# smax[i] = sm\r\n\r\n# print( ' i, sm: ', i, sm)\r\n\r\n# # now process for display\r\n\r\n# print (' sigd shape: ', sigd.shape)\r\n\r\n# wdat = np.mean(sig, axis=0)\r\n\r\n# wds = wdat.shape\r\n\r\n# print('wdat shape: ', wds)\r\n\r\n# # print (range(int(wds[1]/2.), int(3.*wds[1]/4.)), range(int(wds[2]/2.), int(3.*wds[2]/4.)))\r\n\r\n# print( 'reduced shape: ', wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))].shape)\r\n\r\n# wp = wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))]\r\n\r\n# wp = np.mean(np.mean(wdat, axis=1), axis=1)\r\n\r\n# mpl.figure(1)\r\n\r\n# mpl.plot(np.linspace(0., len(wp)*1./self.framerate, num=len(wp)), wp)\r\n\r\n\r\n\r\n# mpl.figure(2)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# sigd[i][sigd[i] < self.threshold*sigmax] = 0.\r\n\r\n# # find center of mass of areas above threshold\r\n\r\n# # mass = sigd[i].copy()\r\n\r\n# # mass[sigd[i] > 0.] = 1.\r\n\r\n# # structuring_element = [[0,1,0],[1,1,1],[0,1,0]]\r\n\r\n# # segmentation, segments = scipy.ndimage.label(mass, structuring_element)\r\n\r\n# # coords = scipy.ndimage.center_of_mass(sigd[i], segmentation, range(1,segments+1))\r\n\r\n# # xcoords = np.array([x[1] for x in coords])\r\n\r\n# # ycoords = np.array([x[0] for x in coords])\r\n\r\n# # cs[i] = (xcoords, ycoords)\r\n\r\n\r\n\r\n# # Calculating local maxima\r\n\r\n# lm = skif.peak_local_max(sigd[i], min_distance=2, threshold_rel=0.25, exclude_border=False, \r\n\r\n# indices=True, num_peaks=10, footprint=None, labels=None)\r\n\r\n# localmax[i] = [(m[0], m[1], sigd[i][(m[0], m[1])]) for m in lm]\r\n\r\n# # print ('i, local max: ',i, localmax)\r\n\r\n# mpl.subplot(5,5,i+1)\r\n# print ('shape of sigd: ',[np.shape(sigd),i])\r\n\r\n# imga1 = mpl.imshow(sigd[i], cmap=clist[i], vmin=0, origin='lower')\r\n\r\n# if len(localmax[i]) > 0:\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# else:\r\n\r\n# continue\r\n\r\n# scattersize = 30.\r\n\r\n# for k, lm in enumerate(localmax[i]):\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], edgecolors='k',\r\n\r\n# s=scattersize*lm[2]/max_fr, linewidths=0.125, alpha=0.5)\r\n\r\n# mpl.subplot(6,5,i+15+1)\r\n\r\n# wr = range(i*windowsize, i*windowsize+windowsize)\r\n\r\n# # print (' wr: len, min max: ', len(wr), min(wr), max(wr))\r\n\r\n# wmax = 0.\r\n\r\n# for lmax in localmax[i]: # was xcoords\r\n\r\n# wave = wdat[wr, lmax[0],lmax[1]]\r\n\r\n# wdff = (wave-wave[0])/wave[0]\r\n\r\n# if np.max(wdff) > wmax:\r\n\r\n# wmax = np.max(wdff)\r\n\r\n# mpl.plot(np.linspace(0., len(wave)*1./self.framerate, num=len(wave)),\r\n\r\n# wdff, color=clist2[i])\r\n\r\n# mpl.ylim(-0.1*wmax, wmax)\r\n\r\n# fig = mpl.figure(3)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# if len(localmax[i]) == 
0:\r\n\r\n# continue\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# for lm in localmax[i]:\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], \r\n\r\n# s=scattersize*lm[2]/max_fr, alpha=0.5, edgecolors='k')\r\n\r\n# mpl.ylim(0, sigd.shape[2])\r\n\r\n# mpl.xlim(0, sigd.shape[1])\r\n\r\n# mpl.axis('equal')\r\n\r\n mpl.show()\r\n\r\n print (' DF/F analysis finished.\\n')", "def make_input_map(self) :\n\n self.input_map = \"\"\n stencil = self.core.stencil\n pattern = self.core.pattern\n reflect = len(pattern)+1 # reflector id, last material\n N = self.dimension\n coremap = np.zeros((N+2,N+2), dtype='i')\n \n # reflections and vacuum\n coremap[0, 1:N+1] = -1 \n coremap[1:N+1, 0] = -1\n coremap[N+1, 1:N+1] = -2\n coremap[1:N+1, N+1] = -2\n \n fuelindex = 0\n \n for i in range(1, N+1) :\n for j in range(1, N+1) :\n if j == 1 and i > 1 :\n pass\n else :\n if stencil[i-1, j-1] > 0 : # a fuel\n coremap[i, j] = pattern[fuelindex]+1\n fuelindex += 1\n elif stencil[i-1, j-1] == 0 : # a reflector\n coremap[i, j] = reflect\n else : # a void\n pass \n # Copy elements such that rotational symmetry is enforced. \n for j in range(2, N+1) :\n coremap[j, 1] = coremap[1, j]\n for i in range(0, N+2) :\n for j in range(0, N+2) :\n self.input_map +='%4i' % (coremap[i, j])\n self.input_map += '\\n'" ]
[ "0.677067", "0.64014757", "0.62887424", "0.6082136", "0.58927894", "0.58386827", "0.58132875", "0.57924163", "0.57924163", "0.57452226", "0.5743414", "0.57319504", "0.57206607", "0.57153034", "0.563697", "0.5628693", "0.56157184", "0.5559617", "0.55475587", "0.5537074", "0.55315846", "0.55233526", "0.5520516", "0.5520516", "0.5520156", "0.5495048", "0.5467132", "0.54520327", "0.54375637", "0.5427407" ]
0.7716374
0
Create links to BRIK, HEAD, and .nii files.
def LinkFiles(self, srcdir, target): if '+orig' in target: tgt_prefix = target.replace('.BRIK','') tgt_prefix = tgt_prefix.replace('.HEAD','') linkfiles = ['%s.HEAD'%tgt_prefix, '%s.BRIK' %tgt_prefix] else: linkfiles = [target] for linkfile in linkfiles: linkname = '%s/%s' % (srcdir, os.path.basename(linkfile)) rel_linkdir = abspath_to_relpath(os.path.dirname(target), srcdir) rel_linkfile = '%s/%s' % (rel_linkdir, os.path.basename(linkfile)) if not os.path.exists(linkname) and not os.path.islink(linkname): cmd = 'cd %s && ln -s %s %s' % (srcdir, rel_linkfile, linkname) self.ExecCmd(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_links(self):\n for line in self.iter_files_to_install():\n arcname, link = line.split()\n if link == 'False':\n continue\n self.files.append(create_link(arcname, link, self.prefix))", "def makeLinks(self):\n self.deleteIndexFileIfExists()\n _fileNames = self.getHTMLFileNames()\n _msgPart1 = \"<a href=\\\"\"\n _msgPart2 = \"\\\" target=\\\"loadHTMLResults\\\">\"\n _msgPart3 = \"</a><br>\"\n _link = \"\"\n for _fileName in _fileNames:\n _origFileName = _fileName\n _linkName = _fileName.split('.')[0]\n _createAnchorTag = (_msgPart1+str(_origFileName)+_msgPart2+str(_linkName)+_msgPart3)\n _link = _link + _createAnchorTag\n return _link", "def make_links(self):\n for filepath in list(self):\n self.make_link(filepath)", "def _generate_links(self):\n index = 0\n links = \"\"\n for ch in self.text:\n if ch == '[':\n links += \"(^\"\n elif ch == ']':\n links += \")$|\"\n index += 1\n elif links[-1:] != '|' and links != \"\":\n links += ch\n self.links = compile(links[:-1].lower())", "def __add_gitlinks(self, gitlinks):\n for sha1, path in gitlinks:\n if sha1 == p4gf_const.NULL_COMMIT_SHA1:\n self.__append(\"D {}\\n\".format(path))\n else:\n self.__append(\"M 160000 {0} {1}\\n\".format(sha1, path))", "def update_symlinks(n):\n\tif n > 0: return\n\tsymlink_dir = sc.text_image_symlink_dir.absolute()\n\tfor tpi, info in sorted(index.items(), key=lambda t: t[0]):\n\t\tsymlink = symlink_dir / info['url']\n\t\tif symlink.is_symlink():\n\t\t\tif symlink.resolve() == info['file']:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tsymlink.unlink()\n\t\tif not symlink.parent.exists():\n\t\t\tsymlink.parent.mkdir(parents=True)\n\t\tsymlink.symlink_to(info['file'])", "def gen_links(text):\n return []", "def make_franny_symlinks(src_dirs, out_dir):\n\n for path, dirs, files in chain.from_iterable(os.walk(path)\n for path in src_dirs):\n print('Looking in %s' % path)\n for sta in ['NS12', 'NS13', 'NS14']:\n for filename in fnmatch.filter(files, '*.%s*' % sta):\n net = filename.split('.')[-7]\n chan = filename.split('.')[-4]\n if chan[-1] == 'N':\n new_chan = 'EH1'\n elif chan[-1] == 'E':\n new_chan = 'EH2'\n else:\n continue\n mseed_nm = filename.split('/')[-1]\n new_mseed = string.replace(mseed_nm, chan, new_chan)\n old_path = os.path.join(path, filename)\n new_path = '%s/%s/%s/%s.D/%s' % (out_dir, net,\n sta, new_chan, new_mseed)\n\n print('Creating symlink for file %s at %s'\n % (old_path, new_path))\n spwd = '*blackmore89'\n cmnd = 'sudo -S ln %s %s' % (old_path, new_path)\n os.system('echo %s | %s' % (spwd, cmnd))\n return", "def makeBackrefLink(self, info, g_links, i):\n atts, content, infoid, link = '', '', '', ''\n if 'def' in info:\n link = info['def']['link']\n backlink_type = link or g_links\n i_ = self.encode_high(i)\n allow_inc = i not in self.syms\n i_ = int(i_)\n\n if backlink_type == \"!\":\n return ''\n elif backlink_type == '^':\n return \"\"\"<sup><a href=\"#noteref%s\">%s</a></sup>\"\"\" % (\n info['refids'][0], i\n )\n else:\n result = []\n for refid in info['refids']:\n i_entity = self.decode_high(i_)\n sup = \"\"\"<sup><a href=\"#noteref%s\">%s</a></sup>\"\"\" % (\n refid, i_entity\n )\n if allow_inc:\n i_ += 1\n result.append(sup)\n result = ' '.join(result)\n return result", "def create_home_directory_symbolic_links():\n file_paths = (\n path\n for path in repo_home.rglob(\"*\")\n if path.is_file() and not path.is_symlink()\n )\n\n for file_path in file_paths:\n sym_link_path = translate_home_path(file_path)\n\n if sym_link_path.is_file() and not sym_link_path.is_symlink():\n 
backup_file(sym_link_path)\n sym_link_path.unlink()\n\n if sym_link_path.is_symlink():\n sym_link_path.unlink()\n\n print(f\"Creating Symlink: {sym_link_path} -> {file_path}\")\n sym_link_path.symlink_to(file_path)", "def test_apiLinking(self):\n version = \"1.2.3\"\n input, output = self.getArbitraryLoreInputAndOutput(version)\n self.howtoDir.child(\"one.xhtml\").setContent(input)\n\n self.builder.build(version, self.howtoDir, self.howtoDir,\n self.templateFile, \"scheme:apilinks/%s.ext\")\n out = self.howtoDir.child('one.html')\n self.assertIn(\n '<a href=\"scheme:apilinks/foobar.ext\" title=\"foobar\">foobar</a>',\n out.getContent())", "def fix_links():\n pass", "def download_brick_catalog(brick):\n urls = {1: 'http://archive.stsci.edu/pub/hlsp/phat/brick01/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12058-m31-b01_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 2: 'http://archive.stsci.edu/pub/hlsp/phat/brick02/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12073-m31-b02_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 3: 'http://archive.stsci.edu/pub/hlsp/phat/brick03/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12109-m31-b03_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 4: 'http://archive.stsci.edu/pub/hlsp/phat/brick04/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12107-m31-b04_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 5: 'http://archive.stsci.edu/pub/hlsp/phat/brick05/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12074-m31-b05_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 6: 'http://archive.stsci.edu/pub/hlsp/phat/brick06/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12105-m31-b06_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 7: 'http://archive.stsci.edu/pub/hlsp/phat/brick07/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12113-m31-b07_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 8: 'http://archive.stsci.edu/pub/hlsp/phat/brick08/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12075-m31-b08_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 9: 'http://archive.stsci.edu/pub/hlsp/phat/brick09/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12057-m31-b09_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 10: 'http://archive.stsci.edu/pub/hlsp/phat/brick10/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12111-m31-b10_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 11: 'http://archive.stsci.edu/pub/hlsp/phat/brick11/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12115-m31-b11_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 12: 'http://archive.stsci.edu/pub/hlsp/phat/brick12/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12071-m31-b12_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 13: 'http://archive.stsci.edu/pub/hlsp/phat/brick13/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12114-m31-b13_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 14: 'http://archive.stsci.edu/pub/hlsp/phat/brick14/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12072-m31-b14_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 15: 'http://archive.stsci.edu/pub/hlsp/phat/brick15/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12056-m31-b15_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 16: 'http://archive.stsci.edu/pub/hlsp/phat/brick16/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12106-m31-b16_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 17: 'http://archive.stsci.edu/pub/hlsp/phat/brick17/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12059-m31-b17_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 18: 
'http://archive.stsci.edu/pub/hlsp/phat/brick18/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12108-m31-b18_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 19: 'http://archive.stsci.edu/pub/hlsp/phat/brick19/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12110-m31-b19_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 20: 'http://archive.stsci.edu/pub/hlsp/phat/brick20/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12112-m31-b20_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 21: 'http://archive.stsci.edu/pub/hlsp/phat/brick21/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12055-m31-b21_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 22: 'http://archive.stsci.edu/pub/hlsp/phat/brick22/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12076-m31-b22_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 23: 'http://archive.stsci.edu/pub/hlsp/phat/brick23/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12070-m31-b23_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits'} # NOQA\n url = urls[brick]\n output_path = os.path.join(os.getenv('PHATV2DATA'), os.path.basename(url))\n print \"Downloading {url}\".format(url=url)\n cmd = 'wget -c -nc -q -O {output} {input}'.format(output=output_path,\n input=url)\n print \"Started at\", datetime.utcnow()\n if not os.path.exists(output_path):\n subprocess.call(cmd, shell=True)\n print \"Ended at \", datetime.utcnow()", "def makelinks(links, sp1, sp2):\n sp1_links = []\n sp2_links = []\n sp1_chrom = []\n sp2_chrom = []\n f = open(\"circos.{}-{}.links.txt\".format(sp1, sp2), 'w')\n with open(links, 'r') as link:\n for line in link:\n x = line.strip().split()\n species = x[0].split(\".\")[0]\n chrom = x[0].split(\".\")[1]\n orient = x[3]\n size = int(x[4])\n align_l = int(x[2])\n align_s = int(x[1])\n if orient == \"+\":\n start = align_s\n end = start + align_l\n elif orient == \"-\":\n start = size - align_s\n end = start - align_l\n else:\n print(\"\\nNo Direction indicated\".format(line))\n if species == sp1:\n sp1_links.append(\"{} {} {}\".format(chrom, start, end))\n sp1_chrom.append(chrom)\n elif species == sp2:\n sp2_links.append(\"{} {} {}\".format(chrom, start, end))\n sp2_chrom.append(chrom)\n [f.write(\"{} {}\\n\".format(i, j)) for i, j in zip(sp1_links, sp2_links)]\n f.close()\n\n return(sp1_chrom, sp2_chrom)", "def test_all_notebooks_linked():\n assert _public_nb_dir.is_dir()\n assert _linked_nb_dir.is_dir()\n linked_nbs = [f for f in listdir(_linked_nb_dir) if f.endswith(\".ipynb\")]\n new_links = []\n for f in listdir(_public_nb_dir):\n if not f.endswith(\".ipynb\"):\n continue\n linked_name = get_symlink_name(f)\n if linked_name not in linked_nbs:\n cwd = os.getcwd()\n os.chdir(str(_linked_nb_dir))\n os.symlink(\n f\"../../../../jupyter_notebooks/{f}\",\n linked_name,\n target_is_directory=False,\n )\n os.chdir(cwd)\n new_links.append(str(_linked_nb_dir / linked_name))\n if new_links:\n ll = \"\\n\".join(new_links)\n pytest.fail(f\"Please commit the following notebook symlinks:\\n{ll}\")", "def create_links(self, name):\n for target, linknames in self._link_map.iteritems():\n for linkname in linknames:\n self._api.path.mock_copy_paths(target, linkname)\n self._api.python(\n name,\n self._resource,\n args = [\n '--link-json',\n self._api.json.input({str(target) : linkname\n for target, linkname in self._link_map.iteritems()\n }),\n ],\n infra_step=True)", "def test_create_symlink_file(self):\n pass", "def _post_src_install_soname_symlinks(mysettings, out):\n\n\timage_dir = mysettings[\"D\"]\n\tneeded_filename = 
os.path.join(mysettings[\"PORTAGE_BUILDDIR\"],\n\t\t\"build-info\", \"NEEDED.ELF.2\")\n\n\tf = None\n\ttry:\n\t\tf = io.open(_unicode_encode(needed_filename,\n\t\t\tencoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='r', encoding=_encodings['repo.content'],\n\t\t\terrors='replace')\n\t\tlines = f.readlines()\n\texcept IOError as e:\n\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\traise\n\t\treturn\n\tfinally:\n\t\tif f is not None:\n\t\t\tf.close()\n\n\tqa_no_symlink = \"\"\n\tf = None\n\ttry:\n\t\tf = io.open(_unicode_encode(os.path.join(\n\t\t\tmysettings[\"PORTAGE_BUILDDIR\"],\n\t\t\t\"build-info\", \"QA_SONAME_NO_SYMLINK\"),\n\t\t\tencoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='r', encoding=_encodings['repo.content'],\n\t\t\terrors='replace')\n\t\tqa_no_symlink = f.read()\n\texcept IOError as e:\n\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\traise\n\tfinally:\n\t\tif f is not None:\n\t\t\tf.close()\n\n\tqa_no_symlink = qa_no_symlink.split()\n\tif qa_no_symlink:\n\t\tif len(qa_no_symlink) > 1:\n\t\t\tqa_no_symlink = \"|\".join(\"(%s)\" % x for x in qa_no_symlink)\n\t\t\tqa_no_symlink = \"^(%s)$\" % qa_no_symlink\n\t\telse:\n\t\t\tqa_no_symlink = \"^%s$\" % qa_no_symlink[0]\n\t\tqa_no_symlink = re.compile(qa_no_symlink)\n\n\tlibpaths = set(portage.util.getlibpaths(\n\t\tmysettings[\"ROOT\"], env=mysettings))\n\tlibpath_inodes = set()\n\tfor libpath in libpaths:\n\t\tlibdir = os.path.join(mysettings[\"ROOT\"], libpath.lstrip(os.sep))\n\t\ttry:\n\t\t\ts = os.stat(libdir)\n\t\texcept OSError:\n\t\t\tcontinue\n\t\telse:\n\t\t\tlibpath_inodes.add((s.st_dev, s.st_ino))\n\n\tis_libdir_cache = {}\n\n\tdef is_libdir(obj_parent):\n\t\ttry:\n\t\t\treturn is_libdir_cache[obj_parent]\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\trval = False\n\t\tif obj_parent in libpaths:\n\t\t\trval = True\n\t\telse:\n\t\t\tparent_path = os.path.join(mysettings[\"ROOT\"],\n\t\t\t\tobj_parent.lstrip(os.sep))\n\t\t\ttry:\n\t\t\t\ts = os.stat(parent_path)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tif (s.st_dev, s.st_ino) in libpath_inodes:\n\t\t\t\t\trval = True\n\n\t\tis_libdir_cache[obj_parent] = rval\n\t\treturn rval\n\n\tmissing_symlinks = []\n\n\t# Parse NEEDED.ELF.2 like LinkageMapELF.rebuild() does.\n\tfor l in lines:\n\t\tl = l.rstrip(\"\\n\")\n\t\tif not l:\n\t\t\tcontinue\n\t\tfields = l.split(\";\")\n\t\tif len(fields) < 5:\n\t\t\tportage.util.writemsg_level(_(\"\\nWrong number of fields \" \\\n\t\t\t\t\"in %s: %s\\n\\n\") % (needed_filename, l),\n\t\t\t\tlevel=logging.ERROR, noiselevel=-1)\n\t\t\tcontinue\n\n\t\tobj, soname = fields[1:3]\n\t\tif not soname:\n\t\t\tcontinue\n\t\tif not is_libdir(os.path.dirname(obj)):\n\t\t\tcontinue\n\t\tif qa_no_symlink and qa_no_symlink.match(obj.strip(os.sep)) is not None:\n\t\t\tcontinue\n\n\t\tobj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))\n\t\tsym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)\n\t\ttry:\n\t\t\tos.lstat(sym_file_path)\n\t\texcept OSError as e:\n\t\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\t\traise\n\t\telse:\n\t\t\tcontinue\n\n\t\tmissing_symlinks.append((obj, soname))\n\n\tif not missing_symlinks:\n\t\treturn\n\n\tqa_msg = [\"QA Notice: Missing soname symlink(s):\"]\n\tqa_msg.append(\"\")\n\tqa_msg.extend(\"\\t%s -> %s\" % (os.path.join(\n\t\tos.path.dirname(obj).lstrip(os.sep), soname),\n\t\tos.path.basename(obj))\n\t\tfor obj, soname in missing_symlinks)\n\tqa_msg.append(\"\")\n\tfor line in qa_msg:\n\t\teqawarn(line, key=mysettings.mycpv, out=out)", 
"def write_downloaded_links():\n global downloaded_links_fn\n text_file = open(downloaded_links_fn,\"w\")\n for link in downloaded_links.items():\n text_file.write(link[0] + \"\\n\")\n text_file.close()", "def make_bibtex(self):\n\n\t\t# bib = requests.request('GET', 'http://dx.doi.org/' + self.doi, ", "def _open_output_files(self):\n self.links_outfile = open(self.opts.links_outfile, 'wb')", "def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)", "def main():\n # Construct the feed generator\n f = LogBufferFeed(FEED_DIR)\n f.MAX_AGE = 24 * 60 * 60 # 1 day\n f.FEED_META['feed.title'] = '%s Referrering Links' % SITE_NAME\n f.FEED_META['feed.tagline'] = \\\n 'New referring links from Apache access.log on %s' % SITE_NAME\n \n # Load up tail of access log, parse, and filter\n new_lines = bookmark_tailgrep(ACCESS_LOG, max_initial_lines=100000)\n all_events = parse_access_log(new_lines)\n events = [ x for x in all_events if event_filter(x) ]\n \n # Scan through latest events for new referrers\n referrers_seen = shelve.open(REFER_SEEN)\n new_referrers = []\n for evt in events:\n k = '%(referrer)s -> %(path)s' % evt\n if not referrers_seen.has_key(k):\n referrers_seen[k] = 1\n new_referrers.append( (evt['referrer'], evt['path']) )\n referrers_seen.close()\n \n # If there were new referrers found, insert a new entry.\n if len(new_referrers) > 0:\n \n # Build a list of hyperlinks for referrers\n links_out = [\n LINK_TMPL % {\n 'SITE_ROOT' : SITE_ROOT,\n 'referrer' : x[0],\n 'path' : x[1],\n }\n for x in new_referrers\n ]\n \n # Build a summary for this entry.\n summary = SUMMARY_TMPL % { \n 'count' : len(new_referrers), \n 'links' : \"\\n\".join(links_out)\n }\n \n # Construct and append a new entry\n entry = FeedEntryDict({\n 'title' : '%s new referrers' % len(new_referrers),\n 'link' : '',\n 'summary' : summary\n })\n f.append_entry(entry)\n\n # Output the current feed entries as both RSS and Atom\n open(FEED_NAME_FN % 'rss', 'w').write(f.scrape_rss())\n open(FEED_NAME_FN % 'atom', 'w').write(f.scrape_atom())", "def link_resources(ctx):\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)", "def get_all_books_page_links(raw_page_rip):\n nt = {}\n ot = {}\n OTIDS = []\n soup = raw_page_rip.soup\n if not os.path.exists(data_store):\n os.mkdir(data_store)\n \n nt_soup = soup.find(\"td\", class_=\"NT\")\n ot1 = soup.find(\"td\", class_=\"OT1\")\n ot2 = soup.find(\"td\", class_=\"OT2\")\n \n for each in nt_soup.find_all(\"a\", href=True):\n if 'class=\"b\"' in str(each):\n href = each.get(\"href\")\n name = each.text\n\n idd = re.search(r'\\d{5}', href).group(0)\n nt[name] = [domain + href, idd]\n \n with open(os.path.join(data_store, \"new_test.json\"), \"w+\") as wh:\n json.dump(nt, wh)\n\n for each in ot1.find_all(\"a\", href=True):\n if 'class=\"b\"' in str(each):\n \n href = each.get(\"href\")\n name = each.text\n idd = re.search(r'\\d{5}', href).group(0)\n \n if idd in OTIDS:\n ot[domain + href][0] = name + \" or \" + ot[domain + href][0]\n else:\n ot[domain + href] = [name, idd]\n OTIDS.append(idd)\n \n for each in ot2.find_all(\"a\", href=True):\n if 'class=\"b\"' in str(each):\n \n href = each.get(\"href\")\n name = each.text\n idd = re.search(r'\\d{5}', href).group(0)\n \n if idd in OTIDS:\n ot[domain + href][0] = name 
+ \" or \" + ot[domain + href][0]\n else:\n ot[domain + href] = [name, idd]\n OTIDS.append(idd)\n \n rev_old = {value[0] : [key, value[1]] for key, value in ot.items()}\n with open(os.path.join(data_store, \"old_test.json\"), \"w+\") as wh:\n json.dump(rev_old, wh)", "def generate_links(wiki, page, tags):\n wiki.generate_tag_links(page, tags)", "def create_links(list_of_paths, dest_dir, print_cfg_ipol=False):\n ms = False\n for i, f in enumerate(list_of_paths):\n\n if isinstance(f, tuple): # we have the ms image\n # tif ms\n ms = True\n symlink_p(f[1], os.path.join(dest_dir, 'im_ms_%02d.tif' % (i+1)))\n\n # preview ms\n tmp = copy_file_matching_pathname('PREVIEW_*.JPG', os.path.dirname(f[1]), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1)))\n # enhance contrast\n # os.system(\"/home/carlo/code/s2p/bin/qauto %s %s\" % (tmp, tmp)\n else:\n print('MS PREVIEW not found for %s' % f[1], file=sys.stderr)\n f = f[0] # the path to ms preview is not needed anymore\n\n # pan preview (if no ms preview)\n if not os.path.isfile(os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1))):\n tmp = copy_file_matching_pathname('PREVIEW_*.JPG', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1)))\n # os.system(\"/home/carlo/code/s2p/bin/qauto %s %s\" % (tmp, tmp))\n else:\n print('PAN PREVIEW not found for %s' % f, file=sys.stderr)\n\n # dim\n tmp = copy_file_matching_pathname('DIM_*.XML', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'dim_%02d.xml' % (i+1)))\n\n # rpc\n tmp = copy_file_matching_pathname('RPC_*.XML', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'rpc_%02d.xml' % (i+1)))\n\n # tif panchro\n symlink_p(f, os.path.join(dest_dir, 'im_panchro_%02d.tif' % (i+1)))\n\n # dzi 8 bits\n dzi8_found = False\n dzi8 = '%s_8BITS.dzi' % f[:-8] # remove extension '.JP2.TIF' (8 chars)\n files8 = '%s_8BITS_files' % f[:-8]\n if os.path.isfile(dzi8) and os.path.isdir(files8):\n symlink_p(dzi8, os.path.join(dest_dir, 'im_panchro_8BITS_%02d.dzi' % (i+1)))\n symlink_p(files8, os.path.join(dest_dir, 'im_panchro_8BITS_%02d_files' % (i+1)))\n dzi8_found = True\n\n # dzi 16 bits\n dzi16_found = False\n dzi16 = '%s_16BITS.dzi' % f[:-8] # remove extension '.JP2.TIF' (8 chars)\n files16 = '%s_16BITS_files' % f[:-8]\n if os.path.isfile(dzi16) and os.path.isdir(files16):\n symlink_p(dzi16, os.path.join(dest_dir, 'im_panchro_16BITS_%02d.dzi' % (i+1)))\n symlink_p(files16, os.path.join(dest_dir, 'im_panchro_16BITS_%02d_files' % (i+1)))\n dzi16_found = True\n\n # print warning if neither 8bit nor 16bit dzi was found\n if (not dzi8_found) and (not dzi16_found):\n print('WARNING: no dzi file found for img %s' % f, file=sys.stderr)\n\n if print_cfg_ipol:\n print_cfg.main(dest_dir, len(list_of_paths), ms)", "def connect_links(base_url, extensions, wikidir, body):\n if base_url.endswith(\"/\"):\n base_url = base_url[:-1]\n\n i = 0\n body2 = []\n\n for match in WIKILINK.finditer(body):\n body2.append(body[i:match.span(0)[0]])\n \n text = match.group(1)\n\n if \"|\" in text:\n topic, desc = text.split(\"|\")\n topic = topic.strip()\n else:\n topic, desc = (text, text)\n\n fn = os.path.join(wikidir, topic)\n\n ext = tools.what_ext(extensions, fn)\n if not ext:\n body2.append(match.group(0))\n i = match.span(0)[1]\n continue\n\n body2.append(\"<a href=\\\"%s/%s/%s\\\">%s</a>\" % \\\n (base_url, TRIGGER, topic, desc))\n i = match.span(0)[1]\n\n body2.append(body[i:])\n return \"\".join(body2)", 
"def _collect_img_links(self):\n raise NotImplementedError", "def link_dihedra(self, verbose: bool = ...) -> None:\n ..." ]
[ "0.667778", "0.59682673", "0.5959984", "0.567664", "0.5635121", "0.55980814", "0.553239", "0.5522011", "0.54892784", "0.54057527", "0.5380412", "0.5370104", "0.53354234", "0.5332412", "0.5303601", "0.5300853", "0.52821183", "0.5264791", "0.5261789", "0.5261702", "0.5261481", "0.52487564", "0.5235796", "0.5218422", "0.5204191", "0.5178898", "0.51704174", "0.5159474", "0.5132064", "0.51179516" ]
0.6162017
1
Extract the initial EPIs stored in dicom format.
def ExtractFirstEpi(self): for entry in self.info: if self.info[entry]['type'] == 'first_epi': epiname = self.info[entry]['imgfile'] cmd = 'convert_file %s -f0 %s %s %s' % \ (self.flip_opts, entry,epiname, self.info[entry]['filetype']) fname = '%s%s' % (epiname, self.info[entry]['suffix']) self.CheckExec(cmd, [fname]) self.info[entry]['imgfile'] = fname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dicom_load():\n # Identify folders with EPI data\n dirs = [i for i in os.listdir(dcm_dir) if os.path.isdir(os.path.join(dcm_dir, i))]\n d_cnt = 0\n for d in dirs:\n dcm_file = os.path.join(dcm_dir,d,os.listdir(os.path.join(dcm_dir,d))[0])\n try:\n dcm_data = pydicom.dcmread(dcm_file)\n except:\n pass\n else:\n # If data is EPI then get start time, etc\n if 'EPI' in dcm_data.ImageType:\n dcm_dict[d_cnt] = {}\n dcm_dict[d_cnt]['dcm_file'] = dcm_file\n dcm_dict[d_cnt]['task_name'] = dcm_data.SeriesDescription\n dcm_dict[d_cnt]['task_name'] = dcm_dict[d_cnt]['task_name'].replace('_','-')\n date = dcm_data.SeriesDate\n start = dcm_data.SeriesTime\n start_time = '%s-%s-%s %s:%s:%s'%(date[0:4],date[4:6],date[6:],start[0:2],start[2:4],start[4:])\n dcm_dict[d_cnt]['start_time'] = datetime.fromisoformat(start_time)\n dcm_dict[d_cnt]['run_length'] = dcm_data[0x0019,0x105a].value/1000\n dcm_dict[d_cnt]['end_time'] = dcm_dict[d_cnt]['start_time'] + timedelta(milliseconds=dcm_dict[d_cnt]['run_length'])\n d_cnt = d_cnt+1", "def _EpiInfo(self, info, path):\n\n epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. 
Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK", "def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = 
'*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)", "def _get_dmi(cls, data):\n\t\tdata['pdi'] = cls._get_pdi(data, 14)\n\t\tdata['mdi'] = cls._get_mdi(data, 14)\n\t\tdata['dx'] = cls._get_dx(data, 14)\n\t\tdata['adx'] = data['dx_6_ema']\n\t\tdata['adxr'] = data['adx_6_ema']\n\t\treturn data", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])", "def extractSeriesInfo(self, inputdir):\n self.m_status.SetLabelText(\"Detecting DICOM data ... please wait\")\n allfiles = [y for x in walk(inputdir) for y in iglob(join(x[0], '*.IMA'))]\n self.controller.parseDicom(self, allfiles)\n # n = 1\n # for filename in allfiles:\n # try:\n # if not self.db.hasFile(filename):\n # dcm = dicom.read_file(filename)\n # updatemsg = \"Detecting DICOM data ... %d of %d\" % (n, len(allfiles))\n # self.m_status.SetLabelText(updatemsg)\n # n += 1\n #\n # # Check DICOM header info\n # series_num = str(dcm.SeriesInstanceUID)\n # uuid = self.generateuid(series_num)\n # imagetype = str(dcm.ImageType[2])\n # dicomdata = {'uuid': uuid,\n # 'patientid': str(dcm.PatientID),\n # 'patientname': str(dcm.PatientName),\n # 'seriesnum': series_num,\n # 'sequence': str(dcm.SequenceName),\n # 'protocol': str(dcm.ProtocolName),\n # 'imagetype': imagetype\n # }\n #\n # if not self.db.hasUuid(uuid):\n # self.db.addDicomdata(dicomdata)\n # if not self.db.hasFile(filename):\n # self.db.addDicomfile(uuid, filename)\n # except InvalidDicomError:\n # print(\"Not DICOM - skipping: \", filename)\n # continue\n # Load for selection\n # Columns: Toggle Select\n # Text PatientID\n # Text Sequence\n # Text Protocol\n # Text Image Type\n # Text Num Files\n # Text Series ID\n\n # for suid in db.getNewUuids():\n # numfiles = db.getNumberFiles(suid)\n # self.m_dataViewListCtrl1.AppendItem(\n # [True, self.controller.db.getDicomdata(suid, 'patientname'),\n # self.controller.db.getDicomdata(suid, 'sequence'),\n # self.controller.db.getDicomdata(suid, 'protocol'),\n # self.controller.db.getDicomdata(suid, 'imagetype'), str(numfiles),\n # self.controller.db.getDicomdata(suid, 'seriesnum')])\n #\n # msg = \"Total Series loaded: %d\" % self.m_dataViewListCtrl1.GetItemCount()\n # self.m_status.SetLabelText(msg)", "def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = 
self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps", "def as_exons(self,input={}):\n # handle potentially applied input argument\n self._handle_input_subdict(input)\n # parse data in the AbgpGeneLocusDir\n self.parseinputgff()\n self.rungetorf()\n # we need abgp_geneconfirmation.geneconfirmation first!\n geneconfirmation( { self._create_auto_key(): self.input } )\n\n # get only the CDS-type of tracks that define the coding sequence\n genecdstracks = filtergffs4fmethod( self._obtain_gene_gff(), GFF_CDS_FMETHOD ) \n\n if len(genecdstracks) == 1:\n # deal with SingleExonOnOrf -> TSS + donor\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n return [ SingleExonOnOrf(tss,genecdstracks[-1][4],orf,gff={}) ]\n\n elif len(genecdstracks) == 0:\n # no tracks !?\n return []\n elif not self.input['orfid-genestructure']:\n # not mappable on Orfs / or no genestructure provided\n return []\n else:\n # list with exons,introns to return \n exons = []\n introns = []\n exonsandintrons = []\n\n # deal with FirstExonOnOrf -> TSS + donor\n try:\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n except:\n print self.input.keys(), self.input['proteinfref']\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n donor = self._gene_cds_track_2_donor( genecdstracks[0], orf )\n donor.phase = ( genecdstracks[0][4]-genecdstracks[0][3]-1 ) % 3\n exons.append( FirstExonOnOrf(tss,donor,orf,gff={}) )\n exonsandintrons.append( exons[-1] )\n\n # deal with internal ExonOnOrf(s): -> acceptor + donor\n for pos in range(1,len(genecdstracks)-1):\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][pos])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[pos], orf )\n accep.phase = exons[-1].donor.phase\n donor = self._gene_cds_track_2_donor( genecdstracks[pos], orf )\n donor.phase = ( genecdstracks[pos][4]-genecdstracks[pos][3]-1+accep.phase ) % 3\n exons.append( ExonOnOrf(accep,donor,orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf, exons[-2].orf,\n exons[-1].acceptor, exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # deal with FinalExonOnOrf -> acceptor + StopCodon\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][-1])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[-1], orf )\n accep.phase = exons[-1].donor.phase\n exons.append( FinalExonOnOrf(accep,genecdstracks[-1][4],orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf,exons[-2].orf,\n exons[-1].acceptor,exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # return list of 
exons&introns\n return exonsandintrons", "def read_icd(self):\n wiki = wikipediaapi.Wikipedia('en') # may as well declare this here so I don't need to call it every query\n supplemental_articles = []\n with open(ICD10_DESC_PATH, 'r') as f:\n current_family = [] # list of lists of descriptions within the current family (3 letter code = family)\n current_parent = None # Most recent 3 letter code seen\n for line in tqdm(f.readlines(), desc=\"ICD10 Lines Processed\"):\n\n code = line[6:14].strip().lower()\n description = simple_clean(line[77:])\n self.code2desc[code] = description.split()\n\n if len(code) == PARENT_CODE_LENGTH: # found a parent\n # query web if set params to True\n wiki_result = self.query_wikipedia(wiki, description) if self.query else []\n pubmed_result = self.query_pubmed(description) if self.query else []\n\n # store results\n if wiki_result:\n supplemental_articles.extend(wiki_result)\n if pubmed_result:\n supplemental_articles.extend(pubmed_result)\n\n # update metrics using current family\n self.process_family_frequencies(current_parent, current_family)\n current_family = []\n current_parent = code\n current_family.append(description.split())\n self.n_desc += 1\n\n # process the last family\n self.process_family_frequencies(current_parent, current_family)\n # go through all the articles we found, preprocess, and add to self.data\n self.data.extend(self.process_articles(supplemental_articles))\n\n # lastly calculate tf and idf over all descriptions (not including articles here) for use in weighting later\n self.n_words = log10(self.n_words)\n self.n_desc = log10(self.n_words)\n self.word2tf = {word: log10(count) - self.n_words for word, count in self.word2tf.items()}\n self.word2df = {word: count - self.n_desc for word, count in self.word2df.items()}\n self.dump()", "def extract(args):\n prism.extract.run(\n input_fp=args.input,\n output_fp=args.output,\n depth_cutoff=args.depth_cutoff,\n num_cpg_cutoff=args.num_cpg_cutoff,\n prepend_chr=args.prepend_chr,\n paired=args.paired,\n verbose=args.verbose,\n )", "def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes", "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi 
= self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def disease_descriptors(civic_did8):\n return [civic_did8]", "def get_ephemeris(rundate, sat_name):\n file_key = \"slr_ephemeris\"\n ephemeris_data = get_satellite_vars(sat_name)\n provider_list = config.tech.prediction_providers.list\n # Find the latest version of the observation file\n versions = config.files.glob_variable(file_key, \"version\", r\"\\d+\", file_vars=ephemeris_data)\n\n try:\n ephemeris_data[\"version\"] = sorted(versions)[-1]\n providers = config.files.glob_variable(file_key, \"provider\", r\"\\w+\", file_vars=ephemeris_data)\n for provider in provider_list:\n if provider in providers:\n ephemeris_data[\"provider\"] = provider\n break\n else:\n log.fatal(f\"No valid provider found: {', '.join(providers)}\")\n except IndexError:\n log.info(\"No ephemeris data found\")\n log.info(f\"Download manually from https://cddis.nasa.gov/archive/slr/cpf_predicts/{rundate.year}/{sat_name}\")\n log.fatal(f\"Please save missing file as 
'{config.files.path(file_key)}' !\")\n eph_parser = parsers.parse_key(file_key, file_vars=ephemeris_data)\n eph = calculate_initial_values(eph_parser.as_dict(), rundate)\n\n return eph", "def extract_core_ids(self):\n path2folder = 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[self.radii[0]] + '/'\n analysis_files = [dir for dir in os.listdir(path2folder) if dir.startswith('Matrix-analysis-IP_')]\n analysis_file = path2folder + analysis_files[0] #work for 1 component system\n with open(analysis_file, 'r') as fid:\n my_file = yaml.load(fid, Loader=yaml.FullLoader)\n self.core_ids = list(my_file.keys())\n self.mol_name = analysis_files[0].split('_')[1].split('.')[0]\n\n\n print('coreids', self.core_ids)", "def exon_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n exonpos = defline[1:].split(' ')[1]\n seqs[exonpos] = seq\n\n rnaid_to_accession = dict()\n reported_exons = {}\n exons, cdss = [], {}\n start, stop = None, None\n moltypes = ['mRNA', 'tRNA', 'ncRNA', 'transcript', 'primary_transcript',\n 'V_gene_segment', 'D_gene_segment', 'J_gene_segment',\n 'C_gene_segment']\n for entry in gff3:\n for moltype in moltypes:\n if ('\\t%s\\t' % moltype) in entry:\n accession = re.search(r'accession=([^;\\n]+)', entry).group(1)\n tid = re.search(r'ID=([^;\\n]+)', entry).group(1)\n rnaid_to_accession[tid] = accession\n\n if '\\texon\\t' in entry:\n exons.append(entry)\n elif '\\tCDS\\t' in entry:\n fields = entry.split('\\t')\n pos = '%s_%s-%s%s' % (fields[0], fields[3], fields[4], fields[6])\n cdss[pos] = entry\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if len(exons) == 0:\n continue\n xcept = False\n for exonpos in cdss:\n if ';exception=ribosomal slippage' in cdss[exonpos]:\n xcept = True\n if xcept:\n exons, cdss = [], {}\n start, stop = None, None\n continue\n assert start, 'No start codon for exon(s): %s' % exons[0]\n assert stop, 'No stop codon for exon(s): %s' % exons[0]\n for exon in exons:\n fields = exon.split('\\t')\n assert len(\n fields) == 9, 'entry does not have 9 fields: %s' % exon\n mrnaid = re.search(r'Parent=([^;\\n]+)', fields[8]).group(1)\n exonpos = '%s_%s-%s%s' % (fields[0],\n fields[3], fields[4], fields[6])\n if exonpos in reported_exons:\n continue\n exonlength = int(fields[4]) - int(fields[3]) + 1\n exonseq = seqs[exonpos]\n assert len(exonseq) == exonlength, \\\n 'exon \"%s\": length mismatch; gff=%d, fa=%d' % (\n exonpos, exonlength, len(exonseq))\n gccontent = gc_content(exonseq)\n gcskew = gc_skew(exonseq)\n ncontent = n_content(exonseq)\n context = exon_context(exon, start, stop)\n phase = None\n remainder = None\n if context == 'cds':\n cexon = cdss[exonpos]\n phase = int(cexon.split('\\t')[7])\n remainder = (exonlength - phase) % 3\n values = '%s %s %d %.3f %.3f %.3f %s %r %r' % (\n exonpos, rnaid_to_accession[mrnaid], exonlength, gccontent,\n gcskew, ncontent, context, phase, remainder)\n reported_exons[exonpos] = 1\n yield values.split(' ')\n exons, cdss = [], {}\n start, stop = None, None", "def make_pmodel_energies():\n cwd = os.getcwd()\n\n os.chdir(\"test_data/protein_load\")\n pmodel = pyODEM.model_loaders.Protein(\"ww_domain.ini\")\n os.chdir(cwd)\n\n data = pmodel.load_data(\"test_data/protein_load/traj/traj_test.xtc\")\n heps, dheps = pmodel.get_potentials_epsilon(data)\n\n true_energies = np.loadtxt(\"test_data/protein_load/traj/energy_gaussian_test.dat\")\n\n return pmodel, data, heps, dheps, true_energies", "def 
dicom_cli():", "def test_getAbstract(self):\n cases = {\n self.test_eac + 'NE00001.xml':'NE00001',\n self.test_eac + 'NE00100.xml':'NE00100',\n self.test_eac + 'NE00200.xml':'NE00200',\n self.test_eac + 'NE00600.xml':'NE00600',\n }\n for case in cases:\n doc = EacCpf.EacCpf(case, 'http://www.example.com/metadata.xml', 'http://www.example.com/presentation.html')\n self.assertNotEqual(doc, None)\n abstract = doc.getAbstract()\n self.assertNotEqual(abstract, None)", "def extract_all_lazy():\n\n\t#Construct filepaths: Data COMP_INFO_1\n\tdata_ci1_name = \"DATA_2016_COMP_INFO_1.csv\"\n\tdata_ci1_fullname = os.path.join(files_location, data_ci1_name)\n\t#Data COMP_INFO_2\n\tdata_ci2_name = \"DATA_2016_COMP_INFO_2.csv\"\n\tdata_ci2_fullname = os.path.join(files_location, data_ci2_name)\n\t#Data PROPERTY INFO\n\tdata_pi_name = \"DATA_2016_PROPERTY_INFO_ST.csv\"\n\tdata_pi_fullname = os.path.join(files_location, data_pi_name)\n\t#Data General Info\n\tdata_gi_name = \"DATA_2016_GENERAL_INFO.csv\"\n\tdata_gi_fullname = os.path.join(files_location, data_gi_name)\n\n\t#Read & Process COMP_INFO\n\tdata_ci1 = pd.read_csv(data_ci1_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\tdata_ci2 = pd.read_csv(data_ci2_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\n\tdata_ci = data_ci1.append(data_ci2)\n\tdata_ci['QUESTION'] = data_ci['QUESTION'].replace(constants.ci_mapping)\n\t# Take only the survey questions mapped\n\tdata_ci = data_ci[data_ci['QUESTION'].isin(constants.ci_mapping.values())]\n\tdata_ci = data_ci.set_index(['PROPERTY_CODE','PROPERTY_NAME','JOB_CODE','POSITION'])\n\tdata_ci = data_ci.pivot(columns=\"QUESTION\")\n\tdata_ci.columns = [\"_\".join(pair) for pair in data_ci.columns]\n\tdata_ci = data_ci.reset_index()\n\n\t#Read & Process Property Info data\n\tdata_pi = pd.read_csv(data_pi_fullname, usecols = constants.keep_columns_PI, encoding='ISO-8859-1')\n\t#survey_type_transformed = transform.surveytype_categorical(data_pi)\n\t#data_pi = pd.merge(data_pi, survey_type_transformed, on=['PROPERTY_CODE'])\n\n\t#Read & Process General Info\n\tdata_gi = pd.read_csv(data_gi_fullname, skiprows = 2, usecols = constants.keep_columns_GI, encoding='ISO-8859-1')\n\tdata_gi['QUESTION'] = data_gi['QUESTION'].replace(constants.gi_mapping)\n\t# Take onl the survey questions mapped\n\tdata_gi = data_gi[data_gi['QUESTION'].isin(constants.gi_mapping.values())]\n\tdata_gi = data_gi.set_index(['PROPERTY_CODE','PROPERTY_NAME'])\n\tdata_gi = data_gi.pivot(columns=\"QUESTION\")\n\tdata_gi.columns = [\"_\".join(pair) for pair in data_gi.columns]\n\tdata_gi = data_gi.reset_index()\n\n\t#This frame needs to be reworked\n\td_ci = pd.merge(data_gi, data_pi, on = ['PROPERTY_CODE','PROPERTY_NAME'])\n\td_ci = pd.merge(d_ci, data_ci, on = ['PROPERTY_CODE','PROPERTY_NAME'],suffixes= ['_ci','_gi'])\n\n\t#Observations by Dimensions to determine top X markets\n\t#Can this be in a better position?\n\td_ci = d_ci[~(d_ci['PROPERTY_NAME'].isin(constants.del_rows_property_name))]\n\td_ci['POSITION'] = d_ci['POSITION'].astype(str)\n\n\tpayload = {}\n\tpayload['gi'] = data_gi\n\tpayload['pi'] = data_pi\n\tpayload['ci'] = data_ci\n\tpayload['d_ci'] = d_ci\n\n\treturn payload", "def extract(src_dir,feat_file,ivectors_dir,num_gselect):\n os.system(\"./extract_ivectors.sh --num-gselect \"+str(num_gselect)+ \" \" + src_dir + \" \" + feat_file + \" \" + ivectors_dir)\n keys=[]\n ivectors=np.empty((0,0))\n for key,mat in 
kaldi_io.read_vec_flt_scp(ivectors_dir+'/ivector.scp'):\n if ivectors.shape[1] != mat.shape[0]:\n ivectors=ivectors.reshape((0,mat.shape[0]))\n ivectors=np.vstack((ivectors,mat))\n keys.append(key)\n\n ivectors=np.asarray(ivectors)\n keys=np.asarray(keys)\n return ivectors,keys", "def extract_mediapackage_endpoints(mp_client, mp_channel_id_list):\n emp_endpoint_list = {}\n for channel in mp_channel_id_list:\n emp_endpoint_list[str(channel)] = []\n response = mp_client.list_origin_endpoints()\n for endpoint in response['OriginEndpoints']:\n if str(endpoint[\"ChannelId\"]) in mp_channel_id_list:\n emp_endpoint_list[str(endpoint[\"ChannelId\"])].append(str(endpoint['Id']))\n return emp_endpoint_list", "def __init__(self, epics_only=False, *args, **kwargs):\n self._kwargs = {}\n self._detectors = {}\n self._det_list = [] \n self._det_aliases = {}\n self._psplots = {}\n self._event_functions = {}\n self._source_attrs = []\n self._evt_time_last = (0,0)\n self.ievent = 0\n self._reloadOnLoadRun = False\n self._reloadOnNextEvent = False\n self.psana_cfg_dict = {}\n self._default_module_path = ''\n\n# self._user_attrs = {}\n# self._histograms = {}\n \n for key in kwargs:\n self._kwargs[key] = kwargs[key] \n if key in self._exp_defaults:\n setattr(self,key,kwargs[key])\n print 'setting ',key, kwargs[key]\n\n self._device_config = read_device_config(**kwargs)\n self._device_sets = self._device_config['device_sets'] \n self._device_types = self._device_config['device_types'] \n\n for det in self._device_sets:\n if 'det' in self._device_sets[det]:\n if ('detName' in self._device_sets[det]['det'] or\n 'typeName' in self._device_sets[det]['det']):\n self._det_list.append(det)\n if 'det_key' in self._device_sets[det]['det']:\n det_key = self._device_sets[det]['det']['det_key']\n self._det_aliases[det_key] = det \n else:\n pass\n \n# if 'pvs' in self._device_sets[det]:\n# for attr in self._device_sets[det]['pvs']:\n# pvbase = self._device_sets[det]['pvs'][attr]['base']\n# alias = '_'.join([det,attr])\n# self.add_pv(pvbase, alias)\n\n self.set_exp_defaults(**kwargs)\n if not self._kwargs.get('noload'):\n self.data_source = self.get_data_source(**kwargs)\n print 'Data Source = ', self.data_source\n else:\n self.data_source = None\n\n if not self.data_source:\n self._kwargs['noload'] = True\n else:\n kwargs['run'] = self.run\n\n# if self._kwargs.get('noload') or self.live:\n# if self._kwargs.get('epics_live'):\n# self.set_kwargs(ami=True)\n \n if self._kwargs.get('ami'):\n print 'loading ami'\n self.load_ami(**kwargs)\n\n if not self._kwargs.get('noload'):\n print 'loading run'\n self.load_run(*args, **kwargs)\n self._no_epicsStore = False\n \n print 'Instrument = ', self.instrument\n\n if self._kwargs.get('epics_live'): # and self._kwargs.get('epics_file'):\n print 'loading epics'\n self.load_epicsLive(**kwargs)\n\n if self.ds and self.live:\n self.next_event()\n \n if self.ds and self._reloadOnNextEvent:\n self.next_event()\n \n if not self.ds:\n self._no_epicsStore = True\n self._no_evtData = True\n for det in self._device_sets:\n if 'pvs' in self._device_sets[det]:\n print 'Adding epics ',det\n self.add_detector(det)", "def list_all_ephemerides_files(self) -> Dict:\n ephs = self.list_result_ephemerides_files()\n while 'nextPageToken' in ephs:\n next_page_token = ephs['nextPageToken']\n _, e = self.list_result_ephemerides_files(page_token=next_page_token)\n ephs['ephemerisResourcePath'].extend(e['ephemerisResourcePath'])\n return ephs", "def _get_eps_xml(self):\n format_path = 
os.path.join(os.path.dirname(__file__), \"formats\")\n\n # loop through files where filename starts with \"eps_ascat\".\n for filename in fnmatch.filter(os.listdir(format_path), \"eps_ascat*\"):\n doc = etree.parse(os.path.join(format_path, filename))\n file_extension = doc.xpath(\"//file-extensions\")[0].getchildren()[0]\n\n format_version = doc.xpath(\"//format-version\")\n for elem in format_version:\n major = elem.getchildren()[0]\n minor = elem.getchildren()[1]\n\n # return the xml file matching the metadata of the datafile.\n if major.text == self.mphr[\"FORMAT_MAJOR_VERSION\"] and \\\n minor.text == self.mphr[\"FORMAT_MINOR_VERSION\"] and \\\n self.mphr[\n \"PROCESSING_LEVEL\"] in file_extension.text and \\\n self.mphr[\"PRODUCT_TYPE\"] in file_extension.text:\n return os.path.join(format_path, filename)", "def gene_descriptors(civic_gid19):\n return [civic_gid19]", "def decode(self):\n # Extract all the experiments\n\n # Map of imageset/scan pairs\n imagesets = {}\n\n # For every experiment, use the given input to create\n # a sensible experiment.\n el = ExperimentList()\n for eobj in self._obj[\"experiment\"]:\n\n # Get the models\n identifier = eobj.get(\"identifier\", \"\")\n beam = self._lookup_model(\"beam\", eobj)\n detector = self._lookup_model(\"detector\", eobj)\n goniometer = self._lookup_model(\"goniometer\", eobj)\n scan = self._lookup_model(\"scan\", eobj)\n crystal = self._lookup_model(\"crystal\", eobj)\n profile = self._lookup_model(\"profile\", eobj)\n scaling_model = self._lookup_model(\"scaling_model\", eobj)\n\n key = (eobj.get(\"imageset\"), eobj.get(\"scan\"))\n\n imageset = None\n try:\n imageset = imagesets[key] # type: ImageSet\n except KeyError:\n # This imageset hasn't been loaded yet - create it\n imageset_data = self._lookup_model(\"imageset\", eobj)\n\n # Create the imageset from the input data\n if imageset_data is not None:\n if \"params\" in imageset_data:\n format_kwargs = imageset_data[\"params\"]\n else:\n format_kwargs = {}\n\n # Load the external lookup data\n mask_filename, mask = self._load_pickle_path(imageset_data, \"mask\")\n gain_filename, gain = self._load_pickle_path(imageset_data, \"gain\")\n pedestal_filename, pedestal = self._load_pickle_path(\n imageset_data, \"pedestal\"\n )\n dx_filename, dx = self._load_pickle_path(imageset_data, \"dx\")\n dy_filename, dy = self._load_pickle_path(imageset_data, \"dy\")\n\n if imageset_data[\"__id__\"] == \"ImageSet\":\n imageset = self._make_stills(\n imageset_data, format_kwargs=format_kwargs\n )\n elif imageset_data[\"__id__\"] == \"ImageGrid\":\n imageset = self._make_grid(\n imageset_data, format_kwargs=format_kwargs\n )\n elif (\n imageset_data[\"__id__\"] == \"ImageSequence\"\n or imageset_data[\"__id__\"] == \"ImageSweep\"\n ):\n imageset = self._make_sequence(\n imageset_data,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n format_kwargs=format_kwargs,\n )\n elif imageset_data[\"__id__\"] == \"MemImageSet\":\n imageset = self._make_mem_imageset(imageset_data)\n else:\n raise RuntimeError(\"Unknown imageset type\")\n\n if imageset is not None:\n # Set the external lookup\n if mask is None:\n mask = ImageBool()\n else:\n mask = ImageBool(mask)\n if gain is None:\n gain = ImageDouble()\n else:\n gain = ImageDouble(gain)\n if pedestal is None:\n pedestal = ImageDouble()\n else:\n pedestal = ImageDouble(pedestal)\n if dx is None:\n dx = ImageDouble()\n else:\n dx = ImageDouble(dx)\n if dy is None:\n dy = ImageDouble()\n else:\n dy = ImageDouble(dy)\n\n if not 
imageset.external_lookup.mask.data.empty():\n if not mask.empty():\n mask = tuple(m.data() for m in mask)\n for m1, m2 in zip(\n mask, imageset.external_lookup.mask.data\n ):\n m1 &= m2.data()\n imageset.external_lookup.mask.data = ImageBool(mask)\n else:\n imageset.external_lookup.mask.data = mask\n imageset.external_lookup.mask.filename = mask_filename\n imageset.external_lookup.gain.data = gain\n imageset.external_lookup.gain.filename = gain_filename\n imageset.external_lookup.pedestal.data = pedestal\n imageset.external_lookup.pedestal.filename = pedestal_filename\n imageset.external_lookup.dx.data = dx\n imageset.external_lookup.dx.filename = dx_filename\n imageset.external_lookup.dy.data = dy\n imageset.external_lookup.dy.filename = dy_filename\n\n # Update the imageset models\n if isinstance(imageset, ImageSequence):\n imageset.set_beam(beam)\n imageset.set_detector(detector)\n imageset.set_goniometer(goniometer)\n imageset.set_scan(scan)\n elif isinstance(imageset, (ImageSet, ImageGrid)):\n for i in range(len(imageset)):\n imageset.set_beam(beam, i)\n imageset.set_detector(detector, i)\n imageset.set_goniometer(goniometer, i)\n imageset.set_scan(scan, i)\n\n imageset.update_detector_px_mm_data()\n\n # Add the imageset to the dict - even if empty - as this will\n # prevent a duplicated attempt at reconstruction\n imagesets[key] = imageset\n\n # Append the experiment\n el.append(\n Experiment(\n imageset=imageset,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n crystal=crystal,\n profile=profile,\n scaling_model=scaling_model,\n identifier=identifier,\n )\n )\n\n # Return the experiment list\n return el", "def generate_initial_siaf_aperture_definitions(instrument):\n siaf_detector_layout = iando.read.read_siaf_detector_layout()\n\n prd_siaf = pysiaf.Siaf(instrument)\n siaf_definitions = Table()\n\n\n for attribute_name in 'AperName AperType XDetRef YDetRef XSciSize YSciSize XSciRef YSciRef'.split():\n siaf_definitions[attribute_name] = [getattr(prd_siaf[aperture_name], attribute_name) for aperture_name in prd_siaf.apertures]\n\n parent_apertures = [None]*len(siaf_definitions)\n dependency_type = [None]*len(siaf_definitions)\n\n if instrument == 'NIRISS':\n for i,aperture_name in enumerate(siaf_definitions['AperName']):\n if aperture_name != 'NIS_CEN':\n parent_apertures[i] = 'NIS_CEN'\n if '_OSS' in aperture_name:\n dependency_type[i] = 'oss_default'\n elif aperture_name != 'NIS_CEN':\n dependency_type[i] = 'default'\n\n elif instrument == 'FGS':\n for i,aperture_name in enumerate(siaf_definitions['AperName']):\n if aperture_name not in ['FGS1_FULL', 'FGS2_FULL']:\n parent_apertures[i] = '{}_FULL'.format(aperture_name.split('_')[0])\n else:\n dependency_type[i] = 'master'\n if '_OSS' in aperture_name:\n dependency_type[i] = 'oss_default'\n elif aperture_name not in ['FGS1_FULL', 'FGS2_FULL']:\n dependency_type[i] = 'default'\n\n elif instrument == 'NIRSpec':\n for i,aperture_name in enumerate(siaf_definitions['AperName']):\n if siaf_definitions['AperType'][i] == 'TRANSFORM':\n continue\n\n if '_OSS' in aperture_name:\n dependency_type[i] = 'oss_default'\n parent_apertures[i] = aperture_name.split('_OSS')[0]\n elif 'NRS_IFU' in aperture_name:\n parent_apertures[i] = 'NRS1_FULL'\n dependency_type[i] = 'default'\n elif aperture_name in ['NRS_S200B1_SLIT']:\n parent_apertures[i] = 'NRS2_FULL'\n dependency_type[i] = 'default'\n elif aperture_name in 'NRS_S200A1_SLIT NRS_S200A2_SLIT NRS_S400A1_SLIT NRS_S1600A1_SLIT NRS_FULL_IFU'.split():\n parent_apertures[i] = 
'NRS1_FULL'\n dependency_type[i] = 'default'\n elif ('_MSA1' in aperture_name) or ('_MSA2' in aperture_name):\n parent_apertures[i] = 'NRS2_FULL'\n dependency_type[i] = 'default'\n elif ('_MSA3' in aperture_name) or ('_MSA4' in aperture_name):\n parent_apertures[i] = 'NRS1_FULL'\n dependency_type[i] = 'default'\n elif aperture_name in ['NRS1_FP1MIMF']:\n parent_apertures[i] = 'NRS_S1600A1_SLIT'\n dependency_type[i] = 'FP1MIMF'\n elif 'MIMF' in aperture_name:\n parent_apertures[i] = aperture_name.split('_')[0]+'_FULL'\n dependency_type[i] = 'default'\n\n\n # if aperture_name not 'NIS_CEN':\n # parent_apertures[i] = 'NIS_CEN'\n # elif aperture_name != 'NIS_CEN':\n # dependency_type[i] = 'default'\n\n elif instrument == 'NIRCam':\n for i,aperture_name in enumerate(siaf_definitions['AperName']):\n\n # Master apertures\n if aperture_name in siaf_detector_layout['AperName']:\n dependency_type[i] = 'master'\n\n elif siaf_definitions['AperType'][i] in ['SUBARRAY', 'FULLSCA', 'ROI']:\n if 'MASK' in aperture_name:\n # Coronagraphic apertures with wedge offset\n dependency_type[i] = 'wedge'\n elif 'DHSPIL_WEDGES' in aperture_name:\n dependency_type[i] = 'dhspil_wedge'\n else:\n dependency_type[i] = 'default'\n sca_name = aperture_name[0:5]\n parent_apertures[i] = '{}_FULL'.format(sca_name)\n\n # OSS apertures\n elif siaf_definitions['AperType'][i] in ['OSS']:\n dependency_type[i] = 'default'\n parent_apertures[i] = aperture_name.split('_OSS')[0]\n\n elif (siaf_definitions['AperType'][i] in ['SLIT', 'COMPOUND']) and ('GRISM' in aperture_name) and ('WFSS' in aperture_name):\n dependency_type[i] = 'grism_wfss'\n sca_name = aperture_name.split('_')[0]\n # sca_name = aperture_name[0:5]\n parent_apertures[i] = '{}_FULL'.format(sca_name)\n\n # elif 'MASK' in aperture_name:\n # dependency_type[i] = 'wedge'\n # sca_name = aperture_name[0:5]\n # parent_apertures[i] = '{}_FULL'.format(sca_name)\n\n\n\n elif aperture_name in 'NRCALL_FULL NRCAS_FULL NRCBS_FULL'.split():\n dependency_type[i] = 'nircam_compound'\n if aperture_name == 'NRCALL_FULL':\n parent_apertures[i] = '; '.join(['NRCA1_FULL', 'NRCB2_FULL', 'NRCB1_FULL', 'NRCA2_FULL'])\n elif aperture_name == 'NRCAS_FULL':\n parent_apertures[i] = '; '.join(['NRCA1_FULL', 'NRCA3_FULL', 'NRCA4_FULL', 'NRCA2_FULL'])\n elif aperture_name == 'NRCBS_FULL':\n parent_apertures[i] = '; '.join(['NRCB4_FULL', 'NRCB2_FULL', 'NRCB1_FULL', 'NRCB3_FULL'])\n\n\n\n siaf_definitions['parent_apertures'] = parent_apertures\n siaf_definitions['dependency_type'] = dependency_type\n\n siaf_definitions.pprint()\n\n\n siaf_definitions_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_aperture_definition.txt'.format(instrument.lower()))\n\n comments = []\n comments.append('{} aperture definition file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains all the necessary aperture information to generate the full SIAF given the necessary reference files (focal plane alignment, distortion) and auxiliary information (DDC mapping, wedge offsets, ...)')\n comments.append('This file also defines the order in which the apertures are presented.')\n comments.append('')\n comments.append('Originally based on {}.'.format(JWST_PRD_VERSION))\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('{}'.format(username))\n comments.append('')\n siaf_definitions.meta['comments'] = comments\n siaf_definitions.write(siaf_definitions_file_name, format='ascii.fixed_width', delimiter=',',\n 
delimiter_pad=' ', bookend=False)", "def _get_data(ch_decim=1):\n # Read evoked\n evoked = mne.read_evokeds(fname_ave, 0, baseline=(None, 0))\n evoked.info[\"bads\"] = [\"MEG 2443\"]\n with evoked.info._unlock():\n evoked.info[\"lowpass\"] = 16 # fake for decim\n evoked.decimate(12)\n evoked.crop(0.0, 0.3)\n picks = mne.pick_types(evoked.info, meg=True, eeg=False)\n picks = picks[::ch_decim]\n evoked.pick_channels([evoked.ch_names[pick] for pick in picks])\n evoked.info.normalize_proj()\n\n noise_cov = mne.read_cov(fname_cov)\n noise_cov[\"projs\"] = []\n noise_cov = regularize(noise_cov, evoked.info, rank=\"full\", proj=False)\n return evoked, noise_cov", "def get_dicom_info(paths, index_col=None, verbose=False):\n meta_info = []\n paths = tqdm_notebook(paths, leave=False) if verbose else paths\n for path in paths:\n first_slice = dicom.read_file(os.path.join(path, os.listdir(path)[0]))\n\n if hasattr(first_slice, 'PatientAge'):\n patient_age = str(first_slice.PatientAge)\n else:\n patient_age = ''\n\n if hasattr(first_slice, 'PatientSex'):\n patient_sex = str(first_slice.PatientSex)\n else:\n patient_sex = ''\n\n locations = []\n for name in os.listdir(path):\n slice_path = os.path.join(path, name)\n dicom_slice = dicom.read_file(slice_path, stop_before_pixels=True)\n locations.append(float(dicom_slice.SliceLocation))\n\n steps_z = np.diff(np.sort(np.array(locations)))\n spacing_z = np.min(steps_z)\n info_dict = {\n \"UniformSpacing\": np.allclose(steps_z, spacing_z),\n 'MinSpacingZ': np.min(steps_z),\n 'MaxSpacingZ': np.max(steps_z),\n 'SliceThickness': float(first_slice.SliceThickness),\n 'SpacingZ': spacing_z,\n 'SpacingY': float(first_slice.PixelSpacing[0]),\n 'SpacingX': float(first_slice.PixelSpacing[1]),\n 'StudyID': str(first_slice.StudyID),\n 'ConvolutionKernel': str(first_slice.ConvolutionKernel),\n 'FilterType': str(first_slice.FilterType),\n 'WindowWidth': str(first_slice.WindowWidth),\n 'WindowCenter': str(first_slice.WindowCenter),\n 'PatientAge': patient_age,\n 'PatientSex': patient_sex,\n 'AccessionNumber': str(first_slice.AccessionNumber),\n 'PatientID': str(first_slice.PatientID),\n 'Rows': int(first_slice.Rows),\n 'Columns': int(first_slice.Columns),\n 'NumSlices': len(os.listdir(path)),\n 'ScanID': os.path.basename(path),\n 'Index': str(first_slice.AccessionNumber) + '_' + os.path.basename(path),\n 'ScanPath': path\n }\n meta_info.append(info_dict)\n return pd.DataFrame(meta_info) if index_col is None else pd.DataFrame(meta_info).set_index(index_col)" ]
[ "0.6132044", "0.59762484", "0.5642774", "0.53444785", "0.52427983", "0.52217543", "0.5216209", "0.51387924", "0.5125599", "0.50237185", "0.5018066", "0.5001039", "0.49862388", "0.49604324", "0.4950634", "0.49402615", "0.49259216", "0.49019468", "0.48623866", "0.4825966", "0.4802431", "0.47943074", "0.47941", "0.4782276", "0.47811458", "0.4775542", "0.47599554", "0.47576952", "0.47461975", "0.47358406" ]
0.6886132
0
Reconstruct the EPIs from pfiles.
def ReconEpis(self): run = zeros(100) if self.verbose: print 'Reconstruct EPIs' for pfile in self.pfiles_recon: if self.info[pfile]['refdat'] is None: # Find the ref.dat file later. continue if self.info[pfile]['compression'] is not None: # Data are compressed, copy to tmp. compression = self.info[pfile]['compression'] pfile_decomp = '%s/%s' % (self.tmpdir, \ os.path.basename(self.info[pfile]['pfile_decomp'])) if os.path.exists(pfile_decomp): errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \ ' in ReconEpis' cmd = '%s %s > %s' % \ (decompress_cmds[compression], pfile, pfile_decomp) self.ExecCmd(cmd) else: # Create a link on /tmp to the pfile so the link to ref.dat will also # be on /tmp, (which is always writeable.) pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile)) if not os.path.exists(pfile_decomp): os.symlink(pfile, pfile_decomp) refname, refcmpress = self.CheckCompression( \ self.info[pfile]['refdat']) if refcmpress is not None: refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname)) cmd = '%s %s > %s' % \ (decompress_cmds[refcmpress], \ self.info[pfile]['refdat'], refdat_decomp) self.ExecCmd(cmd) else: refdat_decomp = self.info[pfile]['refdat'] if refdat_decomp is not None: if refdat_decomp != 'ref.dat': # Create link bearing the file name epirecon_ex expects. refdat_link = '%s/ref.dat' % self.tmpdir if not os.path.exists(refdat_link): if self.verbose: print 'ln -s %s %s' % (refdat_decomp, refdat_link) if os.path.islink(refdat_link): # ref.dat is a broken symbolic link. if self.verbose: print 'rm %s' % ref_file os.remove(refdat_link) try: os.symlink(refdat_decomp, refdat_link) except OSError: self.errors = True pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp)) os.symlink(pfile_decomp, pfile_link) os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir) series = int(self.info[pfile]['series']) run[series] = run[series] + 1 epiname = self.info[pfile]['imgfile'] cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \ (pfile_decomp, epiname, self.skip) fname = '%s+orig.BRIK' % epiname self.CheckExec(cmd, [fname]) # self.epi_prefixes[pfile] = self.info[pfile]['imgfile'] else: errstr = '*******************************************\n' + \ 'No ref.dat file exists for %s\n' % pfile + \ '*******************************************\n' self.error_log = self.error_log + errstr self.f_crash.write(errstr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname", "def _EpiInfo(self, info, path):\n\n epi_vals = 
{'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK", "def from_file(epub_file):\n self = Epub()\n\n # TODO: zipfile.ZipFile accepts a file or a fileobject.\n # That seems ambiguous. 
We should probably create a\n # separate method to create an EPUB from a file object to be more\n # clear.\n\n if (isinstance(epub_file, file)):\n self.filename = file.name\n\n if (isinstance(epub_file, str)):\n self.filename = epub_file\n\n try:\n archive = zipfile.ZipFile(epub_file)\n except Exception as e:\n print 'Could not open zipfile \"%s\" \\n' % self.filename\n print e\n\n # parse container.xml for full path to content.opf file\n container_xml = archive.read(PATH_TO_CONTAINER_XML)\n container_xml_tree = etree.fromstring(container_xml)\n fullpath = container_xml_tree.xpath('n:rootfiles/n:rootfile/@full-path',\n namespaces=XML_NAMESPACES)[0]\n\n # Each major XML element in the content.opf file is mapped to its own class.\n # This dict maps those classes to the XPaths that point to the corresponding XML\n # element.\n #\n # for example: the XPath \"opf:package\" points to the '<package>' XML element\n # which is mapped to the Package class\n element_map = [{'name': 'package',\n 'class': Package,\n 'element_xpath': '/opf:package'},\n {'name': 'metadata',\n 'class': MetaData,\n 'element_xpath': '/opf:package/opf:metadata',\n 'sub_element_class': Element,\n 'sub_element_xpath': \"./*\"},\n {'name': 'manifest',\n 'class': Manifest,\n 'element_xpath': '/opf:package/opf:manifest',\n 'sub_element_class': ManifestElement,\n 'sub_element_xpath': 'opf:item'},\n {'name': 'spine',\n 'class': Spine,\n 'element_xpath': '/opf:package/opf:spine',\n 'sub_element_class': Element,\n 'sub_element_xpath': 'opf:itemref'},\n {'name': 'guide',\n 'class': Guide,\n 'element_xpath': '/opf:package/opf:guide',\n 'sub_element_class': Element,\n 'sub_element_xpath': 'opf:reference',\n 'optional': True}]\n\n tree = etree.fromstring(archive.read(fullpath))\n\n for element in element_map:\n try:\n element_tree = tree.xpath(element['element_xpath'], namespaces=XML_NAMESPACES)[0]\n except IndexError as e:\n # If the element is marked as optional, just keep going if we don't find it.\n if element['optional']:\n continue\n else:\n print element\n element_class = element['class']()\n element_class.as_xhtml = etree.tostring(element_tree)\n # Step through the attrib dict and replace each key with its localname version\n # i.e. 
if the key is '{namespace}event', replace it with 'event'.\n # There *shouldn't* be any collisions.\n element_class.tag.attributes = {etree.QName(key).localname: value for key, value in\n element_tree.attrib.iteritems()}\n element_class.tag.localname = etree.QName(element_tree).localname\n element_class.tag.namespace = etree.QName(element_tree).namespace\n element_class.text = element_tree.text\n\n if 'sub_element_class' in element:\n sub_element_tree = element_tree.xpath(element['sub_element_xpath'], namespaces=XML_NAMESPACES)\n for k in sub_element_tree:\n sub_element_class = element['sub_element_class']()\n sub_element_class.as_xhtml = etree.tostring(k)\n sub_element_class.tag.attributes = {etree.QName(key).localname: value for key, value in\n k.attrib.iteritems()}\n sub_element_class.tag.localname = etree.QName(k.tag).localname\n sub_element_class.tag.namespace = etree.QName(k.tag).namespace\n sub_element_class.tag.text = k.text\n element_class.append(sub_element_class)\n\n # if we just created a ManifestElement, we need to additionally\n # pass it a reference to the epub archive and the dirname\n # contained in the fullpath in order for it to access the file\n # it points to\n\n if type(sub_element_class) == ManifestElement:\n # fullpath is the path to the content.opf file.\n # This should also be the path to the manifest item files.\n sub_element_class.basedir = os.path.dirname(fullpath)\n sub_element_class.archive = archive\n\n # Assigns the class we just created as an attribute of the Epub object.\n # The attr name is taken from the 'name' value in the element_map above.\n setattr(self, element['name'], element_class)\n\n # If we just created the spine element, we need to pass it\n # a reference to the manifest. This will enable the spine element to access\n # manifeset elements directly\n # note: this assumes the manifest element has alreay been created\n if element['name'] == 'spine':\n self.spine.manifest = self.manifest\n\n # read in the items from the manifest\n for element in self.manifest:\n if element.isDocument():\n pass\n if element.isImage():\n self.images.append(element)\n if element.isCSS():\n self.css.append(element)\n if element.isTOC():\n pass\n\n # create an array called parts that references elements\n # listed in the spine\n\n for itemref in self.spine.list:\n self.parts.append(self.manifest.getElementById(itemref.tag.attributes['idref']))\n\n return self", "def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in 
self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])", "def reffile_setup(self):\n # Prepare to find files listed as 'config'\n # and set up PSF path\n\n # set up as dictionary of dictionaries\n self.configfiles = {}\n self.psfpath = {}\n self.psfbasename = {}\n self.psfpixfrac = {}\n self.reference_file_dir = {}\n\n for instrument in 'nircam niriss fgs'.split():\n self.configfiles[instrument] = {}\n self.psfpath[instrument] = os.path.join(self.datadir, instrument, 'gridded_psf_library')\n self.psfbasename[instrument] = instrument\n self.reference_file_dir[instrument] = os.path.join(self.datadir, instrument, 'reference_files')\n\n # Set instrument-specific file paths\n if instrument == 'nircam':\n self.psfpixfrac[instrument] = 0.25\n elif instrument == 'niriss':\n self.psfpixfrac[instrument] = 0.1\n elif instrument == 'fgs':\n self.psfpixfrac[instrument] = 0.1\n\n # Set global file paths\n self.configfiles[instrument]['filter_throughput'] = os.path.join(self.modpath, 'config', 'placeholder.txt')\n\n for instrument in 'miri nirspec'.split():\n self.configfiles[instrument] = {}\n self.psfpixfrac[instrument] = 0\n self.psfbasename[instrument] = 'N/A'\n\n # create empty dictionaries\n list_names = 'superbias linearity gain saturation ipc astrometric photom pam dark lindark'.split()\n for list_name in list_names:\n setattr(self, '{}_list'.format(list_name), {})\n\n self.det_list = {}\n self.det_list['nircam'] = ['A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B4', 'B5']\n self.det_list['niriss'] = ['NIS']\n self.det_list['fgs'] = ['G1', 'G2']\n self.det_list['nirspec'] = ['NRS']\n self.det_list['miri'] = ['MIR']\n\n for instrument in 'nircam niriss fgs miri nirspec'.split():\n for list_name in list_names:\n getattr(self, '{}_list'.format(list_name))[instrument] = {}\n\n if self.offline:\n # no access to central store. 
Set all files to none.\n for list_name in list_names:\n if list_name in 'dark lindark'.split():\n default_value = ['None']\n else:\n default_value = 'None'\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n elif instrument == 'nircam':\n rawdark_dir = os.path.join(self.datadir, 'nircam/darks/raw')\n lindark_dir = os.path.join(self.datadir, 'nircam/darks/linearized')\n for det in self.det_list[instrument]:\n self.dark_list[instrument][det] = glob(os.path.join(rawdark_dir, det, '*.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(lindark_dir, det, '*.fits'))\n\n elif instrument in ['nirspec', 'miri']:\n for key in 'subarray_def_file fluxcal filtpupil_pairs readpatt_def_file crosstalk ' \\\n 'dq_init_config saturation_config superbias_config refpix_config ' \\\n 'linearity_config filter_throughput'.split():\n self.configfiles[instrument][key] = 'N/A'\n default_value = 'none'\n for list_name in list_names:\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n else: # niriss and fgs\n for det in self.det_list[instrument]:\n if det == 'G1':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS1_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS1_DARK_SEARCH_STRING))\n\n elif det == 'G2':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS2_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS2_DARK_SEARCH_STRING))\n\n elif det == 'NIS':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/raw',\n '*uncal.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/linearized',\n '*linear_dark_prep_object.fits'))", "def ppt_files_to_dict(self):\n if len(self.ppt_path_list) == 0:\n return\n\n for file_path in self.ppt_path_list:\n self.ppt_file_to_dict(file_path)", "def __init__(self, config):\n self.config = config\n self.outpath = prepDir(config.outpath)\n self.xslpath = config.xslpath\n self.imagespath = config.imagespath\n self.errors = []\n self.xeps = []\n files = []\n if config.xeps:\n for xep in config.xeps:\n if os.path.isfile(xep):\n files.append(os.path.abspath(xep))\n elif os.path.isdir(xep):\n fltr = os.path.join(os.path.abspath(xep), '*.xml')\n files += glob.glob(fltr)\n else:\n if os.path.isfile(\"xep-{0}.xml\".format(xep)):\n files.append(\n os.path.abspath(os.path.join(os.getcwd(), \"xep-{0}.xml\".format(xep))))\n else:\n # no xeps given, try all xml-files in curdir\n fls = glob.glob(os.path.join(os.getcwd(), '*.xml'))\n for fle in fls:\n files.append(os.path.abspath(fle))\n # try if we can find an existing XEP-table:\n if os.path.isfile(os.path.join(self.outpath, \"xeps.xml\")):\n self.xeptable = os.path.join(self.outpath, \"xeps.xml\")\n else:\n self.xeptable = None\n # read files to xeps\n for fle in sorted(set(files)):\n try:\n self.xeps.append(\n xeputils.xep.XEP(fle,\n outpath=self.outpath,\n xslpath=self.xslpath,\n imagespath=self.imagespath))\n except:\n e = \"Error while parsing {}\\n\".format(fle)\n e += \"FATAL: {} is not included\\n\".format(fle)\n e += traceback.format_exc()\n self.errors.append(e)", "def generate_extracts(self, root=None):\n import collections\n from ambry.util import toposort\n\n \n ext_config = self.extracts\n\n # Order the extracts 
to satisfy dependencies. \n graph = {}\n for key,extract in ext_config.items():\n graph[key] = set(extract.get('depends',[]))\n \n\n if graph:\n exec_list = []\n for group in toposort(graph):\n exec_list.extend(group)\n else:\n exec_list = ext_config.keys()\n \n if root:\n deps = self.dep_tree(root)\n exec_list = [ n for n in exec_list if n in deps]\n \n \n # now can iterate over the list. \n for key in exec_list:\n extract = ext_config[key]\n extract['_name'] = key\n for_ = extract.get('for', \"'True'\")\n function = extract.get('function', False)\n file_ = extract.get('file', False)\n each = extract.get('each', [])\n p_id = extract.get('partition', False)\n eaches = self._expand_each(each)\n \n \n # This part is a awful hack and should be refactored\n if function:\n for data in eaches: \n yield self._sub(dict(extract.items() + data.items()))\n\n elif p_id: \n partitions = self._expand_partitions(p_id, for_)\n \n for partition in partitions:\n p_dict = self._make_partition_dict(partition)\n for data in eaches: \n yield self._sub(dict(p_dict.items()+extract.items() + \n data.items() ))\n elif file_:\n for data in eaches:\n yield self._sub(dict(extract.items() + data.items()))\n else:\n self.bundle.error(\"Extract group {} should have either a function or a partition\".format(key))", "def extract(self):\n self.build_path_pairs()\n self.extract_field_blocks()\n self.assert_filenames()", "def extract_pp(self):\n build([srna.ExtractPPW(fastq_dic=self.fastq_dic, num_cpus=self.num_cpus,\n indexfile=self.hisat_index, workdir=self.workdir,\n kingdom=self.kingdom)],\n local_scheduler=self.local_scheduler)", "def PruneEpiEntries(self):\n pruned = {}\n basefiles = []\n baseentries = {}\n for entry in self.entry_map['epi']:\n if baseentries.has_key(self.info[entry]['basefile']):\n baseentries[self.info[entry]['basefile']].append(entry)\n else:\n baseentries[self.info[entry]['basefile']] = [entry]\n for entry in self.entry_map['epi']:\n targets = []\n if self.no_motcorr:\n target = self.info[entry]['imgfile']\n elif self.info[entry]['fmapname'] is None or self.no_fmapcorr:\n target = self.info[entry]['imgfile_m']\n else:\n target = self.info[entry]['imgfile_mf']\n targets.append(target + self.info[entry]['suffix'])\n targets.append('%s%s' % (self.info[entry]['censor_prefix'], '_censor.1D'))\n pruned[entry] = [True, baseentries[self.info[entry]['basefile']]]\n for target in targets:\n pruned[entry] = \\\n [False, baseentries[self.info[entry]['basefile']]]\n for key in pruned.keys():\n if not pruned[key][0]:\n for entry in pruned[key][1]:\n pruned[entry][0] = False\n tmp = new_map = []\n for entry in self.entry_map['epi']:\n if pruned[entry][0]:\n if self.verbose:\n print 'Skipping %s: Already reconstructed.' 
% targets[0]\n if entry in self.pfiles_recon:\n self.pfiles_recon.remove(entry)\n else:\n new_map.append(entry)\n self.entry_map['epi'] = new_map", "def readPubTator(args):\n if not os.path.exists('/'.join(args.output_file.split('/')[:-1])):\n os.makedirs('/'.join(args.output_file.split('/')[:-1]))\n\n abstracts = OrderedDict()\n entities = OrderedDict()\n relations = OrderedDict()\n\n with open(args.input_file, 'r') as infile:\n for line in tqdm(infile):\n\n # text\n if len(line.rstrip().split('|')) == 3 and \\\n (line.strip().split('|')[1] == 't' or line.strip().split('|')[1] == 'a'):\n line = line.strip().split('|')\n\n pmid = line[0]\n text = line[2] # .replace('>', '\\n')\n\n # replace weird symbols and spaces\n text = replace2symbol(text)\n text = replace2space(text)\n\n if pmid not in abstracts:\n abstracts[pmid] = [TextStruct(pmid, text)]\n else:\n abstracts[pmid] += [TextStruct(pmid, text)]\n\n # entities\n elif len(line.rstrip().split('\\t')) == 6:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n\n # currently consider each possible ID as another entity\n for k in kb_id:\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n\n elif len(line.rstrip().split('\\t')) == 7:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n extra_ents = line[6].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n for i, e in enumerate(extra_ents):\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n\n # relations\n elif len(line.rstrip().split('\\t')) == 4:\n line = line.strip().split('\\t')\n pmid = line[0]\n rel_type = line[1]\n arg1 = tuple((line[2].split('|')))\n arg2 = tuple((line[3].split('|')))\n\n if pmid not in relations:\n relations[pmid] = [RelStruct(pmid, rel_type, arg1, arg2)]\n else:\n relations[pmid] += [RelStruct(pmid, rel_type, arg1, arg2)]\n\n elif line == '\\n':\n continue\n\n return abstracts, entities, relations", "def load(self):\n if self.__fname == '':\n print('You must pass in a file name to load!')\n return []\n\n ext = os.path.splitext(self.__fname)[1]\n first_pt = None\n if len(self.__fea.points) > 0:\n first_pt = self.__fea.points[0]\n if ext == '.dxf':\n parts = self.__load_dxf()\n elif ext in ['.brep', '.brp', '.iges', '.igs', '.step', '.stp']:\n self.__make_geo()\n parts = self.__load_geo()\n last_pt = None\n if first_pt != None:\n if len(self.__fea.points) > 2:\n last_pt = self.__fea.points[-1]\n if self.__scale != '':\n # call scale\n pass\n return parts", "def compute_products(self):\r\n src_to_classfiles = defaultdict(list)\r\n for pcd_entry in self.pcd_entries:\r\n srcfile = pcd_entry[1]\r\n # In the file classes are represented with slashes, not dots. 
E.g., com/foo/bar/Baz.\r\n src_to_classfiles[srcfile].append(pcd_entry[0] + '.class')\r\n return src_to_classfiles", "def init_pta(params_all):\n\n ptas = dict.fromkeys(params_all.models)\n for ii, params in params_all.models.items():\n\n allpsr_model = params_all.noise_model_obj(psr=params_all.psrs,\n params=params)\n\n models = list()\n from_par_file = list()\n ecorrexists = np.zeros(len(params_all.psrs))\n\n # Including parameters common for all pulsars\n if params.tm=='default':\n tm = gp_signals.TimingModel()\n elif params.tm=='ridge_regression':\n log10_variance = parameter.Uniform(-20, -10)\n basis = scaled_tm_basis()\n prior = ridge_prior(log10_variance=log10_variance)\n tm = gp_signals.BasisGP(prior, basis, name='ridge')\n\n # Adding common noise terms for all pulsars\n # Only those common signals are added that are listed in the noise model\n # file, getting Enterprise models from the noise model object.\n if 'm_all' in locals():\n del m_all\n for psp, option in params.common_signals.items():\n if 'm_all' in locals():\n m_all += getattr(allpsr_model, psp)(option=option)\n else:\n m_all = tm + getattr(allpsr_model, psp)(option=option)\n\n # Including single pulsar noise models\n for pnum, psr in enumerate(params_all.psrs):\n\n singlepsr_model = params_all.noise_model_obj(psr=psr, params=params)\n\n # Determine if ecorr is mentioned in par file\n try:\n for key,val in psr.t2pulsar.noisemodel.items():\n if key.startswith('ecorr') or key.startswith('ECORR'):\n ecorrexists[pnum]=True\n except Exception as pint_problem:\n print(pint_problem)\n ecorrexists[pnum]=False\n\n # Add noise models\n if psr.name in params.noisemodel.keys():\n noise_model_dict_psr = params.noisemodel[psr.name]\n else:\n noise_model_dict_psr = params.universal\n for psp, option in noise_model_dict_psr.items():\n if 'm_sep' in locals():\n m_sep += getattr(singlepsr_model, psp)(option=option)\n elif 'm_all' in locals():\n m_sep = m_all + getattr(singlepsr_model, psp)(option=option)\n else:\n m_sep = tm + getattr(singlepsr_model, psp)(option=option)\n\n models.append(m_sep(psr))\n del m_sep\n\n pta = signal_base.PTA(models)\n\n if 'noisefiles' in params.__dict__.keys():\n noisedict = get_noise_dict(psrlist=[p.name for p in params_all.psrs],\\\n noisefiles=params.noisefiles)\n print('For constant parameters using noise files in PAL2 format')\n pta.set_default_params(noisedict)\n\n print('Model',ii,'params (',len(pta.param_names),') in order: ', \\\n pta.param_names)\n\n if params.opts is not None:\n if params.opts.mpi_regime != 2:\n np.savetxt(params.output_dir + '/pars.txt', pta.param_names, fmt='%s')\n \n ptas[ii]=pta\n\n return ptas", "def __load(self, pkgrels):\n # keep track of which parts are already loaded\n part_dict = {}\n\n # discard any previously loaded relationships\n self.__relationships = _RelationshipCollection()\n\n # add model-side rel for each pkg-side one, and load target parts\n for pkgrel in pkgrels:\n # unpack working values for part to be loaded\n reltype = pkgrel.reltype\n pkgpart = pkgrel.target\n partname = pkgpart.partname\n content_type = pkgpart.content_type\n # log.debug(\"%s -- %s\", reltype, partname)\n\n # create target part\n part = Part(reltype, content_type)\n part_dict[partname] = part\n part._load(pkgpart, part_dict)\n\n # create model-side package relationship\n model_rel = _Relationship(pkgrel.rId, reltype, part)\n self.__relationships._additem(model_rel)\n\n # gather references to image parts into __images\n self.__images = ImageCollection()\n image_parts = [part for part in 
self._parts\n if part.__class__.__name__ == 'Image']\n for image in image_parts:\n self.__images._loadpart(image)", "def _extract(self):\r\n self._data = []\r\n for fname in self.files:\r\n meta = dict(filename=fname)\r\n\r\n # Perform the actual metadata extraction\r\n fname = os.path.splitext(self.filter_filename(fname))[0]\r\n values = fname.split(self.sep)\r\n\r\n # Handle the case where number of fields is less than the length\r\n # of the extracted values, ie cases where we only want to extract\r\n # a subset of available fields.\r\n if self.index:\r\n values = [val for i, val in enumerate(values) if i in self.index]\r\n\r\n meta.update(dict(zip(self.fields, values)))\r\n if self.split_by in self.fields:\r\n meta[self.split_by] = self._get_split_field_values(meta['filename'])\r\n self._data.append(meta)", "def CleanEpi(self):\n for entry in self.info.keys():\n info = self.info[entry]\n if info['psdname'] == 'epi':\n for tag in ('imgfile', 'imgfile_m', 'imgfile_mf', 'imgfile_t'):\n if info.has_key(tag) and info[tag] is not None and \\\n os.path.exists(info[tag]):\n print 'Deleting %s*' % (info[tag], info['suffix'])\n cmd = '/bin/rm %s%s*' % (info[tag], info['suffix'])\n self.ExecCmd(cmd)\n if '.BRIK' in info['suffix']:\n cmd = '/bin/rm %s%s*' % (info[tag], \\\n info['suffix'].replace('.BRIK','.HEAD'))\n self.ExecCmd(cmd)", "def make_pmodel_energies():\n cwd = os.getcwd()\n\n os.chdir(\"test_data/protein_load\")\n pmodel = pyODEM.model_loaders.Protein(\"ww_domain.ini\")\n os.chdir(cwd)\n\n data = pmodel.load_data(\"test_data/protein_load/traj/traj_test.xtc\")\n heps, dheps = pmodel.get_potentials_epsilon(data)\n\n true_energies = np.loadtxt(\"test_data/protein_load/traj/energy_gaussian_test.dat\")\n\n return pmodel, data, heps, dheps, true_energies", "def setESFiles(self, eSourceDir = None, verbose = False):\n\n print('\\n***Setting electronic structure files')\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc':\n # Check and set electronic structure file for packaging.\n if '***Missing' in self.nbDetails[key]['jobInfo'][2]:\n self.nbDetails[key]['elecStructure'] = None\n else:\n if eSourceDir is not None:\n # Copy electronic structure files to package using supplied path\n fileName = Path(self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\"))\n self.nbDetails[key]['elecStructure'] = Path(eSourceDir, fileName.name).as_posix()\n\n else:\n # Copy electronic structure files to package, based on full path from original job\n self.nbDetails[key]['elecStructure'] = self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\")\n\n checkList = self.checkFiles(self.nbDetails[key]['elecStructure'])\n\n # If file is missing, set to \"missing\"\n if not checkList[0]:\n self.nbDetails[key]['elecStructure'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n\n # If file is present, check also for corresponding files\n else:\n # Assuming above is molden file, check also for corresponding Gamess file\n gFile = Path(self.nbDetails[key]['elecStructure']).with_suffix('.log')\n checkList = self.checkFiles(gFile)\n if checkList[0]:\n # self.nbDetails[key]['elecStructure'].append(gFile.as_posix()) # Set here to append... 
hopefully works OK with arch update code...\n self.nbDetails[key]['elecStructureGamess'] = gFile.as_posix() # Set here as separate item\n else:\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {gFile.as_posix()}\"\n #\n\n if verbose:\n print(f\"Job {key}: {self.nbDetails[key]['title']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructure']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructureGamess']}\")", "def load_PSF_data(self):\n self.epsf = {}\n for filter in ['F105W', 'F125W', 'F140W', 'F160W']:\n file = os.path.join(os.getenv('GRIZLI'), 'CONF',\n 'PSFSTD_WFC3IR_{0}.fits'.format(filter))\n \n data = pyfits.open(file)[0].data.T\n data[data < 0] = 0 \n \n self.epsf[filter] = data\n \n # Dummy, use F105W ePSF for F098M and F110W\n self.epsf['F098M'] = self.epsf['F105W']\n self.epsf['F110W'] = self.epsf['F105W']\n \n # Extended\n self.extended_epsf = {}\n for filter in ['F105W', 'F125W', 'F140W', 'F160W']:\n file = os.path.join(os.getenv('GRIZLI'), 'CONF',\n 'extended_PSF_{0}.fits'.format(filter))\n \n if not os.path.exists(file):\n msg = 'Extended PSF file \\'{0}\\' not found.'.format(file)\n msg += '\\n Get the archive from http://www.stsci.edu/~brammer/Grizli/Files/WFC3IR_extended_PSF.v1.tar.gz'\n msg += '\\n and unpack in ${GRIZLI}/CONF/' \n raise FileNotFoundError(msg)\n \n data = pyfits.open(file)[0].data#.T\n data[data < 0] = 0 \n \n # Mask center\n NX = data.shape[0]/2-1\n yp, xp = np.indices(data.shape)\n R = np.sqrt((xp-NX)**2+(yp-NX)**2)\n data[R <= 4] = 0.\n \n self.extended_epsf[filter] = data\n self.extended_N = int(NX)\n \n self.extended_epsf['F098M'] = self.extended_epsf['F105W']\n self.extended_epsf['F110W'] = self.extended_epsf['F105W']", "def extract_data(filename, id_ep, conn):\n\n try:\n tree = ET.parse(filename)\n root = tree.getroot()\n\n # creation of a speaker's dict to manage the local ids (specific speakers' id file-dependent)\n speakers = {}\n # creation of a iterable to identify the unknown/unnamed speakers\n uknw_id = 0\n\n names = []\n cur = conn.cursor()\n cur.execute(\"SELECT name FROM speaker\")\n rows = cur.fetchall()\n i = 0\n for row in rows:\n names.append(row[0])\n i += 1\n\n # creation of speakers\n for spkr in root.iter(\"Speaker\"):\n name = spkr.attrib[\"name\"]\n if ((\",\" in name) or (\"sup+\" in name) or (\"Sup+\" in name)):\n name = \"multi_spk\"\n elif ((\"spk\" in name) or (\"speaker\" in name) or (\"Inconnu\" in name) or (\"unknown\" in name)):\n name = \"spk_\"+str(id_ep)+\"_\"+str(uknw_id)\n uknw_id += 1\n else :\n n = name.split(\" \")\n name = n[0]\n if len(n) > 1:\n for i in range(1, len(n)):\n name += \"_\" + n[i].upper()\n\n if \"type\" in spkr.attrib:\n if spkr.attrib[\"type\"] not in (\"male\", \"female\"):\n gender = \"NA\"\n else:\n gender = spkr.attrib[\"type\"]\n else:\n gender = \"NA\"\n\n if \"dialect\" in spkr.attrib:\n native = spkr.attrib[\"dialect\"]\n else:\n native = \"NA\"\n\n # speaker added only if doesn't already exists in the database\n if name not in names:\n new_speaker = (name, gender, native)\n create_speaker(conn, new_speaker)\n\n # update of the local id->name dictionary\n speakers[spkr.attrib['id']] = name\n\n # creation of turns\n for turn in root.iter(\"Turn\"):\n\n if \"speaker\" in turn.attrib:\n if len(turn.attrib[\"speaker\"]) > 5:\n speaker_name = \"multi_spk\"\n else:\n speaker_name = speakers[turn.attrib[\"speaker\"]]\n start_time = turn.attrib[\"startTime\"]\n end_time = turn.attrib[\"endTime\"]\n\n cur = conn.cursor()\n cur.execute(\"SELECT id_speaker FROM 
speaker WHERE name=?\", (speaker_name,))\n id_speaker = cur.fetchone()[0]\n id_episode = id_ep\n trans = ET.tostring(turn, \"ISO-8859-1\", method=\"text\")\n trans = trans.decode(\"ISO-8859-1\")\n trans = re.sub(\"\\n\", \" \", trans)\n trans = re.sub(\" \", \" \", trans)\n\n new_turn = (start_time, end_time, id_speaker, id_episode, trans)\n create_turn(conn, new_turn)\n\n except Exception as e:\n print(e)\n print(filename)\n pass", "def elastixTemplates():\n\t\ttransformations = []\n\t\tfileNames = os.listdir(AppVars.transformationsPath())\n\t\tfor fileName in fileNames:\n\t\t\tfullFileName = os.path.join(AppVars.transformationsPath(), fileName)\n\t\t\ttransformation = ParameterList()\n\t\t\tif transformation.loadFromFile(fullFileName):\n\t\t\t\ttransformations.append(transformation)\n\t\treturn transformations", "def parse_products(self, infile):\r\n raise NotImplementedError()", "def _extract_models(self, name, from_dict):\n \"\"\"if name == imageset: Extract imageset objects from the source.\n\n This function does resolving of an (old) method of imageset lookup\n e.g. it was valid to have a string as the imageset value in an\n experiment instead of an int - in which case the imageset was\n loaded from the named file in the target directory.\n\n If any experiments point to a file in this way, the imageset is\n loaded and the experiment is rewritted with an integer pointing\n to the new ImageSet in the returned list.\n\n Returns:\n The ordered list of serialized-ImageSet dictionaries\n that the Experiment list points to.\n \"\"\"\n\n # Extract all the model list\n mlist = self._obj.get(name, [])\n\n # Convert the model from dictionary to concreate\n # python class for the model.\n mlist = [from_dict(d) for d in mlist]\n\n # Dictionaries for file mappings\n mmap = {}\n\n # For each experiment, check the model is not specified by\n # a path, if it is then get the dictionary of the model\n # and insert it into the list. 
Replace the path reference\n # with an index\n for eobj in self._obj[\"experiment\"]:\n value = eobj.get(name)\n if value is None:\n continue\n elif isinstance(value, str):\n if value not in mmap:\n mmap[value] = len(mlist)\n mlist.append(\n from_dict(_experimentlist_from_file(value, self._directory))\n )\n eobj[name] = mmap[value]\n elif not isinstance(value, int):\n raise TypeError(\"expected int or str, got %s\" % type(value))\n\n # Return the model list\n return mlist", "def extract_all_lazy():\n\n\t#Construct filepaths: Data COMP_INFO_1\n\tdata_ci1_name = \"DATA_2016_COMP_INFO_1.csv\"\n\tdata_ci1_fullname = os.path.join(files_location, data_ci1_name)\n\t#Data COMP_INFO_2\n\tdata_ci2_name = \"DATA_2016_COMP_INFO_2.csv\"\n\tdata_ci2_fullname = os.path.join(files_location, data_ci2_name)\n\t#Data PROPERTY INFO\n\tdata_pi_name = \"DATA_2016_PROPERTY_INFO_ST.csv\"\n\tdata_pi_fullname = os.path.join(files_location, data_pi_name)\n\t#Data General Info\n\tdata_gi_name = \"DATA_2016_GENERAL_INFO.csv\"\n\tdata_gi_fullname = os.path.join(files_location, data_gi_name)\n\n\t#Read & Process COMP_INFO\n\tdata_ci1 = pd.read_csv(data_ci1_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\tdata_ci2 = pd.read_csv(data_ci2_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\n\tdata_ci = data_ci1.append(data_ci2)\n\tdata_ci['QUESTION'] = data_ci['QUESTION'].replace(constants.ci_mapping)\n\t# Take only the survey questions mapped\n\tdata_ci = data_ci[data_ci['QUESTION'].isin(constants.ci_mapping.values())]\n\tdata_ci = data_ci.set_index(['PROPERTY_CODE','PROPERTY_NAME','JOB_CODE','POSITION'])\n\tdata_ci = data_ci.pivot(columns=\"QUESTION\")\n\tdata_ci.columns = [\"_\".join(pair) for pair in data_ci.columns]\n\tdata_ci = data_ci.reset_index()\n\n\t#Read & Process Property Info data\n\tdata_pi = pd.read_csv(data_pi_fullname, usecols = constants.keep_columns_PI, encoding='ISO-8859-1')\n\t#survey_type_transformed = transform.surveytype_categorical(data_pi)\n\t#data_pi = pd.merge(data_pi, survey_type_transformed, on=['PROPERTY_CODE'])\n\n\t#Read & Process General Info\n\tdata_gi = pd.read_csv(data_gi_fullname, skiprows = 2, usecols = constants.keep_columns_GI, encoding='ISO-8859-1')\n\tdata_gi['QUESTION'] = data_gi['QUESTION'].replace(constants.gi_mapping)\n\t# Take onl the survey questions mapped\n\tdata_gi = data_gi[data_gi['QUESTION'].isin(constants.gi_mapping.values())]\n\tdata_gi = data_gi.set_index(['PROPERTY_CODE','PROPERTY_NAME'])\n\tdata_gi = data_gi.pivot(columns=\"QUESTION\")\n\tdata_gi.columns = [\"_\".join(pair) for pair in data_gi.columns]\n\tdata_gi = data_gi.reset_index()\n\n\t#This frame needs to be reworked\n\td_ci = pd.merge(data_gi, data_pi, on = ['PROPERTY_CODE','PROPERTY_NAME'])\n\td_ci = pd.merge(d_ci, data_ci, on = ['PROPERTY_CODE','PROPERTY_NAME'],suffixes= ['_ci','_gi'])\n\n\t#Observations by Dimensions to determine top X markets\n\t#Can this be in a better position?\n\td_ci = d_ci[~(d_ci['PROPERTY_NAME'].isin(constants.del_rows_property_name))]\n\td_ci['POSITION'] = d_ci['POSITION'].astype(str)\n\n\tpayload = {}\n\tpayload['gi'] = data_gi\n\tpayload['pi'] = data_pi\n\tpayload['ci'] = data_ci\n\tpayload['d_ci'] = d_ci\n\n\treturn payload", "def parse( cls, filename, verbose = False ) :\n if verbose : sys.stdout.write( \"%s.parse(%s)\\n\" % (cls.__name__, filename,) )\n\n infile = os.path.realpath( filename )\n dat = cls( verbose )\n\n with open( infile, \"rU\" ) as inf :\n expt_num = None\n for line in inf :\n if verbose 
:\n sys.stdout.write( line )\n\n m = dat.version_pat.search( line )\n if m :\n dat.version = m.group( 1 )\n continue\n\n m = dat.expt_pat.search( line )\n if m :\n expt_num = int( m.group( 1 ) )\n par_set = m.group( 2 ).upper()\n\n if not par_set in bmrbmb.topspin.EXPERIMENTS.keys() :\n raise Exception( \"Unknown experiment parameter set: %s\" % (m.group( 2 ),) )\n\n# adapted sweep width HSQC\n#\n if (par_set == \"HSQCETGP\") and (m.group( 3 ) is not None) :\n expt_name = \"2D 1H-13C HSQC SW small\"\n else :\n expt_name = bmrbmb.topspin.EXPERIMENTS[par_set]\n\n dat.data[expt_num] = { \"name\" : expt_name }\n\n# next line should have experiment details\n# 1 or 2D only\n#\n\n m = dat.dim_pat.search( line )\n if m :\n if expt_num is None :\n raise Exception( \"Experiment detail without parameter set\" )\n\n dims = { m.group( 1 ) : { \"nuc\" : m.group( 2 ), \"sw\" : m.group( 3 ) } }\n if m.group( 4 ) is not None :\n dims[m.group( 4 )] = { \"nuc\" : m.group( 5 ), \"sw\" : m.group( 6 ) }\n\n dat.data[expt_num][\"dims\"] = dims\n\n expt_num = None\n\n return dat", "def load_host_seq_prep(prep_data):\n module_logger.info(\"Creating a template %s.\", __name__)\n prep = HostSeqPrep()\n\n module_logger.debug(\"Filling in %s details.\", __name__)\n\n # The attributes commmon to all iHMP nodes\n prep._set_id(prep_data['id'])\n prep.version = prep_data['ver']\n prep.links = prep_data['linkage']\n prep.tags = prep_data['meta']['tags']\n\n # The attributes that are particular to HostSeqPrep documents\n prep.comment = prep_data['meta']['comment']\n prep.lib_layout = prep_data['meta']['lib_layout']\n prep.lib_selection = prep_data['meta']['lib_selection']\n prep.ncbi_taxon_id = prep_data['meta']['ncbi_taxon_id']\n prep.prep_id = prep_data['meta']['prep_id']\n prep.sequencing_center = prep_data['meta']['sequencing_center']\n prep.sequencing_contact = prep_data['meta']['sequencing_contact']\n prep.storage_duration = prep_data['meta']['storage_duration']\n\n if 'adapters' in prep_data['meta']:\n module_logger.info(\"%s data has 'adapters' present.\", __name__)\n prep.adapters = prep_data['meta']['adapters']\n\n if 'experimental_factor' in prep_data['meta']:\n module_logger.info(\"%s data has 'experimental_factor' present.\", __name__)\n prep.experimental_factor = prep_data['meta']['experimental_factor']\n\n if 'findex' in prep_data['meta']:\n module_logger.info(\"%s data has 'findex' present.\", __name__)\n prep.findex = prep_data['meta']['findex']\n\n if 'frag_size' in prep_data['meta']:\n module_logger.info(\"%s data has 'frag_size' present.\", __name__)\n prep.frag_size = prep_data['meta']['frag_size']\n\n if 'lib_const_meth' in prep_data['meta']:\n module_logger.info(\"%s data has 'lib_const_meth' present.\", __name__)\n prep.lib_const_meth = prep_data['meta']['lib_const_meth']\n\n if 'lib_screen' in prep_data['meta']:\n module_logger.info(\"%s data has 'lib_screen' present.\", __name__)\n prep.lib_screen = prep_data['meta']['lib_screen']\n\n if 'lib_size' in prep_data['meta']:\n module_logger.info(\"%s data has 'lib_size' present.\", __name__)\n prep.lib_size = prep_data['meta']['lib_size']\n\n if 'lib_vector' in prep_data['meta']:\n module_logger.info(\"%s data has 'lib_vector' present.\", __name__)\n prep.lib_vector = prep_data['meta']['lib_vector']\n\n if 'mims' in prep_data['meta']:\n module_logger.info(\"%s data has 'mims' present.\", __name__)\n prep.mims = prep_data['meta']['mims']\n\n if 'nucl_acid_amp' in prep_data['meta']:\n module_logger.info(\"%s data has 'nucl_acid_amp' present.\", 
__name__)\n prep.nucl_acid_amp = prep_data['meta']['nucl_acid_amp']\n\n if 'nucl_acid_ext' in prep_data['meta']:\n module_logger.info(\"%s data has 'nucl_acid_amp' present.\", __name__)\n prep.nucl_acid_ext = prep_data['meta']['nucl_acid_ext']\n\n if 'rindex' in prep_data['meta']:\n module_logger.info(\"%s data has 'rindex' present.\", __name__)\n prep.rindex = prep_data['meta']['rindex']\n\n if 'samp_mat_process' in prep_data['meta']:\n module_logger.info(\"%s data has 'samp_mat_process' present.\", __name__)\n prep.samp_mat_process = prep_data['meta']['samp_mat_process']\n\n if 'srs_id' in prep_data['meta']:\n module_logger.info(\"%s data has 'srs_id' present.\", __name__)\n prep.srs_id = prep_data['meta']['srs_id']\n\n module_logger.debug(\"Returning loaded %s\", __name__)\n\n return prep" ]
[ "0.64974564", "0.61578393", "0.57928175", "0.57109356", "0.5541757", "0.5491878", "0.5438996", "0.54297066", "0.5325947", "0.5219759", "0.52026135", "0.5192286", "0.5152777", "0.51420754", "0.5114544", "0.5093096", "0.50334144", "0.5020539", "0.5005299", "0.49603912", "0.4959661", "0.49570575", "0.4956181", "0.49561778", "0.49294677", "0.49101338", "0.4872313", "0.48491374", "0.48084882", "0.48028648" ]
0.64618903
1
Eliminate entries in epi recon table that have already been reconstructed. I don't remember why this is here but I know that at one time it was important.
def PruneEpiEntries(self): pruned = {} basefiles = [] baseentries = {} for entry in self.entry_map['epi']: if baseentries.has_key(self.info[entry]['basefile']): baseentries[self.info[entry]['basefile']].append(entry) else: baseentries[self.info[entry]['basefile']] = [entry] for entry in self.entry_map['epi']: targets = [] if self.no_motcorr: target = self.info[entry]['imgfile'] elif self.info[entry]['fmapname'] is None or self.no_fmapcorr: target = self.info[entry]['imgfile_m'] else: target = self.info[entry]['imgfile_mf'] targets.append(target + self.info[entry]['suffix']) targets.append('%s%s' % (self.info[entry]['censor_prefix'], '_censor.1D')) pruned[entry] = [True, baseentries[self.info[entry]['basefile']]] for target in targets: pruned[entry] = \ [False, baseentries[self.info[entry]['basefile']]] for key in pruned.keys(): if not pruned[key][0]: for entry in pruned[key][1]: pruned[entry][0] = False tmp = new_map = [] for entry in self.entry_map['epi']: if pruned[entry][0]: if self.verbose: print 'Skipping %s: Already reconstructed.' % targets[0] if entry in self.pfiles_recon: self.pfiles_recon.remove(entry) else: new_map.append(entry) self.entry_map['epi'] = new_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def elimination_ofconc(a2_data):\n for data in a2_data.values():\n data.pop('conc')\n return a2_data", "def nonphysicalxs_remotion(a2_data,res_nufi_removal):\n for i in a2_data['I'].keys():\n if i=='MACR' and res_nufi_removal==True:\n if 'nufi' in a2_data['I'][i]['R'].keys():\n a2_data['I'][i]['R'].pop('nufi')\n for r in a2_data['I'][i]['R'].keys():\n if any(x in r for x in ['111', '112', '122', '212', '222', '211', '322',\n '321', '312', '311', '221', '121']):\n a2_data['I'][i]['R'].pop(r)\n return a2_data", "def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated", "def revise():", "def clean_edges(self):", "def clean(df):", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)", "def cleaning (data):", "def remove_hydrogens(self) -> None:\n for cid, c in self:\n for rid, r in c:\n for aid, a in r:\n if a.element == 'H':\n print('removing H at %s' % aid)\n r.remove_atom(a)", "def reduceUniverse(self):\r\n self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates\r\n self.df = self.df.reindex(self.bondList)\r\n self.df = self.df[pandas.notnull(self.df['ISIN'])]\r\n self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index)\r\n self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']\r\n self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']", "def row_inout_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0]\n \n if location in location_dict.keys():\n outside = location_dict[location][0]\n \n if str(6) not in box: #only look at periods 1-5\n \n following_activity = inout_dict[box][0]\n if following_activity not in solved_values:\n temp_list = list(values[following_activity])\n \n for locations_next in values[following_activity]:\n \n if location_dict[locations_next][0] == outside and outside == True:\n \n try:\n temp_list.remove(locations_next)\n except:\n pass\n \n \n values[following_activity] = temp_list\n\n return values", "def _remove_dangling_bonds(self) -> None:\n for residue in self.residues:\n bonds, impropers, cross_maps, ics = [], [], [], []\n for bond in residue.bonds:\n for atom_id in bond:\n if atom_id not in self._id_to_index:\n break\n else:\n bonds.append(bond)\n for improper in residue.impropers:\n for atom_id in improper:\n if atom_id not in self._id_to_index:\n break\n else:\n impropers.append(improper)\n for cross_map in residue.cross_maps:\n for atom_id in cross_map:\n if atom_id not in self._id_to_index:\n break\n else:\n cross_maps.append(cross_map)\n for ic in residue.ics:\n for res_index, atom_name in ic[:4]:\n if atom_name.replace(\"*\", \"\") not in self._id_to_index:\n 
break\n else:\n ics.append(ic)\n residue.bonds = bonds\n residue.impropers = impropers\n residue.cross_maps = cross_maps\n residue.ics = ics", "def vacuum(self):\n\t\t\n\t\tstart_resnr = 1\n\t\t#---check for remark 999 to see if we have a starting residue not 1\n\t\twith open(self.rootdir+'system-input.pdb','r') as fp: lines = fp.readlines()\n\t\tregex = 'REMARK 999 starting residue = ([0-9]+)'\n\t\ttrawl = [re.findall(regex,line) for line in lines if re.match(regex,line)]\n\t\tif trawl != []: start_resnr = int(trawl[0][0])\n\n\t\t#---fix histidine naming according to the convention set by the force field\n\t\tif self.settings['force_field'] == 'charmm27':\n\t\t\tif self.settings['histype'] == 'd':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISD\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tif self.settings['histype'] == 'e':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISE\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tif self.settings['histype'] == 'p':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISP\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tcall(hisfix,cwd=self.rootdir)\n\t\telse: copy(self.rootdir+'system-input.pdb',self.rootdir+'prep-protein-start.pdb')\n\t\t\t\n\t\tprint \"stripping non-protein molecules\"\n\t\tcmd = [gmxpaths['make_ndx'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-index-protein.ndx']\n\t\tcall(cmd,logfile='log-make-ndx-prep-protein',cwd=self.rootdir,inpipe=\"q\\n\")\n\t\tprotgrp = int(checkout([\"awk\",\"'/[ ,\\t]+Protein[ ,\\t]+:/ {print $1}'\",\n\t\t\t\"log-make-ndx-prep-protein\"],cwd=self.rootdir).strip())\n\t\tcmd = [gmxpaths['make_ndx'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-index-protein-only.ndx']\n\t\tcall(cmd,logfile='log-make-ndx-prep-protein-only',cwd=self.rootdir,\n\t\t\tinpipe=\"keep \"+str(protgrp)+\"\\nq\\n\")\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-protein-start-stripped.pdb',\n\t\t\t'-n prep-index-protein-only.ndx',\n\t\t\t'-resnr '+str(start_resnr)]\n\t\tcall(cmd,logfile='log-editconf-prep-protein-strip',cwd=self.rootdir)\n\n\t\tprint \"running pdb2gmx\"\n\t\tcmd = [gmxpaths['pdb2gmx'],\n\t\t\t'-f prep-protein-start-stripped.pdb',\n\t\t\t'-o vacuum-alone-number1.gro',\n\t\t\t'-p vacuum-standard.top',\n\t\t\t'-ignh',\n\t\t\t'-i system-posre.itp',\n\t\t\t'-ff '+self.settings['force_field'],\n\t\t\t'-water '+self.settings['water_model']]\n\t\tcall(cmd,logfile='log-pdb2gmx',cwd=self.rootdir)\n\t\t\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone-number1.gro',\n\t\t\t'-o vacuum-alone.gro']\n\t\tcall(cmd,logfile='log-editconf-renumber',cwd=self.rootdir)\n\t\t\n\t\t#---intervening step will isolate the ITP data from the TOP file to use standardized TOP\n\t\twith open(self.rootdir+'vacuum-standard.top','r') as f: topfile = f.read()\n\t\t#---extract protein chain names here if necessary\n\t\tchains = []\n\t\tstartline = [ii for ii,i in enumerate(topfile.split('\\n')) \n\t\t\tif re.match('^(\\s+)?\\[(\\s+)?system(\\s+)?\\]',i)][0]\n\t\tfor line in topfile.split('\\n')[startline:]:\n\t\t\tif re.match('^Protein',line):\n\t\t\t\tchains.append(line.split(' ')[0])\n\t\tif len(chains) > 1:\n\t\t\t#---assume one domain per chain\n\t\t\tself.nprots = [1 for i in chains]\n\t\t\tself.protname = chains\n\t\telse:\t\n\t\t\tself.protname = chains[0]\n\t\t\tself.nprots = 1\n\t\tfp = open(self.rootdir+'protein.itp','w') \n\t\tfor line in topfile.split('\\n'):\n\t\t\t#---skip any part of the top that follows the water topology and/or 
system composition\n\t\t\tif re.match('; Include water topology',line): break\n\t\t\tif re.match('; Include topology for ions',line): break\n\t\t\tif re.match('\\[ system \\]',line): break\n\t\t\t#---you must extract forcefield.itp from the file to prevent redundant includes\n\t\t\tif not re.match(\".+forcefield\\.itp\",line) and not \\\n\t\t\t\tre.match(\"; Include forcefield parameters\",line): \n\t\t\t\tfp.write(line+'\\n')\n\t\tfp.close()\n\t\t\n\t\t#---note that this method is currently set to only simulate one protein\n\t\tself.write_topology_protein('vacuum.top')\n\t\t\n\t\tprint \"building box with \"+str(self.settings['wbuffer'])+'nm of water'\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone.gro',\n\t\t\t'-d '+str(self.settings['wbuffer']),\n\t\t\t('-princ' if 'align_x' in self.settings.keys() \n\t\t\tand self.settings['align_x'] == True else ''),\n\t\t\t'-o vacuum.gro']\n\t\tcall(cmd,logfile='log-editconf-vacuum',cwd=self.rootdir,\n\t\t\tinpipe=('0\\n' if 'align_x' in self.settings.keys() \n\t\t\tand self.settings['align_x'] == True else None))\t\t\n\t\tself.minimization_method('vacuum')", "def main():\n # get commmand line args\n args = parse_arguments()\n \n adj_file = args.adj # open(\"UCSC_VIPER/pathways/extended_pathways_transcriptional.adj\", \"r\")\n \n # this set isn't actually used in the script, but I was curious...\n adj_gene_set = set() \n \n cutoff_number = args.cutoff_number\n #cutoff_percent = args.cutoff_percent\n \n expr_gene_file = args.expr_genes #open(\"stanford_batchK1-12.HUGO_only_genes.lst\", 'r')\n expr_genes = [line.strip() for line in expr_gene_file] \n \n # for each line, check that the regulator and other genes are in the\n # expression matrix gene set. if not, remove them, or remove the whole\n # line if the regulator isn't in the set or if too few genes remain\n for line in adj_file:\n \n line_list = line.strip().split()\n regulator_plus_gene_list = [x for x in line_list if x !=\"1.0\"]\n regulator = regulator_plus_gene_list[0]\n \n if regulator not in expr_genes:\n # remove the whole regulator + regulon\n print(\"Skipped a line (regulator not in expr genes): \", \n line_list[0], file=sys.stderr) \n continue\n \n gene_list = regulator_plus_gene_list[1:]\n list_size = len(gene_list)\n adj_gene_set.update(gene_list)\n \n how_many_to_remove= 0\n good_genes = []\n \n for gene in gene_list:\n if gene not in expr_genes:\n how_many_to_remove += 1\n else:\n good_genes.append(gene)\n \n #print(\"\\n\")\n #pdb.set_trace()\n #if (100-how_many_to_remove/list_size*100 < cutoff_percent) and (list_size-how_many_to_remove < cutoff_number):\n if (list_size-how_many_to_remove < cutoff_number):\n print(\"Skipped a line (too many removed): \", line_list[0], file=sys.stderr)\n \n else:\n # re-generate the new line of the .adj file with kept genes\n #genes_to_print = good_genes.insert(0, regulator)\n regulated_genes = \"\\t1.0\\t\".join(good_genes)\n print(regulator+\"\\t\"+regulated_genes+\"\\t1.0\")", "def strip_ds(ds):\n if 'brain' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'brain'), :]\n print('excluded the rest of the brain from the dataset')\n if 'overlap' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'overlap'), :]\n print('excluded overlap from the dataset')\n return ds", "def get_degenerate_statements(self):\n logger.info(\"Checking for 'degenerate' statements...\\n\")\n # Get rules of type protein X -> activity Y\n q_stmts = prefixes + \"\"\"\n SELECT ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasSubject 
?subj .\n ?stmt belvoc:hasObject ?obj .\n {\n { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }\n UNION\n { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }\n }\n {\n { ?subj a belvoc:ProteinAbundance . }\n UNION\n { ?subj a belvoc:ModifiedProteinAbundance . }\n }\n ?subj belvoc:hasConcept ?xName .\n {\n {\n ?obj a belvoc:ProteinAbundance .\n ?obj belvoc:hasConcept ?yName .\n }\n UNION\n {\n ?obj a belvoc:ModifiedProteinAbundance .\n ?obj belvoc:hasChild ?proteinY .\n ?proteinY belvoc:hasConcept ?yName .\n }\n UNION\n {\n ?obj a belvoc:AbundanceActivity .\n ?obj belvoc:hasChild ?objChild .\n ?objChild a belvoc:ProteinAbundance .\n ?objChild belvoc:hasConcept ?yName .\n }\n }\n FILTER (?xName != ?yName)\n }\n \"\"\"\n res_stmts = self.g.query(q_stmts)\n\n logger.info(\"Protein -> Protein/Activity statements:\")\n logger.info(\"---------------------------------------\")\n for stmt in res_stmts:\n stmt_str = strip_statement(stmt[0])\n logger.info(stmt_str)\n self.degenerate_stmts.append(stmt_str)", "def _reducedProtToPeps(protToPeps, proteins):\n return {k: v for k, v in viewitems(protToPeps) if k not in proteins}", "def clean(self):\n self.df = _data.prune(self.df, [REGEX_PATTERN_GCI, REGEX_PATTERN_DB_ID])\n self.df, _ = _data.remove_totally_failed_tests(self.df)\n self.is_cleaned = True", "def clean(self):\n return _coconut_tail_call((self.__class__), *filter(_coconut.functools.partial(_coconut.operator.ne, self.identity), self.elems))", "def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n ecols = ['eflux_38-53', 'eflux_175-315']\n\n # Evaluate the electron flux data\n self[self.data['status_e'] > max_status, ecols] = np.nan\n\n # Evaluate the proton flux data\n pcols = ['pflux_47-68', 'pflux_115-195', 'pflux_310-580',\n 'pflux_795-1193', 'pflux_1060-1900']\n self[self.data['status_p'] > max_status, pcols] = np.nan\n\n # Include both fluxes and the anisotropy index in the removal eval\n eval_cols = ecols + pcols\n eval_cols.append('anis_ind')\n\n # Remove lines without any good data\n good_cols = (np.isfinite(self.data.loc[:, eval_cols])).sum(axis=1)\n bad_index = good_cols[good_cols == 0].index\n self.data = self.data.drop(index=bad_index)\n\n return", "def _remove_tech_rep_negatives(self):\n\n # For each row in the post_med_df, find the mapping key that is a substring\n # Should be only one, check this.\n # Then once you have found the one, check all samples in the post_med df to see if it matches any other\n # if you return multiple matches, then keep only the one with the biggest number of contigs,\n # and all others to a drop list. Keep a checked list so that we don't have to check readsets twice.\n # Also up date a dictionary as you go that is the full readset to the sample-id that it needs to become.\n # Once this has been done for the post-med do it for the pre-med.\n # For the pre-med, use the dictionary we created while doing the post-med\n\n # Get the post med df. 
Read it in with index as false and set index manually without dropping\n # this way we can work with the index, but then we can not write it out later so as not\n # to disturb the column orders.\n post_med_count_path = os.path.join(self.negative_output_dir_path, 'post_med_seqs', [_ for _ in os.listdir(\n os.path.join(self.negative_output_dir_path, 'post_med_seqs')) if 'abund' in _][0])\n post_med_df = pd.read_csv(post_med_count_path, index_col=False)\n post_med_df = post_med_df.set_index('sample-id', drop=False)\n\n # Same for the pre_med\n pre_med_count_path = os.path.join(self.negative_output_dir_path, 'pre_med_seqs', [_ for _ in os.listdir(\n os.path.join(self.negative_output_dir_path, 'pre_med_seqs')) if 'abund' in _][0])\n pre_med_df = pd.read_csv(pre_med_count_path, index_col=False)\n pre_med_df = pre_med_df.set_index('sample-id', drop=False)\n\n # First check to see if the sample-ids have already been fixed\n if 'TARA' in pre_med_df.index[0] and 'TARA' in post_med_df.index[0]:\n return\n if 'TARA' in pre_med_df.index[0] and 'TARA' not in post_med_df.index[0]:\n raise RuntimeError\n if 'TARA' not in pre_med_df.index[0] and 'TARA' in post_med_df.index[0]:\n raise RuntimeError\n\n # The dictionary df that Setphane produced\n mapping_df = pd.read_csv(self.negative_mapping_file_path, index_col=0)\n # Make the mapping dictionary from the Stephane df\n raw_mapping_dict = {}\n for df_ind in mapping_df.index:\n raw_mapping_dict[df_ind] = mapping_df.at[df_ind, 'sample-id_source']\n\n # This is the dictionary we are going to populate that had the full genoscope readset\n # as the key and the equivalent TARA sample-id as the value\n curated_mapping_dict = {}\n\n # Check that the assumption holds that both of the indeces are identifcal except for order.\n # NB the post med df has an annoying row at the end.\n assert(set(post_med_df.index[:-1]) == set(pre_med_df.index))\n contig_dict = {readset: contig for readset, contig in zip(post_med_df['sample-id'][:-1], post_med_df['raw_contigs'][:-1])}\n\n to_drop_list = []\n checked_list = []\n for pm_ind in post_med_df.index[:-1]:\n if pm_ind in checked_list:\n continue\n match = []\n for map_ind in mapping_df.index:\n if map_ind in pm_ind:\n match.append(map_ind)\n if len(match) == 0:\n print(f'pm_ind: {pm_ind} found 0 matches. 
This sample will be dropped.')\n to_drop_list.append(pm_ind)\n continue\n elif len(match) > 1:\n raise RuntimeError\n\n # Now we have the mapping indice that matches\n match = match[0]\n pm_matches = []\n for pm_ind_again in post_med_df.index[:-1]:\n if match in pm_ind_again:\n pm_matches.append(pm_ind_again)\n assert(len(pm_matches) > 0)\n if len(pm_matches) > 1:\n # Then we have technical replicates and we only want to keep the largest\n contig_match_dict = {pm_match: contig_dict[pm_match] for pm_match in pm_matches}\n sorted_keys = sorted(contig_match_dict, key=contig_match_dict.get, reverse=True)\n # Add all of the matches to the check_list\n checked_list.extend(sorted_keys)\n curated_mapping_dict[sorted_keys[0]] = raw_mapping_dict[match]\n to_drop_list.extend(sorted_keys[1:])\n else:\n checked_list.append(pm_matches[0])\n curated_mapping_dict[pm_matches[0]] = raw_mapping_dict[match]\n\n # drop the rows\n post_med_df.drop(index=to_drop_list, inplace=True)\n # We now need to get rid of any sequence count columns that only have 0s after dropping the samples\n # The last meta column is post_med_unique\n cols = list(post_med_df)\n c_ind = cols.index('post_med_unique') + 1\n cols_to_check = cols[c_ind:]\n cols_to_drop = []\n for col in cols_to_check:\n if (post_med_df[col][:-1] == 0).all():\n cols_to_drop.append(col)\n\n # drop the cols\n post_med_df.drop(columns=cols_to_drop, inplace=True)\n\n # rename\n for ind in post_med_df.index[:-1]:\n current = post_med_df.at[ind, 'sample-id']\n post_med_df.at[ind, 'sample-id'] = curated_mapping_dict[current]\n\n # Here we have the curated mapping dict popualted and we can now use this to\n # process the pre_med df\n pre_med_df.drop(index=to_drop_list, inplace=True)\n # We now need to get rid of any sequence count columns that only have 0s after dropping the samples\n # The last meta column is post_med_unique\n cols = list(pre_med_df)\n c_ind = cols.index('sample-id') + 1\n cols_to_check = cols[c_ind:]\n cols_to_drop = []\n for col in cols_to_check:\n if (pre_med_df[col][:-1] == 0).all():\n cols_to_drop.append(col)\n\n # drop the cols\n pre_med_df.drop(columns=cols_to_drop, inplace=True)\n\n # rename\n for ind in pre_med_df.index:\n current = pre_med_df.at[ind, 'sample-id']\n pre_med_df.at[ind, 'sample-id'] = curated_mapping_dict[current]\n\n # Now convert the columns to int32\n d_type_dict = {col_name : pd.Int32Dtype() for col_name in list(post_med_df)[2:]}\n post_med_df = post_med_df.astype(d_type_dict)\n d_type_dict = {col_name : pd.Int32Dtype() for col_name in list(pre_med_df)[2:]}\n pre_med_df = pre_med_df.astype(d_type_dict)\n\n # Important to write out with index as false\n post_med_df.to_csv(post_med_count_path, index=False, header=True)\n pre_med_df.to_csv(pre_med_count_path, index=False, header=True)", "def cleanup_transition_matrix(matrix,polarization):\n\n index = []\n for i in range(len(matrix['label'])):\n if (polarization[0] == 0) & ('right' in matrix['label'][i]):\n index.append(i)\n elif (polarization[1] == 0) & ('parallel' in matrix['label'][i]):\n index.append(i)\n elif (polarization[2] == 0) & ('left' in matrix['label'][i]):\n index.append(i)\n\n for i in reversed(index):\n del matrix['label'][i]\n del matrix['bra_energy'][i]\n del matrix['ket_energy'][i]\n del matrix['matrix'][i]\n\n return matrix", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and 
residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def CleanUp(self):\n for Ind in self.IndList():\n if amax(abs(self[Ind]))<1e-10:\n del self[Ind]", "def clean_up(self, prune=True, b_factor=None, filename=\"clean.pdb\"):\n skipped = [\"HOH\", \"WAT\"]\n for chain in self.hier.chains():\n for residue in chain.residue_groups():\n if b_factor is not None:\n atoms = residue.atom_groups()[0].atoms()\n atoms.set_b(flex.double(len(atoms), b_factor))\n resname = residue.unique_resnames()[0].strip()\n if prune:\n if resname in unmodified_residues:\n continue\n elif resname in PTM_reverse_lookup.keys():\n pruned_resname = PTM_reverse_lookup[resname]\n PTM_lookup[pruned_resname][resname][\"prune_lambda\"](residue)\n for ag in residue.atom_groups():\n ag.resname = pruned_resname\n else:\n if resname not in skipped:\n print \"Warning: skipping unrecognized residue, ligand or ion %s\" % resname\n skipped.append(resname)\n self.hier.write_pdb_file(filename)", "def dropRedundantEcotypes(self, input_fname, ecotypeid2tg_ecotypeid):\n\t\tsys.stderr.write(\"Dropping redundant ecotypes ...\\n\")\n\t\treader = csv.reader(open(input_fname), delimiter=figureOutDelimiter(input_fname))\n\t\tcol_name2col_index = getColName2IndexFromHeader(reader.next())\n\t\tecotypeid_idx = col_name2col_index['ecotypeid']\n\t\thaplo_name_idx = col_name2col_index['haplogroup']\n\t\tnativename_idx = col_name2col_index['nativename']\n\t\ttg_ecotypeid2row = {}\n\t\tno_of_duplicates = 0\n\t\tno_of_duplicates_with_different_haplogroups = 0\n\t\tcounter = 0\n\t\tfor row in reader:\n\t\t\tecotypeid = int(row[ecotypeid_idx])\n\t\t\thaplo_name = row[haplo_name_idx]\n\t\t\tnativename = row[nativename_idx]\n\t\t\tif ecotypeid in ecotypeid2tg_ecotypeid:\n\t\t\t\ttg_ecotypeid = ecotypeid2tg_ecotypeid[ecotypeid]\n\t\t\t\tif tg_ecotypeid not in tg_ecotypeid2row:\n\t\t\t\t\ttg_ecotypeid2row[tg_ecotypeid] = row\n\t\t\t\telse:\n\t\t\t\t\tno_of_duplicates += 1\n\t\t\t\t\told_row = tg_ecotypeid2row[tg_ecotypeid]\n\t\t\t\t\told_ecotypeid = int(old_row[ecotypeid_idx])\n\t\t\t\t\told_haplo_name = old_row[haplo_name_idx]\n\t\t\t\t\told_nativename = row[nativename_idx]\n\t\t\t\t\tif old_haplo_name!=haplo_name:\n\t\t\t\t\t\tsys.stderr.write(\"ecotype %s(%s) in haplotype group %s, while duplicate %s(%s) in haplotype group %s.\\n\"%\\\n\t\t\t\t\t\t\t\t\t\t (ecotypeid, nativename, haplo_name, old_ecotypeid, old_nativename, old_haplo_name))\n\t\t\t\t\t\tno_of_duplicates_with_different_haplogroups += 1\n\t\t\t\t\tif ecotypeid==tg_ecotypeid:\t#replace if the new ecotypeid matching the tg_ecotypeid whether the haplotype group is same or not.\n\t\t\t\t\t\ttg_ecotypeid2row[tg_ecotypeid] = row\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"Warning: ecotype %s not in ecotypeid2tg_ecotypeid.\\n\"%(ecotypeid))\n\t\t\tcounter += 1\n\t\tsys.stderr.write(\"no_of_duplicates: %s, out of which %s encompass different haplotype groups. %s accessions in total. 
Done.\\n\"%\\\n\t\t\t\t\t\t (no_of_duplicates, no_of_duplicates_with_different_haplogroups, counter))\n\t\treturn tg_ecotypeid2row", "def _Dedup(self):\n kegg_id_to_index = {}\n for i, c in enumerate(self.reactants):\n first_i = kegg_id_to_index.setdefault(c.compound.kegg_id, i)\n if i != first_i:\n self.reactants[first_i].coeff += c.coeff\n c.coeff = 0\n \n self.reactants = filter(lambda x: x.coeff != 0, self.reactants)\n \n # always make sure that H2O is the last reactant (so that it will\n # appear last in the chemical formula)\n i_h2o = self._FindCompoundIndex('C00001')\n if i_h2o is not None:\n self.reactants = self.reactants[:i_h2o] + \\\n self.reactants[(i_h2o + 1):] + \\\n [self.reactants[i_h2o]]", "def purgeTrp(atoms):\n for a in atoms:\n found = False\n if getAtype(a) == \"N\":\n for c in atoms:\n if not c == a and dist(c,a) < COVALENT_BOND_DIST:\n found = True\n if not found:\n atoms.remove(a)\n return atoms\n if DEBUG: print \"Warning! Residue %s appears to be incomplete\" % (atoms[0][17:20]+atoms[0][22:26]+atoms[0][21])\n return False", "def group_remotion(a2_data, retained):\n for i in a2_data['I'].keys():\n for r in a2_data['I'][i]['R'].keys():\n for g in a2_data['I'][i]['R'][r].keys():\n if g not in retained:\n a2_data['I'][i]['R'][r].pop(g)\n return a2_data" ]
[ "0.6324132", "0.59182566", "0.5790216", "0.57542723", "0.57150894", "0.5654134", "0.55529314", "0.5501398", "0.54495615", "0.5417837", "0.5416808", "0.5410812", "0.53797144", "0.53654283", "0.5352083", "0.5328368", "0.53084546", "0.530087", "0.5300535", "0.5298115", "0.529656", "0.5293976", "0.527256", "0.5271022", "0.52537477", "0.52323747", "0.5232325", "0.52261007", "0.5222202", "0.52065635" ]
0.6166888
1
Convert epis reconstructed on the scanner.
def ConvertRtEpis(self): if self.verbose: print 'Convert EPIs to brik' for entry in self.entry_map['epi']: if ('epirt' in self.info[entry]['psdname'] or \ self.info[entry]['psdname'] == 'epi' or \ self.info[entry]['psdname'] == '*epfid2d1_64') and \ self.info[entry]['data_filetype'] == 'dicom': series = self.info[entry]['series'] if self.info[entry]['skip'] > 0: skip = '--skip=%s' % self.info[entry]['skip'] else: skip = '' cmd = 'convert_file %s %s %s brik' % \ (skip, entry, self.info[entry]['imgfile']) checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile']) self.CheckExec(cmd, [checkname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)", "def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname", "def into_epochs(self):\n #divide into epochs\n new_events = mne.make_fixed_length_events(self.raw, duration=2.)\n event_dict = {'divide':1}\n #reject data with extreme/flat amplitude\n reject_criteria = {'eeg' : 400e-6} # 400 µV\n flat_criteria = {'eeg' : 1e-6} # 1 µV\n\n# self.epochs = mne.Epochs(self.raw,new_events, reject=reject_criteria, flat=flat_criteria,\n# reject_by_annotation=False, preload=True)\n self.epochs = mne.Epochs(self.raw,new_events, reject_by_annotation=False, preload=True)\n# self.epochs.plot()\n return self.epochs", "def convert_to_evoros_input(self, enki_input):\n # ramp inclines\n 
ramp_inclines = [\n enki_input['incline1'],\n enki_input['incline2'],\n enki_input['incline3'],\n enki_input['incline4'],\n enki_input['incline5']\n ]\n\n # convert to Evo-ROS input\n evoros_input = {\n 'genome': PID_SETTINGS,\n 'enki_genome': ramp_inclines\n }\n return evoros_input", "def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps", "def decode_pes(self, pes: bytes)-> PES.PES:\n pesdk = PES.PES()\n try:\n pesdk.stream_id, PES_packet_length = struct.unpack('>BH', pes[0:3])\n if pesdk.stream_id not in [33, 188, 190, 191, 240, 241, 242, 248, 255]:\n # 33 (0x21) - unknown ?????\n # 188 (0xBC) - program_stream_map\n # 190 (0xBE) - padding_stream\n # 191 (0xBF) - private_stream_2\n # 240 (0xF0) - ECM\n # 241 (0xF1) - EMM\n # 242 (0xF2) - DSMCC_stream\n # 248 (0xF8) - ITU-T Rec. H.222.1 type E stream\n # 255 (0xFF) - program_stream_directory\n if pesdk.stream_id >> 4 == 14:\n pesdk.stream_type = 'video-stream'\n pesdk.stream_number = (pesdk.stream_id & 15)\n elif pesdk.stream_id >> 5 == 6:\n pesdk.stream_type = 'audio-stream'\n pesdk.stream_number = (pesdk.stream_id & 31)\n b1, b2, PES_header_data_length = struct.unpack('>BBB', pes[3:6])\n pesdk.PES_scrambling_control = (b1 & 16) >> 4\n # PES_priority = bool((b1 & 8) >> 3)\n # data_alignment_indicator = bool((b1 & 4) >> 2)\n pesdk.copyright = bool((b1 & 2) >> 1)\n pesdk.original_or_copy = bool(b1 & 1)\n pesdk.PTS_DTS_flags = (b2 & 192) >> 6\n pesdk.ESCR_flag = bool((b2 & 32) >> 5)\n pesdk.ES_rate_flag = bool((b2 & 16) >> 4)\n pesdk.DSM_trick_mode_flag = bool((b2 & 8) >> 3)\n pesdk.additional_copy_info_flag = bool((b2 & 4) >> 2)\n pesdk.PES_CRC_flag = bool((b2 & 2) >> 1)\n pesdk.PES_extension_flag = bool(b2 & 1)\n pos = 6\n if pesdk.PTS_DTS_flags in [2, 3]:\n b1, b23, b45 = struct.unpack('>BHH', pes[pos:pos+5])\n pesdk.PTS = (((b1 & 14) << 29) + ((b23 >> 1) << 15) + (b45 >> 1))\n pos += 5\n if pesdk.PTS_DTS_flags == 3:\n b1, b23, b45 = struct.unpack('>BHH', pes[pos:pos + 5])\n pesdk.DTS = (((b1 & 14) << 29) + ((b23 >> 1) << 15) + (b45 >> 1))\n pos += 5\n elif pesdk.stream_id == 190:\n # 190 (0xBE) - padding_stream\n pass\n else:\n # 33 (0x21) - unknown ?????\n # 188 (0xBC) - program_stream_map\n # 191 (0xBF) - private_stream_2\n # 240 (0xF0) - ECM\n # 241 (0xF1) - EMM\n # 242 (0xF2) - DSMCC_stream\n # 248 (0xF8) - ITU-T Rec. 
H.222.1 type E stream\n # 255 (0xFF) - program_stream_directory\n pass\n return pesdk\n except Exception as err:\n logging.warning('PES parsing error:' + str(err))\n return None", "def convert_season_episode(self, se_input):\n if type(se_input) == str:\n se_input = se_input[1:]\n se_input.replace(' ', '')\n\n e_ndx = se_input.index('E')\n\n #sometimes it looks like \"S14 E10\" and sometimes it's \"S14 Ep10\"\n if \"Ep\" in se_input:\n ep_offset = 2\n else:\n ep_offset = 1\n\n # return two ints\n return int(se_input[:e_ndx]), int(se_input[e_ndx+ep_offset:])\n\n else:\n # return it as \"S14 Ep10\"\n return \"S%s Ep%s\" % (se_input[0], se_input[1])", "def read(self, generic=False, to_xarray=False):\n return read_eps_l2(self.filename, generic, to_xarray)", "def _convert(self):\n root = cElementTree.fromstring(self.html)\n for el in root.getiterator():\n if el in self.visited:\n continue\n self.visited.update([el])\n if el.tag == 'p':\n parser = ParagraphParser(el)\n self.document_state.append(parser.tag)\n self.visited.update(el.getiterator())", "def _unpack_ies(buf):\n\t\t# each IE starts with an ID and a length\n\t\ties = []\n\t\toff = 0\n\t\tbuflen = len(buf)\n\t\t# logger.debug(\"lazy dissecting: %s\" % buf)\n\n\t\twhile off < buflen:\n\t\t\tie_id = buf[off]\n\t\t\ttry:\n\t\t\t\tparser = IEEE80211.ie_decoder[ie_id]\n\t\t\texcept KeyError:\n\t\t\t\t# some unknown tag, use standard format\n\t\t\t\tparser = IEEE80211.IE\n\n\t\t\tdlen = buf[off + 1]\n\t\t\t# logger.debug(\"IE parser is: %d = %s = %s\" % (ie_id, parser, buf[off: off+2+dlen]))\n\t\t\tie = parser(buf[off: off + 2 + dlen])\n\t\t\ties.append(ie)\n\t\t\toff += 2 + dlen\n\n\t\treturn ies", "def prepare_for_ESR(self):\r\n _debug('Anapico: prepare ESR')\r\n # This is all for the ANAPICO to use the external trigger. \r\n # BONUS for preparing the list with the external trigger. \r\n print('Testing query: ', self.query('*IDN?'))\r\n print('Source for Trigger?: ', self.query('TRIG:SEQ:SOUR?'))\r\n self.write('TRIG:SEQ:SOUR EXT') # Set the external trigger to ext\r\n print('Source for Trigger?: ', self.query('TRIG:SEQ:SOUR?'))\r\n print('First frequency?: ', self.query('SOUR:FREQ:STAR?'))\r\n print('Last frequency?: ', self.query('SOUR:FREQ:STOP?'))\r\n \r\n # Prepare the list mode\r\n self.write('SOUR:FREQ:MODE LIST') # Set the frequency mode to list\r\n print('Frequency mode ?: ', self.query('SOUR:FREQ:MODE?'))\r\n self.write('SOUR:POW:MODE LIST') # Set the power mode to list\r\n print('Power mode ?: ', self.query('SOUR:POW:MODE?'))\r\n self.write('SOUR:LIST:MODE AUTO') # Set the list mode to auto\r\n print('List mode ?: ', self.query('SOUR:LIST:MODE?'))\r\n# self.api.write('TRIG:SEQ:TYPE GATE') # An external trigger signal repeatedly starts and stops the waveform’s playback.\r\n self.write('TRIG:SEQ:TYPE POIN')# Upon triggering, only a single point of the sweep (list) is played.\r\n print('Trig type?: ', self.query('TRIG:SEQ:TYPE?'))\r\n \r\n # Set stuff for the modulation\r\n self.write('SOUR:PULM:SOUR EXT')# Set the pulse modulation to be external\r\n print('Pulse modulation source?: ', self.query('SOUR:PULM:SOUR?'))\r\n self.write('SOUR:PULM:STAT ON') # Switch the pulse modulation ON\r\n print('State of pulse modulation? ', self.query('SOUR:PULM:STAT?'))\r\n self.write('SOUR:PULM:POL NORM') # Polarity NORMal, in case it was INVerted\r\n print('Polarity of modulation?: ', self.query('SOUR:PULM:POL?')) \r\n # This is all for the ANAPICO to use the external trigger. \r\n # BONUS for preparing the list with the external trigger. 
\r", "def __init__(self, config):\n self.config = config\n self.outpath = prepDir(config.outpath)\n self.xslpath = config.xslpath\n self.imagespath = config.imagespath\n self.errors = []\n self.xeps = []\n files = []\n if config.xeps:\n for xep in config.xeps:\n if os.path.isfile(xep):\n files.append(os.path.abspath(xep))\n elif os.path.isdir(xep):\n fltr = os.path.join(os.path.abspath(xep), '*.xml')\n files += glob.glob(fltr)\n else:\n if os.path.isfile(\"xep-{0}.xml\".format(xep)):\n files.append(\n os.path.abspath(os.path.join(os.getcwd(), \"xep-{0}.xml\".format(xep))))\n else:\n # no xeps given, try all xml-files in curdir\n fls = glob.glob(os.path.join(os.getcwd(), '*.xml'))\n for fle in fls:\n files.append(os.path.abspath(fle))\n # try if we can find an existing XEP-table:\n if os.path.isfile(os.path.join(self.outpath, \"xeps.xml\")):\n self.xeptable = os.path.join(self.outpath, \"xeps.xml\")\n else:\n self.xeptable = None\n # read files to xeps\n for fle in sorted(set(files)):\n try:\n self.xeps.append(\n xeputils.xep.XEP(fle,\n outpath=self.outpath,\n xslpath=self.xslpath,\n imagespath=self.imagespath))\n except:\n e = \"Error while parsing {}\\n\".format(fle)\n e += \"FATAL: {} is not included\\n\".format(fle)\n e += traceback.format_exc()\n self.errors.append(e)", "def partid2eids(self, partid, etype): # -> None:\n ...", "def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes", "def convert_ere2eer(input_filename, output_filename):\n with codecs.open(input_filename, \"r\") as input_file:\n with codecs.open(output_filename, \"w\") as output_file:\n for line in input_file:\n line = line.strip().split('\\t')\n if len(line)<3:\n output_file.write('\\t'.join([str(c) for c in line])+'\\n')\n continue\n\n line = [line[0],line[2],line[1]]\n # print(line)\n output_file.write('\\t'.join([str(c) for c in line])+'\\n')", "def from_endf(cls, ev, resonances):\n file_obj = io.StringIO(ev.section[32, 151])\n\n # Determine whether discrete or continuous representation\n items = endf.get_head_record(file_obj)\n n_isotope = items[4] # Number of isotopes\n\n ranges = []\n for iso in range(n_isotope):\n items = endf.get_cont_record(file_obj)\n abundance = items[1]\n fission_widths = (items[3] == 1) # Flag for fission widths\n n_ranges = items[4] # Number of resonance energy ranges\n\n for j in range(n_ranges):\n items = endf.get_cont_record(file_obj)\n # Unresolved flags - 0: only scattering radius given\n # 1: resolved parameters given\n # 2: unresolved parameters given\n unresolved_flag = items[2]\n formalism = items[3] # resonance formalism\n\n # Throw error for unsupported formalisms\n if formalism in [0, 7]:\n 
error = 'LRF='+str(formalism)+' covariance not supported '\\\n 'for this formalism'\n raise NotImplementedError(error)\n\n if unresolved_flag in (0, 1):\n # Resolved resonance region\n resonance = resonances.ranges[j]\n erange = _FORMALISMS[formalism].from_endf(ev, file_obj,\n items, resonance)\n ranges.append(erange)\n\n elif unresolved_flag == 2:\n warn = 'Unresolved resonance not supported. Covariance '\\\n 'values for the unresolved region not imported.'\n warnings.warn(warn)\n\n return cls(ranges)", "def sensordaten_einlesen(self):\n self.caldata = []\n self.caldata_raw = np.genfromtxt(self.sensorfile, usecols = np.asarray(self.sensorspalte), skip_header = 1)\n for ele in self.caldata_raw:\n self.caldata.append(int(ele))\n self.Sensordata = Channel()", "def partid2eids(self, partid, etype=...):\n ...", "def partid2eids(self, partid, etype=...):\n ...", "def read(self, generic=False, to_xarray=False):\n return read_eps_l1b(self.filename, generic, to_xarray)", "def parse_eeg_file(path):\n if os.path.splitext(path)[-1].lower() != '.edf':\n NotImplementedError(\"Only EDFs are supported currently. More files coming.\")\n\n try: #edf\n edf_file = mne.io.read_raw_edf(path, stim_channel=None, verbose=False)\n except RuntimeError: #edf+\n edf_file = mne.io.read_raw_edf(path, preload=True, stim_channel=None, verbose=False)\n\n # TODO edf++\n\n eeg_data = {}\n eeg_data['meas_date'] = datetime.datetime.fromtimestamp(edf_file.info[\"meas_date\"])\n eeg_data['nchan'] = edf_file.info[\"nchan\"]\n eeg_data['sfreq'] = edf_file.info[\"sfreq\"]\n eeg_data['subject_info'] = edf_file.info[\"subject_info\"]\n eeg_data['ch_names'] = edf_file.ch_names\n\n return {\"eeg_\"+key: value for key, value in eeg_data.items()}", "def _emiss_ep(self,Eph):\n if self.weight_ep == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n eps = (Eph / mec2).decompose().value\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_1(gam,eps),\n self._gam, axis=0).to(u.cm**2 / Eph.unit)\n return emiss", "def extendedConvert(self):\r\n devId = str(self.deviceId)\r\n if(devId == '28' or devId == '29'):\r\n answers = []\r\n #just add the counter value\r\n answers.append(self.fields[1])\r\n #find the engineering units converter\r\n enum = self.fields[0] & 0x3F\r\n #look up the scale and offset for that eeu\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n print('eeu:' + str(eeu))\r\n #convert from twos complement and adjust by scale/offset\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n self.units = [self.UNITS_COUNT, eeu[2]]\r\n elif(devId == '53' or devId == '54'):\r\n #strip off the first part of the answer which is the last part of the\r\n #serial number\r\n answers = [self.fields[1]]\r\n self.fields = answers\r\n elif(devId == '75' or devId == '76'):\r\n answers = []\r\n #find out the number of I/O points\r\n pointCount = self.fields[0] & 3\r\n #find out engineering units for 1st I/O\r\n enum = self.fields[1] & 0x3F\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n #new value = old value * scale + offset\r\n val = (self.convertSigned16(self.fields[3]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units = [eeu[2]]\r\n #see if there's two\r\n if pointCount == 2:\r\n #find out engineering units for 2nd I/O\r\n #and off first two bits\r\n enum = self.fields[0] >> 2\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu2 = eeu\r\n 
val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units.append(eeu[2])\r\n else:\r\n self.eeu2 = []\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n\r\n return", "def convert(self):\n return", "def elementStream():\n try:\n es = ExpatElementStream()\n return es\n except ImportError:\n if SuxElementStream is None:\n raise Exception(\"No parsers available :(\")\n es = SuxElementStream()\n return es", "def convert(self, sm):\n return self.visit(sm)", "def auto_convert(self):\n nodes_converted = []\n for node_type in self.conversion_spec_sheet:\n print('searching for: %s' % node_type)\n found_nodes = self.list_nodes(node_type)\n print('found: %s nodes' % len(found_nodes))\n for node in found_nodes:\n new_node = self.convert(node)\n nodes_converted.append([node, new_node])\n\n return nodes_converted", "def dnde_ee(_: PseudoScalarMediatorBase, egams, cme):\n return dnde_xx_to_p_to_ffg(egams, cme, me)", "def psea2HEC(pseq): # -> list[Unknown]:\n ...", "def processDuplicitous(self):\n\n ims = bytearray()\n key = ekey = b'' # both start same. when not same means escrows found\n while True: # break when done\n for ekey, edig in self.db.getLdeItemsNextIter(key=key):\n try:\n pre, sn = splitKeySN(ekey) # get pre and sn from escrow item\n # check date if expired then remove escrow.\n dtb = self.db.getDts(dgKey(pre, bytes(edig)))\n if dtb is None: # othewise is a datetime as bytes\n # no date time so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event datetime\"\n \" at dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed event datetime \"\n \"at dig = {}.\".format(bytes(edig)))\n\n # do date math here and discard if stale nowIso8601() bytes\n dtnow = datetime.datetime.now(datetime.timezone.utc)\n dte = fromIso8601(bytes(dtb))\n if (dtnow - dte) > datetime.timedelta(seconds=self.TimeoutLDE):\n # escrow stale so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Stale event escrow \"\n \" at dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Stale event escrow \"\n \"at dig = {}.\".format(bytes(edig)))\n\n # get the escrowed event using edig\n eraw = self.db.getEvt(dgKey(pre, bytes(edig)))\n if eraw is None:\n # no event so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event at.\"\n \"dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed evt at dig = {}.\"\n \"\".format(bytes(edig)))\n\n eserder = Serder(raw=bytes(eraw)) # escrowed event\n ims.extend(eserder.raw)\n\n # get sigs and attach\n sigs = self.db.getSigs(dgKey(pre, bytes(edig)))\n if not sigs: # otherwise its a list of sigs\n # no sigs so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event sigs at.\"\n \"dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed evt sigs at \"\n \"dig = {}.\".format(bytes(edig)))\n\n counter = Counter(code=CtrDex.ControllerIdxSigs,\n count=len(sigs))\n ims.extend(counter.qb64b)\n for sig in sigs: # stored in db as qb64b\n ims.extend(sig)\n\n # process event\n self.processOne(ims=ims) # default framed True\n\n # If process does NOT validate event with sigs, becasue it is\n # still out of order then process will attempt to re-escrow\n # and then raise OutOfOrderError (subclass of ValidationError)\n # so we can distinquish between ValidationErrors that are\n # re-escrow vs non re-escrow. 
We want process to be idempotent\n # with respect to processing events that result in escrow items.\n # On re-escrow attempt by process, Ooe escrow is called by\n # Kevery.self.escrowOOEvent Which calls\n # self.db.addOoe(snKey(pre, sn), serder.digb)\n # which in turn will not enter dig as dup if one already exists.\n # So re-escrow attempt will not change the escrowed ooe db.\n # Non re-escrow ValidationError means some other issue so unescrow.\n # No error at all means processed successfully so also unescrow.\n\n except LikelyDuplicitousError as ex:\n # still can't determine if duplicitous\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Kevery unescrow failed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Kevery unescrow failed: %s\\n\", ex.args[0])\n\n except Exception as ex: # log diagnostics errors etc\n # error other than likely duplicitous so remove from escrow\n self.db.delLde(snKey(pre, sn), edig) # removes one escrow at key val\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Kevery unescrowed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Kevery unescrowed: %s\\n\", ex.args[0])\n\n else: # unescrow succeeded, remove from escrow\n # We don't remove all escrows at pre,sn because some might be\n # duplicitous so we process remaining escrows in spite of found\n # valid event escrow.\n self.db.delLde(snKey(pre, sn), edig) # removes one escrow at key val\n logger.info(\"Kevery unescrow succeeded in valid event: \"\n \"event=\\n%s\\n\", json.dumps(eserder.ked, indent=1))\n\n if ekey == key: # still same so no escrows found on last while iteration\n break\n key = ekey # setup next while iteration, with key after ekey" ]
[ "0.5776446", "0.5423936", "0.5320594", "0.5170984", "0.51104087", "0.5029466", "0.5025242", "0.50205046", "0.48759243", "0.4801463", "0.47459334", "0.47000486", "0.46995685", "0.4694864", "0.46835348", "0.467961", "0.4648427", "0.46379155", "0.46379155", "0.4625023", "0.45730945", "0.45613006", "0.4542094", "0.45276928", "0.45153186", "0.45150012", "0.44994313", "0.44905585", "0.44898614", "0.4485389" ]
0.6517192
0
Correct for motion and call SliceTimeCorrect.
def CorrectMotion(self): if self.verbose: print "Correct for motion" for entry in self.entry_map['epi']: info = self.info[entry] if os.path.exists(info['imgfile_m'] + info['suffix']): return # Always use brik for 3dDeconvolve. suffix = '+orig' epifile = '%s%s' % (info['imgfile'], suffix) prefix = info['imgfile_m'] base_entry = info['base_entry'] if info['base'] == 'start': # Use the first frame specified in template file. Defaults # to zero. base = info['motion_ref_frame'] else: # Use the last frame. base = self.info[base_entry]['tdim'] - info['skip']-1 base = ('%d' % base).replace(' ','') # Correct for slice-timing. self.SliceTimeCorrect(info, epifile) plane = info['plane'] anat_tgt = info['anat_tgt'] # anat_entry = self.anat_entry[plane] if info['catmats']: # Include additonal transformation in motion correction such # that final image is in register with the fieldmap, which has # been registered to the structural image that will be used for # spatial normalization. self.MotcorCatenate(info, base, anat_tgt) else: # Assume fieldmap is in register with the structural. self.Motcor(info, base) if info.get('fmapname', None) is None: # No fieldmap correction. if self.fsl_flip: # Flip the way fslview likes it. self.FSLFlip(info['imgfile_m'], info['imgfile_final']) elif info['suffix'] == '.nii': # Copy motion-corrected images from /tmp to output directory outfile = info['imgfile_final'] + info['suffix'] cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile) self.CheckExec(cmd, [outfile], force=True) cmd = '/bin/rm %s+orig*' % info['imgfile_m'] self.CheckExec(cmd, [], force=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_drift_correction(self, pos):\n\n\t\tprint(\"function not supported yet\")", "def refframe_correct(self, ra, dec, obstime, sobjs=None):\n # Correct Telescope's motion\n refframe = self.par['calibrations']['wavelengths']['refframe']\n if refframe in ['heliocentric', 'barycentric'] \\\n and self.par['calibrations']['wavelengths']['reference'] != 'pixel':\n msgs.info(\"Performing a {0} correction\".format(self.par['calibrations']['wavelengths']['refframe']))\n # Calculate correction\n radec = ltu.radec_to_coord((ra, dec))\n vel, vel_corr = wave.geomotion_correct(radec, obstime,\n self.spectrograph.telescope['longitude'],\n self.spectrograph.telescope['latitude'],\n self.spectrograph.telescope['elevation'],\n refframe)\n # Apply correction to objects\n msgs.info('Applying {0} correction = {1:0.5f} km/s'.format(refframe, vel))\n if (sobjs is not None) and (sobjs.nobj != 0):\n # Loop on slits to apply\n gd_slitord = self.slits.slitord_id[np.logical_not(self.extract_bpm)]\n for slitord in gd_slitord:\n indx = sobjs.slitorder_indices(slitord)\n this_specobjs = sobjs[indx]\n # Loop on objects\n for specobj in this_specobjs:\n if specobj is None:\n continue\n specobj.apply_helio(vel_corr, refframe)\n\n # Apply correction to wavelength image\n self.vel_corr = vel_corr\n self.waveimg *= vel_corr\n\n else:\n msgs.info('A wavelength reference frame correction will not be performed.')", "def _do_updates(self):\n is_right = self._puzzle.is_guess_right()\n if is_right:\n self._puzzle.reveal_puzzle()\n else:\n self._jumper.cut_line()", "def drift_correction(self, pos=None, fix_triggered=False):\n\t\t\n\t\tif pos == None:\n\t\t\tpos = self.dispsize[0] / 2, self.dispsize[1] / 2\n\t\tif fix_triggered:\n\t\t\treturn self.fix_triggered_drift_correction(pos)\t\t\n\t\tself.draw_drift_correction_target(pos[0], pos[1])\n\t\tpressed = False\n\t\twhile not pressed:\n\t\t\tpressed, presstime = self.kb.get_key()\n\t\t\tif pressed:\n\t\t\t\tif pressed == 'escape' or pressed == 'q':\n\t\t\t\t\tprint(\"libeyetribe.EyeTribeTracker.drift_correction: 'q' or 'escape' pressed\")\n\t\t\t\t\treturn self.calibrate()\n\t\t\t\tgazepos = self.sample()\n\t\t\t\tif ((gazepos[0]-pos[0])**2 + (gazepos[1]-pos[1])**2)**0.5 < self.pxerrdist:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tself.errorbeep.play()\n\t\treturn False", "def update_dead_reckoning(self):\n now = time.time()\n time_diff_s = now - self._last_observation_s\n self._last_observation_s = now\n\n self._prediction_step(time_diff_s)", "def non_causal_timecrop(self, length):\n assert length < self.time_length\n\n cut = (self.time_length - length) / 2\n\n _, i_start = _find_nearest(self.times, cut)\n _, i_end = _find_nearest(self.times, self.time_length - cut)\n\n h = np.fft.ifftshift(np.fft.fftshift(self.in_time)[..., i_start:i_end])\n\n new_response = self.from_time(self.fs, h)\n\n if new_response.time_length != length:\n w = f\"Could not precisely shrink to {length}s with fs = {self.fs}\"\n warnings.warn(w)\n\n return new_response", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop()\n ev3.Sound.beep().wait()\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(\n position_sp=-arm_revolutions_for_full_range,\n speed_sp=self.MAX_SPEED,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this\n # line is correct 
as is).", "def recalc_spd(*args):\n return _ida_frame.recalc_spd(*args)", "def acoustic_lasting(self):\n for i in range(self._number_of_events):\n if self._number_of_events == 1:\n self._events[i].corrected_pitch_classes += self._events[i].pitch_classes_octave\n else:\n if i == 0:\n self._events[i].corrected_pitch_classes += self._events[i].pitch_classes_octave\n self._events[i].corrected_pitch_classes += self._events[i+1].pitch_classes_octave\n\n elif i == self._number_of_events - 1:\n self._events[i].corrected_pitch_classes += self._events[i].pitch_classes_octave\n self._events[i].corrected_pitch_classes += self._events[i-1].pitch_classes_octave\n else:\n self._events[i].corrected_pitch_classes += self._events[i+1].pitch_classes_octave\n self._events[i].corrected_pitch_classes += self._events[i].pitch_classes_octave\n self._events[i].corrected_pitch_classes += self._events[i-1].pitch_classes_octave\n\n self._events[i].corrected_pitch_classes = self.sort_single_event_notes(self._events[i].corrected_pitch_classes)\n self._events[i].corrected_pitch_classes = remove_identical(self._events[i].corrected_pitch_classes)", "def _update_transition(self, dt, time, direction): #pylint:disable-msg=C0103,C0301\r\n pass", "def sos_correction(self, ratio):\n\n # Correct velocities\n self.u_mps = self.u_mps * ratio\n self.v_mps = self.v_mps * ratio", "def handleUpdateTimer(self):\n self.mustRun(task = self.position,\n ret_signal = self.positionUpdate)", "def runTask1(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppSweep()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppSuctionOff()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.reset()\r\n\t\tself._motion.terminate()", "def revolver(self):\r\n\t\tself.__revuelto=True", "def test2(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tself._motion.terminate()", "def _calibrate(self, t_send, t_recv, server_timestamp):\n pass", "def update(self):\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.no_more_crops = True\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step", "def update(self, dt):", "def update(self, dt):", "def set_up_orbit_correctors(ps_beg, delay, id_slice1, 
ds_slice, zplot, id_slices, U_core, lambdaref):\n SXSS = Chicane(3.2716, 0.362, 0.830399, delay[0])\n HXSS = Chicane(3.2, 0.3636, 0.5828, delay[1])\n\n OC2 = [CORR08, D1_SXSS, SXSS, D2_SXSS, QUAD09, CORR09]\n OC3 = [CORR15, D1_HXSS, HXSS, D2_HXSS, QUAD16, CORR16]\n\n ps_end1 = beam_transportation(ps_beg, U_core[0])\n\n # ps_end1 is a 4-by-N array. N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the first undulator section.\n\n # The id of the slice on the axis in the second undulator section\n on_axis_id_U2 = int(id_slice1+delay[0]/ds_slice+ (8*110)*lambdaref/ds_slice) # The last part is slippage\n\n print(on_axis_id_U2)\n\n ps_end_slice1 = beam_property_along_s(ps_end1, id_slices)[0:4, :]\n ps_on_axis_2 = np.ravel(ps_end_slice1[:, on_axis_id_U2])\n\n # print(ps_on_axis_2)\n\n OC2_optimized = analyze_orbit_corrector(OC2[0], OC2[-1], OC2[1:-1], ps_on_axis_2)\n print(OC2_optimized)\n CORR08_new = Orbit_Corrector(OC2[0].length, OC2_optimized[0], OC2_optimized[2])\n CORR09_new = Orbit_Corrector(OC2[-1].length, OC2_optimized[1], OC2_optimized[3])\n\n # The whole U2 with optimized orbit correctors\n U2_new = [CORR08_new] + OC2[1:-1] + [CORR09_new] + U_core[1]\n ps_end2 = beam_transportation(ps_end1, U2_new)\n\n # ps_end2 is a 4-by-N array. N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the second undulator section.\n\n # The id of the slice on the axis in the third undulator section\n on_axis_id_U3 = int(id_slice1+(delay[0]+delay[1])/ds_slice +(14*110*lambdaref)/ds_slice) # The last term is the slipage\n\n print(on_axis_id_U3)\n\n ps_end_slice2 = beam_property_along_s(ps_end2, id_slices)[0:4, :]\n ps_on_axis_3 = np.ravel(ps_end_slice2[ :, on_axis_id_U3])\n\n # print(ps_on_axis_3)\n\n OC3_optimized = analyze_orbit_corrector(OC3[0], OC3[-1], OC3[1:-1], ps_on_axis_3)\n print(OC3_optimized)\n CORR15_new = Orbit_Corrector(OC3[0].length, OC3_optimized[0], OC3_optimized[2])\n CORR16_new = Orbit_Corrector(OC3[-1].length, OC3_optimized[1], OC3_optimized[3])\n\n U3_new = [CORR15_new] + OC3[1:-1] + [CORR16_new] + U_core[2]\n\n Undulator_Beamline = U_core[0]+U2_new+U3_new\n\n return Undulator_Beamline", "def update_apc22(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n if len(self._old) >= 1:\n\n\n try:\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n beta = 0.5*self.delta_t/(self.time-self.get_old(0, 2))\n\n pos = self.pos+delta_t*((1+beta)*self.vel-beta*self.get_old(0, 0))\n vel = self.vel+delta_t*((1+beta)*kap[1]-beta*self.get_old(0, 1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n pos = self.pos+delta_t/2.0*(self.vel+vel)\n vel = self.vel+delta_t/2.0*(self.force(pos, vel, self.time+delta_t,\n drag=False)+kap[1])\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n beta = 0.5*col.delta_t/(self.time-self.get_old(0, 2))\n vel = self.vel+col.delta_t*(1+beta)*kap[1]-beta*self.get_old(0, 1)\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest=True)\n col.vel = 
(self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n\n self.time += delta_t\n\n else:\n ## reduced to using the Adams first order method for the first timestep:\n\n kap = update_apc1(self)\n\n self.set_old(kap, 1)\n\n return kap", "def _update_active_rides_fast(self, time: datetime) -> None:\n pass", "def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()", "def on_correct_utterance(self, event):\n if not self.quitting:\n number = event.utterance_number\n instance = event.instance_name\n self.the_mediator.correct_utterance(instance, number)", "def update(self):\n super().update()\n self.decelerate()\n #check for a collisison with all rock types\n self.checkForRockCollisions()\n #when the ship gets hit by a rock, it enters a period of invulnerability. 
we need to make sure that period ends at the proper time\n self.checkGracePeriodDuration()\n #movement stuff\n if (self.isAcceleratingForward):\n self.accelerateForwards()\n if (self.isRotatingLeft):\n self.rotateLeft()\n if (self.isRotatingRight):\n self.rotateRight()", "def motion_correct(mov, max_iters=5, shift_threshold=1., reslice=slice(None,None), in_place=True, verbose=True, compute_kwargs={}, apply_kwargs={}):\n if not in_place:\n mov = mov.copy()\n mov = mov[reslice]\n \n all_vals = []\n for it in range(max_iters):\n if verbose:\n print('Iteration {}'.format(it)); sys.stdout.flush()\n template,vals = compute_motion(mov, **compute_kwargs)\n mov = apply_motion_correction(mov, vals, **apply_kwargs)\n maxshifts = np.abs(vals[:,[0,1]]).max(axis=0)\n all_vals.append(vals)\n if verbose:\n print('Shifts: {}'.format(str(maxshifts))); sys.stdout.flush()\n if np.all(maxshifts < shift_threshold):\n break\n\n # combine values from iterations\n all_vals = np.array(all_vals)\n return_vals = np.empty([all_vals.shape[1],all_vals.shape[2]])\n return_vals[:,[0,1]] = all_vals[:,:,[0,1]].sum(axis=0)\n return_vals[:,2] = all_vals[-1,:,2]\n\n return mov,template,return_vals", "def update(self):\n self.syncSpriteCoordinates()\n self.moveBasedOnCurrentMomentum()\n #self.decelerate()\n self.checkCanvasBoundsAndWrap()", "def routine(self):\n # Read in time table data\n\n _, t1, t2, t3, t4, _, _ = self.times\n _, p1, p2, p3, p4, _, _ = self.positions\n \n # Carry out routine\n self.throttle_valve_set(p1)\n self.valve_pulse(1, t1)\n if self.interrupt_measurement_called:\n self.shutoff()\n return\n \n self.throttle_valve_set(p2) \n self.purge(t2)\n if self.interrupt_measurement_called:\n self.shutoff()\n return\n \n \n ## Check selected method for t3 in main recipe table,\n ## Carry out an operation based on the selection\n mode = self.settings['t3_method']\n self.throttle_valve_set(p3)\n \n if mode == 'Shutter':\n self.shutter_pulse(t3)\n if self.interrupt_measurement_called:\n self.shutoff()\n return\n elif mode == 'PV':\n self.valve_pulse(2, t3)\n if self.interrupt_measurement_called:\n self.shutoff()\n return \n elif mode == 'RF':\n power = self.seren.settings['recipe_power']\n self.plasma_dose(t3, power)\n if self.interrupt_measurement_called:\n self.shutoff()\n return\n# elif mode == 'PV/Purge':\n# '''Run sub_cyc number of subroutine cycles.\n# Subroutine consists of a valve pulse and a purge period.'''\n# for _ in range(sub_cyc):\n# self.valve_pulse(2, sub_t0)\n# self.purge(sub_t1)\n \n \n self.throttle_valve_set(p4)\n self.purge(t4)\n if self.interrupt_measurement_called:\n self.shutoff()\n return", "def target_velocity(self, time):\n pass", "def target_velocity(self, time):\n pass" ]
[ "0.60318804", "0.54270923", "0.53947157", "0.53792375", "0.5303855", "0.5262295", "0.5260591", "0.52238935", "0.5190531", "0.5141736", "0.5131086", "0.50906426", "0.5085911", "0.5074954", "0.50716", "0.506681", "0.50448614", "0.50436366", "0.50436366", "0.50377357", "0.5031351", "0.50200623", "0.5010756", "0.5002735", "0.50016814", "0.500161", "0.5000479", "0.4999179", "0.4985841", "0.4985841" ]
0.6062027
0
Call the jump_censor program to characterize the degree of motion.
def JumpCensor(self): if self.verbose: print 'Computing censor files.' for entry in self.entry_map['epi']: if self.censor_interleave: input_file = '%s+orig' % self.info[entry]['imgfile'] interleave = '--interleave' else: interleave = '' if os.path.exists(self.info[entry]['mot_file']): input_file = self.info[entry]['mot_file'] else: input_file = '%s+orig' % self.info[entry]['imgfile'] cmd = \ "jump_censor -v --prefix=%s %s --store-plot --threshold=%f %s" % \ (self.info[entry]['censor_prefix'], interleave, self.censor_thresh, input_file) try: self.CheckExec(cmd, ['%s_censor.1D' % self.info[entry]['censor_prefix']], force=False) except: print 'Error computing censor files.'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def censoring_fcn(self, q):\n return 1.0", "def jump(self):\n\t\tself.vel = -10\n\t\tself.tick_count = 0\n\t\tself.height = self.y", "def on_jump_press(self) -> None:\r\n if not self.node:\r\n return\r\n if don.jumpFly:\r\n self.node.handlemessage(\"impulse\",self.node.position[0],self.node.position[1]-2,self.node.position[2],self.node.velocity[0]*0,20 +0.01,self.node.velocity[2]*0,10,5,0,0,self.node.velocity[0]*-0.1,20 + 0.01,self.node.velocity[2]*0)\r\n self.node.handlemessage(\"impulse\",self.node.position[0],self.node.position[1],self.node.position[2],self.node.velocity[0]*0,20 +0.01,self.node.velocity[2]*0,10,5,0,0,self.node.velocity[0]*-0.1,20 + 0.01,self.node.velocity[2]*0)\r\n self.node.color = ((0+random.random()*6.5),(0+random.random()*6.5),(0+random.random()*6.5))\r\n self.node.highlight = ((0+random.random()*6.5),(0+random.random()*6.5),(0+random.random()*6.5)) \r\n self.node.jump_pressed = True\r\n self._turbo_filter_add_press('jump')", "def jump(self, x):\n self.change_y += x * self.speed", "def jump(self):\n self.vy = -9", "def censor(text: str) -> str:\n\n # Split up individual words in the text\n tokens: List[str] = text.split(\" \")\n\n # Create a mapping of 0 if the word is okay, 1 if it should be censored\n censor_mask: List[int] = predict([word for word in tokens])\n\n # A list of tuples with the first element being the word and the second being 0 or 1\n censor_map: List[Tuple[str, int]] = list(zip(tokens, censor_mask))\n\n # A list of the words that make up the censored text\n censored_text: List[str] = [\n censor_word(word) if should_censor else word\n for word, should_censor in censor_map\n ]\n\n return \" \".join(censored_text)", "def content_jump(self, jump: np.ndarray, data: np.ndarray) -> None:\n if len(jump) != 1:\n raise ValueError(\"`jump` must be a one dimensional vector\")\n if jump > 0.5:\n euc_dist = np.abs(self._buffer - data).sum(axis=1)\n self._head = np.argmin(euc_dist)", "def censor(text: Optional[str]) -> str:\n char = \"*\"\n text = text if text else \"\"\n return text[0] + (len(text) - 1) * char if text else text", "def jog(self, axis:str=\"x\", distance:float=1):\n self.sendCommand(\"G91\")\n axis.capitalize()\n self.sendCommand(f'$J={axis}{distance} F1000')", "def evaluate(self,joystick,keys):\n \n self.AG_twinklers.do() \n \n \n if joystick.isUp(keys)==True and self.solomon.current_state[\"jumping\"]==0: \n self.solomon.current_state[\"jumping\"]=1 \n\n\n walkcheck=False\n \n if self.solomon.A_wandswish.overide==False:\n \n self.solomon.current_state[\"wandswish\"]=0 \n\n if joystick.isDown(keys)==True: \n self.solomon.current_state[\"crouching\"]=1 \n self.solomon.current_state[\"standing\"]=0\n else: \n self.solomon.current_state[\"crouching\"]=0 \n \n if joystick.isRight(keys)==True:\n self.solomon.facing=1 \n self.solomon.current_state[\"walking\"]=1\n self.solomon.current_state[\"standing\"]=1\n walkcheck=True\n elif joystick.isLeft(keys)==True: \n self.solomon.facing=-1 \n walkcheck=True\n self.solomon.current_state[\"walking\"]=1\n self.solomon.current_state[\"standing\"]=0\n else:\n self.solomon.current_state[\"walking\"]=0 \n self.solomon.current_state[\"standing\"]=1\n\n canwalk=False\n if walkcheck:\n result=self.detect(self.solomon.x+self.solomon.facing*self.solomon.step*5.0,self.solomon.y) \n if (len(result)==0 or result[0][0]==\".\") and self.solomon.current_state[\"walking\"]==1:\n #self.solomon.x+=self.solomon.step*self.solomon.facing \n self.solomon.current_state[\"standing\"]=0 \n self.solomon.current_state[\"walking\"]=1\n 
canwalk=True\n #elif result[0][0] in [\"]\n\n result1=self.grid[int(self.solomon.y-0)][int(self.solomon.x+0.5+self.solomon.step*2*self.solomon.facing)]\n result2=self.grid[int(self.solomon.y-0)][int(self.solomon.x+0.5-self.solomon.step*2*self.solomon.facing)]\n #print \"fall check\" + str((result1,result2,self.solomon.x,self.solomon.y))\n if result1==\".\" and result2==\".\":\n self.solomon.y-=self.solomon.step\n self.solomon.current_state[\"walking\"]=0\n canwalk=False\n\n if canwalk==True: self.solomon.x+=self.solomon.step*self.solomon.facing\n\n if joystick.isFire(keys)==True and self.solomon.current_state[\"wandswish\"]==0: \n self.solomon.A_wandswish.kick()\n self.solomon.A_wandswish.overide=True\n self.solomon.current_state[\"wandswish\"]=1 \n\n \n if self.solomon.current_state[\"jumping\"]==1:\n self.solomon.AG_jump.do()\n print \"he's jumping\"\n print str(self.solomon.AG_jump.action(\"jump_displacement\").tick)\n self.solomon.y+=0.2\n #print \"co-ordinates \"+str((self.solomon.x,self.solomon.y))", "def on_r_joy_y(self):\r\n self.log()", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCR_HEIGHT:\n self.change_y = -8", "def jump(self):\n\n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(\n self, self.platforms, False)\n self.rect.y -= 2\n\n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= WIN_HEIGHT:\n self.change_y = -10", "def teleopPeriodic(self):\n self.drive.arcadeDrive(1, 0)\n self.brushless.set(1)\n self.spark.set(self.joystick.getY())", "def jump(self):\n if self.y_pos > self.max_pos_y + self.height:\n self.isJump = True\n self.y_velocity = -13.5\n sounds[\"jump\"].play()", "def on_key(window, key, scancode, action, mods):\n if action != glfw.PRESS:\n return\n \n global controller\n\n if key == glfw.KEY_SPACE:\n controller.fillPolygon = not controller.fillPolygon\n\n elif key == glfw.KEY_ESCAPE:\n glfw.set_window_should_close(window, True)\n\n # Si detecta la tecla [Q] cambia el estado del efecto 1 : zoom\n elif key == glfw.KEY_Z:\n controller.effect1 = not controller.effect1\n\n # Si detecta la tecla [W] cambia el estado del efecto 2 : corte\n elif key == glfw.KEY_C:\n controller.effect2 = not controller.effect2\n\n else:\n print('Unknown key')", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0: #or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def apply_action(self, action):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n robot_x = robot_state.pose.position.x\n robot_y = robot_state.pose.position.y\n # Set the distance moved in an action such that it is at 
least as large as the\n # minimum distance that would let a robot in the middle of the goal go to either side\n #self.move_dist = max(((C.GOAL_TOP + C.GOAL_BOTTOM) / 2) / C.NUM_POS_SENDS, 0.5)\n if action == Learn.MOVE_LEFT:\n print(\"Move left\")\n self.set_robot(robot_x, robot_y+self.move_dist)\n elif action == Learn.MOVE_RIGHT:\n print(\"Move right\")\n self.set_robot(robot_x, robot_y-self.move_dist)\n else:\n print(\"Stay put\")", "def ev_controlleraxismotion(self, event: tcod.event.ControllerAxis) -> T | None:", "def jump(self):\n \n # move down and see if there's a platform below us.\n # Move down 2 pixels because it doesn't work well if you only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set the speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def jump(self):\n\t\tself._is_falling = True\n\t\tself._dy = -5", "def ev_joyhatmotion(self, event: tcod.event.JoystickHat) -> T | None:", "def on_r_joy_x(self):\r\n self.log()", "def on_l_joy_y(self):\r\n self.log()", "def ev_joyaxismotion(self, event: tcod.event.JoystickAxis) -> T | None:", "def jump(self, xvel = 0, yvel = 0): #TODO: figure out how a monster's jumping ability is determined.\n self.xvel += xvel\n self.yvel -= yvel\n self.animation.iter()\n self.ai_count = 25 #TEMP\n self.onGround = False", "def _take_action(self, action):\n\n if isinstance(action, list) or isinstance(action, np.ndarray):\n action = action[0]\n\n if self.continuous:\n increment = np.array([1.5*np.cos(action),1.5*np.sin(action)])\n else:\n increment = np.array([0.0,0.0])\n if action == 0:\n increment[0] = 1.5\n elif action == 1:\n increment[0] = 1.225\n increment[1] = 1.225\n elif action == 2:\n increment[1] = 1.5\n elif action == 3:\n increment[0] = -1.225\n increment[1] = 1.225\n elif action == 4:\n increment[0] = -1.5\n elif action == 5:\n increment[0] = -1.225\n increment[1] = -1.225\n elif action == 6:\n increment[1] = -1.5\n elif action == 7:\n increment[0] = 1.225\n increment[1] = -1.225\n else:\n print('NOP!')\n\n self.dog_pose += increment\n self._update_environment()", "def jump(self):\n print(\"Inside ElfRider.jump\")", "def censor_a_word(text, censor):\n censored_item = \"\"\n for x in range(len(censor)):\n if censor[x] == \" \":\n censored_item = censored_item + \" \"\n else:\n censored_item = censored_item + \"X\"\n return text.replace(censor, censored_item)", "def on_cmyk_slide(self,c,m,y,k):\n if not self.active:\n return\n cyan = c / 100.0\n magenta = m / 100.0\n yellow = y / 100.0\n black = k / 100.0\n self.cmyk = colormodel.CMYK(cyan, magenta, yellow, black)\n temp = a3.cmyk_to_rgb(self.cmyk)\n assert (temp == None or type(temp) == colormodel.RGB), 'cmyk_to_rgb does not return a RGB object'\n self.rgb = self.rgb if temp is None else temp\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.update()" ]
[ "0.59280413", "0.541607", "0.5290688", "0.52872306", "0.5283519", "0.51918846", "0.5162083", "0.5111304", "0.50946474", "0.50678855", "0.5063725", "0.50507736", "0.50380087", "0.5027665", "0.50178564", "0.49831063", "0.4976855", "0.4957412", "0.49509645", "0.49407175", "0.49123913", "0.49025196", "0.48670948", "0.48562348", "0.48345843", "0.48306543", "0.48277506", "0.48266277", "0.4821847", "0.48047793" ]
0.7094361
0
Compute the temporal SNR for each epi, save in a nifti file, and store a summary in a png file.
def ComputeSNR(self): for epi in self.entry_map['epi']: epifile = self.info[epi]['imgfile_final'] + self.info[epi]['suffix'] prefix = self.info[epi]['imgfile_final'] + '_snr' if not os.path.exists('%s_snr.png' % prefix): if self.verbose: print 'TemporalSnr(epifile=%s, prefix=%s)' % \ (epifile, prefix) try: TemporalSnr(epifile=epifile, prefix=prefix)() except: print("Error computing temporal SNR")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_SNR(snid: int, photo_data: pd.DataFrame, \n head_data: pd.DataFrame, code_zenodo: int, \n snana_file_index: int, code_snana: int):\n \n types_names = {90: 'Ia', 62: 'Ibc', 42: 'II', 67: '91bg', 52: 'Iax',\n 64:'KN', 95: 'SLSN', 994: 'PISN', 992: 'ILOT', \n 993: 'CaRT', 15: 'TDE', 88: 'AGN', 92: 'RRL', \n 65: 'M-dw', 16: 'EB', 53: 'Mira', 991: 'BMicroL',\n 6: 'MicroL'}\n \n # LSST filters\n filters = [b'u ', b'g ', b'r ', b'i ', b'z ', b'Y ']\n\n flag_id_photo = photo_data['SNID'] == snid\n\n flux = photo_data[flag_id_photo]['FLUXCAL'].values\n fluxerr = photo_data[flag_id_photo]['FLUXCALERR'].values\n\n SNR_all = flux/fluxerr\n \n indx = np.random.choice(range(flux.shape[0]))\n\n flag_id_head = head_data['SNID'].values == snid\n redshift = head_data['SIM_REDSHIFT_CMB'].values[flag_id_head][0]\n \n # store values\n line = [snid, snana_file_index, code_zenodo, code_snana, \n types_names[code_zenodo], redshift]\n \n for fil in filters: \n line.append(head_data['SIM_PEAKMAG_' + str(fil)[2]].values[flag_id_head][0])\n \n # calculate SNR statistics \n for f in [np.mean, max, np.std]: \n for fil in filters: \n \n flag_fil = photo_data[flag_id_photo]['FLT'] == fil\n neg_flag = flux > -100\n flag2 = np.logical_and(flag_fil, neg_flag)\n \n if sum(flag2) > 0:\n SNR_fil = SNR_all[flag2] \n line.append(f(SNR_fil))\n \n if len(line) == 30:\n return line\n else:\n return []", "def save(self, compute_snrs=True):\n if not self.save_mode:\n raise RuntimeError('Need to enable save mode to save')\n\n fn = os.path.join(self.output_dir,\n 'data_' + time_string() + '.h5')\n save_dict(fn=fn, d=self.data)\n if compute_snrs:\n from src.analyzer import DataAnalyzer\n da = DataAnalyzer.fromfilename(fn)\n da.snr_list()\n return fn", "def do_SEIR(self, t_max=200, dt=1.):\n dt = float(dt)\n g = Graph()\n\n for node in ['S', 'E', 'I', 'R']:\n g.add_node(node, 0)\n\n g.set_node('S', self.population)\n g.set_node('E', 0)\n g.set_node('I', self.N_init)\n g.set_node('R', 0)\n\n # cumulative time series\n S = [g.get_node_value('S')] # Susceptible\n E = [g.get_node_value('E')] # Exposed\n I = [g.get_node_value('I')] # noqa Infected\n R = [g.get_node_value('R')] # Recovered\n\n ts = [0.] 
# time series\n nms = ['prob', 'lag']\n\n g.add_edge('S', 'S', nms, [0.1, 2])\n g.add_edge('E', 'E', nms, [0.4, 21])\n g.add_edge('I', 'I', nms, [0.1, 2])\n\n g.add_edge('S', 'E', nms, [1.2, 1])\n g.add_edge('E', 'I', nms, [0.1, 14]) # [, tiempo de incubacion]\n g.add_edge('I', 'R', nms, [0.7, 2]) # [, tiempo de recuperacion]\n\n t, time_steps = 0., 0\n while t < t_max:\n\n time_steps = time_steps + 1\n\n t = t + dt\n ts.append(t)\n\n # (( S ))\n prob_SS = g.get_edge('S', 'S', 'prob') # beta\n\n dS = - S[-1] * (I[-1] / self.population) * prob_SS\n\n # n_S = min(S[-1] + min(dS * dt, 0), self.population)\n n_S = S[-1] + dS * dt\n\n # (( E ))\n prob_EE = g.get_edge('E', 'E', 'prob')\n dE = - dS - prob_EE * E[-1]\n\n # n_E = min(E[-1] + max(dE * dt, 0), self.population)\n n_E = E[-1] + dE * dt\n\n # (( I ))\n prob_EI = g.get_edge('E', 'I', 'prob')\n lag_EI = g.get_edge('E', 'I', 'lag')\n update_EI = E[-lag_EI] if lag_EI < len(E) else 0.\n\n prob_IR = g.get_edge('I', 'R', 'prob')\n lag_IR = g.get_edge('I', 'R', 'lag')\n update_IR = I[-lag_IR] if lag_IR < len(I) else 0.\n\n prob_II = g.get_edge('I', 'I', 'prob')\n\n dI = prob_EI * update_EI - prob_IR * update_IR\n dI = -dI # porque ????\n n_I = min(I[-1] + dI * dt, self.population)\n\n # (( R ))\n prob_II = g.get_edge('I', 'I', 'prob')\n dR = prob_II * I[-1]\n n_R = min(R[-1] + max(dR * dt, 0), self.population)\n\n S.append(n_S)\n E.append(n_E)\n I.append(n_I)\n R.append(n_R)\n\n df = pd.DataFrame(\n {'ts': ts, 'S': S, 'E': E, 'I': I, 'R': R}).set_index(\"ts\")\n\n extra = attr.asdict(self)\n extra[\"model_name\"] = \"SEIR\"\n return ModelResultFrame(df=df, extra=extra)", "def run(self):\r\n #print 'WriteFITS_IDI.run'\r\n\r\n # construct the name of the file\r\n readfits = self.previous_results['readfits']\r\n obs_date = readfits['obs date']\r\n idifitsfile = '%s.idi.fits' % obs_date\r\n\r\n configxml = 'firi.xml'\r\n\r\n # midnight on date to Julian day\r\n obs_date_midnight = astro_time.Time('%s-%s-%sT00:00:00' %\r\n (obs_date[:4], obs_date[4:6], obs_date[6:8]), format='isot')\r\n obs_date_midnight = obs_date_midnight.jd\r\n\r\n rdate = astro_time.Time(obs_date_midnight, format='jd',\r\n out_subfmt='date')\r\n rdate = rdate.iso\r\n\r\n # number of days after midnight at obs start\r\n obs_date_time = astro_time.Time('%s-%s-%s:%s:%s' %\r\n (obs_date[:4], obs_date[4:6], obs_date[6:11], obs_date[11:13],\r\n obs_date[13:]), format='isot')\r\n obs_date_time = obs_date_time.jd - obs_date_midnight\r\n\r\n # get specific items from the results that will be need in\r\n # the reduction\r\n reduce_interferogram = self.previous_results['reduceinterferogram']\r\n data_quality = reduce_interferogram['data_quality']\r\n scan_uvspectra = reduce_interferogram['scan_uvspectra']\r\n\r\n wavenumber = scan_uvspectra[0].wavenumber\r\n\r\n # construct lists of the values to be stored in each Table column\r\n n_uvspectra = max(scan_uvspectra.keys()) + 1\r\n mcomplex = 3\r\n mstokes = 1\r\n mfreq = len(wavenumber)\r\n mra = 1\r\n mdec = 1\r\n\r\n uv_data = np.zeros([n_uvspectra, mdec, mra, mfreq, mstokes, mcomplex])\r\n u = np.zeros([n_uvspectra])\r\n v = np.zeros([n_uvspectra])\r\n w = np.zeros([n_uvspectra])\r\n dates = np.zeros([n_uvspectra])\r\n times = np.zeros([n_uvspectra])\r\n baselines = np.zeros([n_uvspectra], dtype=np.int)\r\n freqid = np.ones([n_uvspectra], dtype=np.int)\r\n\r\n for k,val in scan_uvspectra.items():\r\n uv_data[k,0,0,:,0,0] = val.spectrum.real\r\n uv_data[k,0,0,:,0,1] = val.spectrum.imag\r\n uv_data[k,0,0,:,0,2] = 
np.ones(val.spectrum.real.shape)\r\n u[k] = np.mean(val.baseline_x)\r\n v[k] = np.mean(val.baseline_y)\r\n w[k] = np.mean(val.baseline_z)\r\n dates[k] = obs_date_midnight\r\n times[k] = obs_date_time + (np.mean(val.time) / (3600 * 24))\r\n baselines[k] = 258\r\n\r\n # external_params is referred to inside config.xml and can be\r\n # used to set parameters there\r\n light_speed = constants.c.to('m/s').value\r\n external_params = {'NCHAN':len(wavenumber),\r\n 'RDATE':rdate,\r\n 'REF_FREQ':0.0 * 100 * light_speed,\r\n 'CHAN_BW':np.abs(wavenumber[1] - wavenumber[0]) * \\\r\n 100 * light_speed}\r\n\r\n print \"Out: %s\\nConfig: %s\"%(idifitsfile, configxml)\r\n\r\n print('\\nConfiguring Array geography')\r\n print('--------------------------')\r\n # Meaningless numbers, hopefully not needed by any CASA method \r\n # that we want to use\r\n (latitude, longitude, elevation) = ('00:00:00.00', '00:00:00.00', 0)\r\n now = datetime.datetime.now()\r\n\r\n # Make ourselves an Array (pyEphem observer)\r\n array_geometry_m = np.array([\r\n [0.0, 0.0, 0.0],\r\n [0.0, 80.0, 0.0]], dtype = 'float32')\r\n beach = Array(lat=latitude, long=longitude, elev=elevation, date=now,\r\n antennas=array_geometry_m)\r\n\r\n print('\\nConfiguring phase source')\r\n print('--------------------------')\r\n # The source is our phase centre for UVW coordinates\r\n line = \"%s,f,%s,%s,%s,%d\" % ('Deep Space', '00:00:00',\r\n '00:00:00', '1', 2000)\r\n source = ephem.readdb(line)\r\n source.compute(beach)\r\n print \"Name: %s \\nRA: %s \\nDEC: %s\"%(source.name, source.ra, source.dec)\r\n\r\n # Make a new blank FITS HDU\r\n print('\\nCreating PRIMARY HDU')\r\n print('------------------------------------')\r\n hdu = make_primary(config=configxml, external_params=external_params)\r\n print repr(hdu.header)\r\n\r\n # Go through and generate required tables\r\n print('\\nCreating ARRAY_GEOMETRY')\r\n print('------------------------------------')\r\n tbl_array_geometry = make_array_geometry(config=configxml, num_rows=2,\r\n external_params=external_params)\r\n tbl_array_geometry = config_array_geometry(tbl_array_geometry,\r\n array_geometry_m)\r\n print repr(tbl_array_geometry.header)\r\n\r\n print('\\nCreating FREQUENCY')\r\n print('------------------------------------')\r\n tbl_frequency = make_frequency(config=configxml, num_rows=1,\r\n external_params=external_params)\r\n tbl_frequency = config_frequency(tbl_frequency,\r\n external_params=external_params)\r\n print repr(tbl_frequency.header)\r\n\r\n print('\\nCreating SOURCE')\r\n print('------------------------------------')\r\n tbl_source = make_source(config=configxml, num_rows=1,\r\n external_params=external_params)\r\n tbl_source = config_source(tbl_source, source)\r\n print repr(tbl_source.header)\r\n\r\n print('\\nCreating ANTENNA')\r\n print('------------------------------------')\r\n tbl_antenna = make_antenna(config=configxml, num_rows=2,\r\n external_params=external_params)\r\n tbl_antenna = config_antenna(tbl_antenna)\r\n print repr(tbl_antenna.header)\r\n\r\n print('\\nCreating UV_DATA')\r\n print('------------------------------------')\r\n\r\n print 'Data dimensions: %i dumps, %i chans, %i pols, %i data' % (\r\n n_uvspectra, mfreq, mstokes, mcomplex)\r\n\r\n print('Generating blank UV_DATA rows...')\r\n tbl_uv_data = make_uv_data(config=configxml, num_rows=n_uvspectra,\r\n external_params=external_params)\r\n\r\n timesorted = np.argsort(times)\r\n\r\n for k in timesorted:\r\n tbl_uv_data.data[k]['FLUX'] = uv_data[k,0,0,:,0,:].ravel()\r\n tbl_uv_data.data[k]['UU'] 
= u[k] / light_speed\r\n tbl_uv_data.data[k]['VV'] = v[k] / light_speed\r\n tbl_uv_data.data[k]['WW'] = w[k] / light_speed\r\n tbl_uv_data.data[k]['BASELINE'] = baselines[k]\r\n tbl_uv_data.data[k]['DATE'] = dates[k]\r\n tbl_uv_data.data[k]['TIME'] = times[k]\r\n tbl_uv_data.data[k]['SOURCE'] = 1\r\n tbl_uv_data.data[k]['FREQID'] = 1\r\n tbl_uv_data.data[k]['INTTIM'] = 3\r\n\r\n print repr(tbl_uv_data.header)\r\n \r\n hdulist = pyfits.HDUList(hdus=\r\n [hdu,\r\n tbl_array_geometry,\r\n tbl_source, \r\n tbl_frequency,\r\n tbl_antenna,\r\n tbl_uv_data])\r\n\r\n print('Verifying integrity...') \r\n hdulist.verify()\r\n \r\n if(os.path.isfile(idifitsfile)):\r\n print('Removing existing file...')\r\n os.remove(idifitsfile)\r\n print('Writing to file...')\r\n hdulist.writeto(idifitsfile)\r\n\r\n print('Done.')\r\n\r\n self.result['idifitsfile'] = idifitsfile\r\n\r\n return self.result", "def save_nifti(self, path):\n meta = {'te': self.te, 'tr': self.tr, 'sw': self.sw}\n if self.sequence_type == 'STEAM':\n meta['tm'] = self.tm\n\n # store real and imaginary components in last 2 dims\n component_fid = np.stack((np.real(self.fid),np.imag(self.fid)), -2)\n nifti = nib.Nifti2Image(component_fid, self.transform.get_matrix(), extra=meta)\n nib.save(nifti, path)", "def write_seisan(filename, args):\n bf = BaikalFile(filename)\n if not bf.valid:\n print(\"Invalid file {}\".format(filename))\n return\n header = bf.MainHeader\n # datetime\n date = datetime.datetime(header[\"year\"], header[\"month\"], header[\"day\"])\n delta = datetime.timedelta(seconds=header[\"to\"])\n dt = date + delta\n _time = dt.time() # time\n # make utc datetime\n utcdatetime = UTCDateTime(date.year, date.month, date.day,\n _time.hour, _time.minute, _time.second, _time.microsecond, precision=3)\n bf.traces = bf.traces.astype(np.int32)\n bf.traces = bf.traces[:3]\n traces = []\n for channel, data in zip(CHANNELS, bf.traces):\n stats = DEFAULT_STATS.copy()\n stats.update({\n \"station\": header['station'].upper()[:3],\n 'channel': channel,\n 'sampling_rate': int( 1./header[\"dt\"] ),\n \"delta\": header[\"dt\"],\n \"npts\": data.size,#shape[0]\n 'starttime': utcdatetime,\n })\n # save coordinates\n stats['gse2'][\"lat\"] = header['latitude']\n stats['gse2'][\"lon\"] = header[\"longitude\"]\n trace = Trace(data=data, header=stats)\n traces.append(trace)\n # create Stream\n stream = Stream(traces)\n #== write seisan\n # date\n name = \"{year:04}-{month:02}-{day:02}\".format(**header)\n # time\n name += \"-{t.hour:02}-{t.minute:02}\".format(t=stats['starttime'])\n # + station name + Day_of_Year\n name += \"{0}__{1:03}\".format(stats[\"station\"], stats['starttime'].timetuple().tm_yday)\n print('Writing GSE2 file %s.' 
% name)\n writeGSE2(stream, os.path.join(args.outdir, name))", "def in_situ_tair_snd(sno0, year0=2016, npr_date=-1, ascat_date=-1):\n if npr_date < 0:\n npr_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n if ascat_date < 0:\n ascat_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n snd_name = \"snow\"\n print 'the %d was processing' % sno0\n sno = str(sno0)\n tair_name = \"Air Temperature Observed (degC)\"\n if sno0 in [2065, 2081]:\n if year0 == 2016:\n tair_name = \"Air Temperature Average (degC)\"\n # read measurements\n hr_list = [5, 7, 9, 14, 18, 21]\n t_air_one_year = read_site.in_situ_series(sno, y=year0, hr=hr_list) # [:, :, 0] temperature at 7:00 (local)\n # time_above_zero_0 = data_process.zero_find(t_air_one_year[:, :, 0], w=10, th=-0.1) #\n # time_above_zero_1 = data_process.zero_find(t_air_one_year[:, :, 1], w=10, th=-0.1)\n # time_above_zero_2 = data_process.zero_find(t_air_one_year[:, :, 3], w=10, th=-0.1)\n time_above_zero_list = [data_process.zero_find(t_air_one_year[:, :, i], w=10, th=-0.1)\n for i in range(0, len(hr_list))]\n date_tuple = bxy.time_getlocaltime(time_above_zero_list, ref_time=[2000, 1, 1, 0], t_source='US/Alaska')\n t_value, t_date = read_site.read_measurements\\\n (sno, tair_name, np.arange(1, 365), year0=year0, hr=18, t_unit='sec')\n\n\n tair_zero_day2 = data_process.zero_find(np.array([t_date, -t_value]), w=7, th=0) # in unit of sec\n tair_zero_day1 = data_process.zero_find_gt(np.array([t_date, t_value]), w=7, th=1)\n air_win = 7 # check days during window shown air temperature gt 0 degC\n w, w_valid = data_process.n_convolve3(t_value, air_win)\n air0_index0 = np.where(w>5)\n for ind0 in air0_index0[0]:\n if t_date[ind0] > bxy.get_total_sec('%d0307' % year0):\n tair_zero_day = t_date[ind0] - air_win*24*3600\n break\n # check\n zero_date = bxy.time_getlocaltime([tair_zero_day,tair_zero_day2, npr_date[0], ascat_date[0]],\n ref_time=[2000, 1, 1, 0], t_source=\"US/Alaska\")[-2]\n i_zero = np.where(bxy.time_getlocaltime(t_date, ref_time=[2000, 1, 1, 0],\n t_source=\"US/Alaska\")[-2] == zero_date[0])[0][0]\n t_check = t_value[i_zero - 3: i_zero + 4]\n air_0, air00 = read_site.read_measurements(sno, tair_name, 366+np.arange(50, 70), hr=18)\n a_extend = np.array([-3600*24, 3600*24])\n period0, period1 = np.array(sorted([tair_zero_day, npr_date])) + a_extend, \\\n np.array(sorted([tair_zero_day, ascat_date])) + a_extend\n snow_value, snow_date = read_site.read_measurements\\\n (sno, snd_name, np.arange(1, 365), year0=year0, hr=0, t_unit='sec')\n # get the in situ measurements during a period\n snow2date0 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period0)\n snow2date1 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period1)\n air2date0, air2date1 = data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period0),\\\n data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period1)\n return tair_zero_day, snow2date0, snow2date1, air2date0, air2date1", "def plot_seaice_trend(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/trends/SIC/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += 
temporal_resolution\n title += ' SIE trends'\n\n\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n\n if seaice_source == 'nsidc':\n seaice = seaice * area /250\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = xr.apply_ufunc(scipy.stats.linregress, seaice[seaicename].time.values.astype(float), seaice[seaicename], input_core_dims=[['time'],['time']], vectorize=True, dask='parallelized', output_dtypes=[float]*5, output_core_dims=[[]]*5)\n if seaice_source =='ecmwf':\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice[seaicename].time.values.astype(float), seaice[seaicename])\n \n seaice_m = seaice_m * 1e9 * 60 * 60 * 24 * 365\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n # seaice_m = seaice_m*area\n seaice_m = seaice_m.where(seaice_m != 0)\n # seaice_m = seaice_m.where(seaice_p_value <= 0.05)\n max_ = seaice_m.max()\n min_ = seaice_m.min() \n # max_ = 1\n divnorm = TwoSlopeNorm(vmin=min_, vcenter=0, vmax=max_)\n fig = plt.figure(figsize = (5,5))\n ax = fig.add_subplot(111, projection = ccrs.SouthPolarStereo())\n # Plotting\n contor = ax.contourf(seaice_m.x, seaice_m.y, seaice_m, cmap = 'RdBu', levels = 11, norm = divnorm, transform=ccrs.SouthPolarStereo())\n ax.coastlines()\n ax.set_axis_off()\n cbar = plt.colorbar(contor)\n cbar.set_label('Trend in SIE (km$^2$ yr$^{-1}$)')\n plt.title(title)\n plt.savefig(imagefolder + seaicename + '.pdf')\n plt.show()", "def plot_seaice_predict(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/timeseries/prediction/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += temporal_resolution\n title += ' SIE prediction'\n\n\n# Loading Seaice Trends\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n seaice = seaice\n\n\n# Index contributions\n filename = f'processed_data/regressions/spatial_multiple/regr_{temp_decomp}_{temporal_resolution}_{dt}_{spatial_resolution}'\n dataset = xr.open_dataset(filename + '.nc')\n indicies = np.array([i for i in dataset])\n values = np.array([dataset[i].values for i in dataset])\n index_data = {}\n for indexname in indicies[:-1]:\n filename = f'{indexname}_{temp_decomp}_{temporal_resolution}_{dt}'\n index_data[indexname] = xr.open_dataset('processed_data/INDICIES/' + filename +'.nc')[indexname]\n index_data[indexname] = (index_data[indexname] - index_data[indexname].mean()) \n index_data[indexname] = index_data[indexname] / index_data[indexname].std()\n\n times = list(set.intersection(set(seaice.time.values), *(set(index_data[i].time.values)for i in indicies[:-1])))\n\n prediction = seaice.copy() * 0\n for indexname in indicies:\n if indexname in index_data.keys():\n prediction += index_data[indexname] * dataset[indexname]\n else:\n prediction += dataset[indexname]\n\n seaice = seaice.sortby('time').sel(time=times).sortby('time')\n prediction = 
prediction.sortby('time').sel(time=times).sortby('time')\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # ax2 = ax.twinx()\n # ax2.plot([],[])\n ln1 = ax.plot(seaice.time, (seaice[seaicename]*area/250).sum(dim = ('x', 'y')), label = 'SIE')\n ln2 = ax.plot(seaice.time, (prediction[seaicename]*area/250).sum(dim = ('x', 'y')), label = 'Prediction')\n # ax.set_xlim([min(times),max(times)])\n\n lines = ln1 + ln2\n labels = [line.get_label() for line in lines]\n plt.legend(lines,labels,bbox_to_anchor=(0.99, -0.15), ncol = 2, loc = 'upper right')\n\n\n data = (prediction[seaicename]*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n plt.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#EA1B10')\n data = (seaice[seaicename]*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n plt.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#177E89')\n plt.title(title)\n\n plt.savefig(imagefolder + seaicename + '.pdf')\n plt.show()", "def plot_index_sic_timeseries(anomlous = False, temporal_resolution = 'monthly', detrend = False, imagefolder = 'images/timeseries/SIC_INDICIES', indexname = 'SAM', n = 5, seaice_source = 'nsidc'):\n output_folder = 'processed_data/'\n\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n if detrend:\n dt = 'detrended'\n else:\n dt = 'raw'\n\n filename = f'{indexname}_{temp_decomp}_{temporal_resolution}_{dt}'\n indicies = xr.open_dataset(output_folder + 'INDICIES/' + filename +'.nc')[indexname]\n data = indicies.copy()\n data = data.loc[data.time.dt.year >= 1979]\n seaicename = f'{temp_decomp}_{temporal_resolution}_{n}_{dt}'\n\n seaice = xr.open_dataset(output_folder + 'SIC/' + seaicename +'.nc')\n\n\n times = list(set.intersection(set(seaice.time.values), set(data.time.values)))\n\n seaice = seaice_area_mean(seaice.sel(time=times).sortby('time'), 1)\n data = data.sel(time=times).sortby('time')\n\n\n if seaice_source == 'ecmwf':\n seaice = xr.open_dataset(output_folder + 'ERA5/SIC/' + seaicename +'.nc')\n if seaice_source == 'ecmwf':\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice[seaicename].time.values.astype(float), seaice[seaicename].mean(dim = ('longitude', 'latitude')))\n if seaice_source == 'nsidc':\n mean_seaice = seaice_area_mean(seaice[seaicename],1)\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(mean_seaice.time.values.astype(float), mean_seaice)\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n title += dt + ' '\n\n title += temporal_resolution\n title += f' mean {indexname} and SIC'\n fig, ax = plt.subplots()\n ax2 = plt.twinx(ax)\n ax2.plot([],[])\n\n if anomlous or detrend: ax.axhline(0, alpha = 0.5)\n\n ln1 = ax.plot(data.time, data, label = f'{indexname}', color = '#EA1B10')\n ax.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#EA1B10')\n if seaice_source == 'ecmwf':\n ln2 = ax2.plot(seaice.time, seaice[seaicename].mean(dim = ('longitude', 'latitude')), label = 'SIC', color = '#177E89')\n if seaice_source == 'nsidc':\n ln2 = ax2.plot(mean_seaice.time, mean_seaice, label = 'SIC', color = '#177E89')\n ax2.plot(seaice.time, 
seaice_m * seaice.time.values.astype(float) + seaice_b, color = '#177E89')\n\n yabs_max = abs(max(ax.get_ylim(), key=abs))\n ax.set_ylim(ymin=-yabs_max, ymax=yabs_max)\n if anomlous or detrend:\n yabs_max = abs(max(ax2.get_ylim(), key=abs))\n ax2.set_ylim(ymin=-yabs_max, ymax=yabs_max)\n\n # ylabels\n ax.set_ylabel(f'{indexname}')\n ax2.set_ylabel(f'Mean SIC')\n\n # legend\n lines = ln1 + ln2\n labels = [line.get_label() for line in lines]\n plt.legend(lines,labels,bbox_to_anchor=(0.99, -0.15), ncol = 2, loc = 'upper right')\n\n plt.title(title)\n plt.savefig(imagefolder + f'/SIC_{indexname}_{filename}_{seaice_source}' + '.pdf')\n plt.show()", "def snr(mag=20, itime=1., read=24.5, sky=8.43, npix=24., zero=26.44, dark=0.0):\n # 2009-02-20 14:40 IJC: Initiated\n \n star = itime * 10**(0.4*(zero-mag))\n noise = npix * (itime*(sky+dark)+read**2)\n\n return star * (star+noise)**-0.5", "def writeNoise(self):\n\n if (self.noise_file == None or self.noise_file == \"\"):\n return\n ofname = self.noise_file\n ofh = open(ofname,'w')\n\n # these have to be there as long as we've read the FAST file already\n ## not true: we don't store these in the dict.\n have_data = False\n if (\"TipRad\" in self.fstDict and 'TowerHt' in self.fstDict and 'Twr2Shft' in self.fstDict):\n tiprad = self.fstDict['TipRad']\n towerht = self.fstDict['TowerHt']\n twr2shft = self.fstDict['Twr2Shft']\n have_data = True\n\n for line in self.lines_noise:\n if (have_data and line.find('Observer location') >= 0):\n xdist = -1.0 * (tiprad + (towerht + twr2shft))\n ofh.write('{:.1f} 0.0 0.0'.format(xdist))\n ofh.write(' (x,y,z) Observer location in tower-base coordinate system. Use -(RotRad+HubHt)\\n')\n else:\n ofh.write(line)\n ofh.close()", "def generate_nrrd(file):\n for root, dirs, files in os.walk(file):\n path = root.split(file)\n if path[1] != \"\":\n patient_id = int(path[1].split('/')[1][3:])\n path = file + \"_nrrd\" + path[1]\n # if path.find(\"frisk\")==-1 & path.find(\"M+\")==-1 & path.find('T2M')==-1:\n if (path.find('T2M') != -1 & path.find('frisk') ==\n -1 & path.find('+') == -1\n ) or path.find('masks') != -1: # Only T2M or mask can be found\n os.makedirs(path, exist_ok=True)\n print(path)\n Nrrd = ImageCollection(os.path.join(root, \"*.tiff\"),\n plugin='tifffile',\n load_func=convert_to_gray)\n Nrrd = np.asarray(Nrrd)\n\n if get_image_info(patient_id):\n print(patient_id)\n (spacings, thickness) = get_image_info(patient_id)\n thicknesses = [float('nan'), float('nan'), thickness]\n spacing_direction = np.eye(3)\n # Note: All header fields are specified in Fortran order,\n # per the NRRD specification, regardless of the index order. 
For example,\n # a C-ordered array with shape (60, 800, 600) would have a sizes field of (600, 800, 60).\n if len(Nrrd) > 0:\n header = {\n 'spacings': spacings,\n 'thicknesses': thicknesses\n \n }\n nrrd.write(os.path.join(path,\n str(patient_id) + '.nrrd'),\n Nrrd,\n header,\n index_order='C')", "def plot_snr(tseries, lb=0, ub=None, fig=None):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n ax_spectra = fig.add_subplot(1, 2, 1)\r\n ax_snr_info = fig.add_subplot(1, 2, 2)\r\n\r\n A = []\r\n info = []\r\n s_n_r = []\r\n coh = []\r\n noise_spectra = []\r\n signal_spectra = []\r\n #If you only have one channel, make sure that everything still works by\r\n #adding an axis\r\n if len(tseries.data.shape) < 3:\r\n this = tseries.data[np.newaxis, :, :]\r\n else:\r\n this = tseries.data\r\n\r\n for i in range(this.shape[0]):\r\n A.append(nta.SNRAnalyzer(ts.TimeSeries(this[i],\r\n sampling_rate=tseries.sampling_rate)))\r\n info.append(A[-1].mt_information)\r\n s_n_r.append(A[-1].mt_snr)\r\n coh.append(A[-1].mt_coherence)\r\n noise_spectra.append(A[-1].mt_noise_psd)\r\n signal_spectra.append(A[-1].mt_signal_psd)\r\n\r\n freqs = A[-1].mt_frequencies\r\n\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, lb, ub)\r\n freqs = freqs[lb_idx:ub_idx]\r\n\r\n coh_mean = np.mean(coh, 0)\r\n snr_mean = np.mean(s_n_r, 0)\r\n info_mean = np.mean(info, 0)\r\n n_spec_mean = np.mean(noise_spectra, 0)\r\n s_spec_mean = np.mean(signal_spectra, 0)\r\n\r\n ax_spectra.plot(freqs, np.log(s_spec_mean[lb_idx:ub_idx]), label='Signal')\r\n ax_spectra.plot(freqs, np.log(n_spec_mean[lb_idx:ub_idx]), label='Noise')\r\n ax_spectra.set_xlabel('Frequency (Hz)')\r\n ax_spectra.set_ylabel('Spectral power (dB)')\r\n\r\n ax_snr_info.plot(freqs, snr_mean[lb_idx:ub_idx], label='SNR')\r\n ax_snr_info.plot(np.nan, np.nan, 'r', label='Info')\r\n ax_snr_info.set_ylabel('SNR')\r\n ax_snr_info.set_xlabel('Frequency (Hz)')\r\n ax_info = ax_snr_info.twinx()\r\n ax_info.plot(freqs, np.cumsum(info_mean[lb_idx:ub_idx]), 'r')\r\n ax_info.set_ylabel('Cumulative information rate (bits/sec)')\r\n return fig", "def noe_analysis():\n\n files = ['f34k_7p2_noe_heights.txt', 'dphs_7p4_noe_heights.txt']\n proteins = ['f34k','dphs']\n\n heights = [nmrfn.parse_noe(f) for f in files]\n heights = pd.concat(heights, axis=1, keys=proteins)\n\n results = [noe_calcs(heights[i]) for i in proteins]\n results = pd.concat(results, axis=1, keys=proteins)\n\n fig = plt.figure(figsize=(10,6))\n val = plt.subplot(211)\n dif = plt.subplot(212)\n \n seqmin = 6\n seqmax = 144\n noemin = 0.5\n noemax = 0.95\n\n dy = results.dphs.noe - results.f34k.noe\n dif_plot(val, dy.index, dy.ix[8:141], seqmin, seqmax)\n noe_plot(proteins, results, seqmin, seqmax, noemin, noemax, xax=False)\n\n s1 = plt.Rectangle((0, 0), 1, 1, fc=proteins['f34k'])\n s2 = plt.Rectangle((0, 0), 1, 1, fc=proteins['dphs'])\n val.legend([s1, s2], ['F34K', '∆+PHS'], loc=8)\n\n title = 'HN NOE: F34K pH 7.2 compared with ∆+PHS pH 7.4'\n savefile = 'f34k_7p2_noe_analysis.pdf'\n save_plot(title, savefile)", "def plot_seaice_timeseries(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/timeseries/SIC/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += 
temporal_resolution\n title += ' mean SIC in Antarctica'\n\n\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n\n if seaice_source == 'nsidc':\n seaice = seaice\n mean_seaice = seaice_area_mean(seaice[seaicename],1)\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(mean_seaice.time.values.astype(float), mean_seaice)\n if seaice_source =='ecmwf':\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice[seaicename].time.values.astype(float), seaice[seaicename].sum(dim = ('longitude', 'latitude')))\n ax = plt.gca()\n if anomlous or detrend: ax.axhline(0, alpha = 0.5)\n if seaice_source == 'nsidc':\n mean_seaice = seaice_area_mean(seaice[seaicename],1)\n plt.plot(seaice.time, mean_seaice)\n\n if seaice_source == 'ecmwf':\n plt.plot(seaice.time, seaice[seaicename].mean(dim = ('longitude', 'latitude')))\n plt.plot(seaice.time, (seaice_m * seaice.time.values.astype(float) + seaice_b), color = '#177E89')\n plt.title(title)\n plt.savefig(imagefolder + seaicename+f'_{seaice_source}.pdf')\n plt.show()", "def SaveNIFTI(data, file_path):\n if(np.iscomplex(data).any()):\n data = abs(data)\n nii = nib.Nifti1Image(data, np.eye(4)) \n nib.save(nii, file_path)", "def plotSate(s,i,seed):\r\n fig, ax = plt.subplots()\r\n\r\n im = ax.imshow(s)\r\n\r\n plt.xticks([i for i in range(dim)], \"\")\r\n plt.yticks([i for i in range(dim)], \"\")\r\n\r\n fig.tight_layout()\r\n plt.savefig(\"Systems/\" + str(dim) + \"_\" + str(seed) + \"/Images/\" + str(i) +\r\n \".jpeg\",quality=80,optimize=True,\r\n dpi=80,progressive=True,transparent=True)\r\n fig.clear()\r\n plt.close(fig)", "def fun_cnoise_Stim(self, t_stim = 10*s, sexp = 0, cutf = 0, do_csd = 1, t_qual = 0, freq_used = np.array([]), K_mat_old = np.array([]), inh_factor = [1], onf = None, equi = 0):\n self.barrier() # wait for other nodes\n \n filename = str(self.pickle_prefix) + \"_results_pop_cnoise.p\"\n filepath = self.data_dir + \"/\" + filename\n \n if self.id == 0: print \"- filepath:\", filepath \n \n if self.do_run or (os.path.isfile(filepath) is False):\n\n tstart = 0; \n fs = 1 / self.dt # sampling rate \n fmax = fs / 2 # maximum frequency (nyquist)\n \n t_noise = arange(tstart, t_stim, self.dt) # create stimulus time vector, make sure stimulus is even!!!\n\n #print self.syn_ex_dist\n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_ex_dist == []):\n for nt in range(self.n_celltypes): # loop over all cells\n #print \"nt\", nt\n if hasattr(self.cells[nt][0], 'input_vec'):\n self.syn_ex_dist.append([1] * len(self.cells[nt][0].input_vec)) # default ex for all by default!!!\n else: \n self.syn_ex_dist.append([1] * self.n_syn_ex[nt]) # default ex for all by default!!!\n \n #print self.syn_ex_dist\n \n if (self.syn_ex_dist[0] == []):\n nemax = 1\n else:\n nemax = max([item for sublist in self.syn_ex_dist for item in sublist])\n \n if (self.syn_inh_dist == []): # and (any(self.n_syn_inh) > 0)\n for nt in range(self.n_celltypes): # loop over all cells\n self.syn_inh_dist.append([0] * self.n_syn_inh[nt]) # default no inh for all by default!!!\n \n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_inh_dist[0] == []):\n nimax = 0\n else:\n nimax = max([item for sublist in self.syn_inh_dist for item in sublist]) \n \n #print \"self.syn_inh_dist, self.syn_ex_dist\", self.syn_inh_dist, self.syn_ex_dist\n \n n_noise = max([nemax,nimax]) # number of noise sources\n #print 
n_noise,nemax,nimax\n # create reproduceable input\n noise_data = []\n\n for nj in range(n_noise):\n \n if self.id == 0: # make sure all have the same signal !!!\n if len(freq_used) == 0: \n noise_data0 = create_colnoise(t_noise, sexp, cutf, self.seed+nj, onf = onf)\n else:\n noise_data0, _, _, _ = create_multisines(t_noise, freq_used) # create multi sine signal\n else:\n noise_data0 = np.empty(len(t_noise), dtype=np.float64)\n\n noise_data0 = self.broadcast(noise_data0, fast = True) \n \n noise_data.append(noise_data0)\n noise_data0 = [] \n \n noise_data_points = len(noise_data[0]) \n\n # Create signal weight vector inh_factor if it is not fully given\n if len(noise_data) > len(inh_factor):\n inh_factor = [inh_factor[0]] * len(noise_data) \n print \"inh_factor:\", inh_factor\n\n #if equi:\n #pass\n # tstop = t_stim\n \n if max(self.n_syn_ex) == 0: # this means current input\n \n self.set_IStim() # sets amp\n \n if self.fluct_s != []:\n if self.fluct_s[self.a_celltype[0]] > 0:\n if self.id == 0: print \"- adding i fluct\"\n self.connect_fluct()\n \n for i, m in enumerate(self.method_interpol):\n if \"syn\" in m: self.method_interpol[i] = \"syn \" + str(self.syn_tau1/ms) + \"/\" + str(self.syn_tau2/ms) + \"ms\"\n if \"bin\" in m: self.method_interpol[i] = \"bin \" + str(self.bin_width/ms) + \"ms\"\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, self.amp[self.a_celltype[0]], ihold = 0, delay_baseline = self.delay_baseline) # , tail_points = 0\n stimulus.append(stimulus0)\n tstop = t[-1]\n \n self.set_IPlay2(stimulus, t)\n if self.id == 0: print \"- starting colored noise transfer function estimation! with amp = \" + str(np.round(self.amp[self.a_celltype[0]],4)) + \", ihold = \" + str(np.round(self.ihold[self.a_celltype[0]],4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n else:\n\n self.give_freq = False\n ihold = self.set_i(self.ihold) # just sets amp, ihold should not change! \n\n if 'gsyn_in' not in self.method_interpol: \n pass\n else:\n self.g_syn_ex = [1]*len(self.N)\n \n \n if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):\n if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):\n if self.id == 0: print \"- adding g fluct\"\n self.connect_gfluct(E_i=-65)\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) # self.amp\n stimulus.append(stimulus0)\n \n noise_data = [] \n tstop = t[-1]\n \n if self.N[self.a_celltype[0]] > 1:\n self.set_IStim(ihold = [0]*self.n_celltypes, ihold_sigma = [0]*self.n_celltypes, random_start = True, tstart_offset = 1)\n if self.id == 0: print \"- add random start\"\n \n #print \"Enter Synplay()\"\n self.set_SynPlay(stimulus, t, t_startstop = t_startstop) \n #print \"Exit Synplay()\"\n\n if self.id == 0: print \"- starting colored noise transfer function estimation with synaptic input! 
with amp = \" + str(np.round(self.amp,4)) + \", ihold = \" + str(np.round(self.ihold,4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n amp_vec = []\n mag_vec = [] \n pha_vec = []\n freq_used = []\n ca = []\n SNR_mat = []\n VAFf_mat = []\n Qual_mat = []\n CF_mat = [] \n VAF_mat = []\n stim = []\n stim_re_mat = []\n resp_mat = []\n current_re = []\n ihold1 = []\n tk = []\n K_mat = []\n gsyn_in = []\n fmean = []\n fmax = [] \n fmstd = [] \n fcvm = [] \n fmeanA = []\n fmaxA = [] \n fmstdA = [] \n fcvmA = [] \n t_all_vec_input_sorted = []\n id_all_vec_input_sorted = []\n \n if (self.id == 0) and (max(self.n_syn_ex) > 0):\n print range(self.n_celltypes), np.shape(self.t_all_vec_input)\n for l in range(self.n_celltypes): \n ie = argsort(self.t_all_vec_input[l]) \n t_all_vec_input_sorted.append( self.t_all_vec_input[l][ie] )\n id_all_vec_input_sorted.append( self.id_all_vec_input[l][ie].astype(int) )\n \n #if (self.id == 0): \n # print self.g_syn_ex\n # print np.array(self.g_syn_ex)>= 0\n \n #print \"g_syn_ex:\",self.g_syn_ex\n if np.array(np.array(self.g_syn_ex)>= 0).any():\n \n if hasattr(self.cells[self.a_celltype[0]][0], 'get_states') and equi:\n print \"- Equilibrate!\"\n self.run(tstop, do_loadstate = False)\n m = md5.new()\n cell_exe_new = self.cell_exe[0]\n m.update(cell_exe_new)\n filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'\n self.cells[self.a_celltype[0]][0].get_states(filename)\n else:\n self.run(tstop, do_loadstate = False)\n \n i_startstop = []\n \n results = self.get(t_startstop, i_startstop) \n time = results.get('time')\n current = results.get('current') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n gsyn = results.get('gsyn') \n freq_times = results.get('freq_times')\n spike_freq = results.get('spike_freq')\n t_all_vec_vec = results.get('t_all_vec_vec')\n id_all_vec_vec = results.get('id_all_vec_vec')\n gsyns = results.get('gsyns')\n gsyn_in = results.get('gsyn_in')\n \n fmax = results.get('fmax')\n fmstd = results.get('fmstd')\n fcvm = results.get('fcvm')\n \n fmeanA = results.get('fmeanA') \n fmaxA = results.get('fmaxA')\n fmstdA = results.get('fmstdA')\n fcvmA = results.get('fcvmA')\n \n fbaseA = results.get('fbaseA') \n fbase = results.get('fbase')\n fbstdA = results.get('fbstdA')\n \n \n else: # do not run, analyse input!!!\n \n time = t\n voltage = []\n for l in range(self.n_celltypes): \n voltage.append(np.zeros(len(t)))\n current = []\n \n freq_times = []\n spike_freq = []\n gsyn = []\n gsyn_in = []\n \n t_all_vec_vec = []\n id_all_vec_vec = []\n \n fmean = []\n fmax = []\n fmstd = []\n fcvm = []\n fstdm = []\n \n fmeanA = []\n fmaxA = []\n fmstdA = []\n fcvmA = []\n fbaseA = []\n fbase = []\n fbstdA = []\n \n if self.id == 0:\n \n current = self.n_train_ex\n \n #t_all_vec = self.t_all_vec_input\n #id_all_vec = self.id_all_vec_input\n\n #ie = argsort(t_all_vec) \n #t_all_vec_vec.append( t_all_vec[ie] )\n #id_all_vec_vec.append( id_all_vec[ie].astype(int) )\n \n t_all_vec_vec = t_all_vec_input_sorted\n id_all_vec_vec = id_all_vec_input_sorted\n \n freq_times = arange(0, tstop, self.bin_width)\n spike_freq = np.zeros(len(freq_times))\n \n for j in self.a_celltype:\n \n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)\n\n if self.tau2_ex[0] > 0:\n spike_freq = np.concatenate((zeros(1),num_spikes)) \n print \"NOSYN TEST: start convolution with Ksyn\"\n Ksyn = 
syn_kernel(arange(0,10*self.tau2_ex[0],self.bin_width), self.tau1_ex[0], self.tau2_ex[0]) \n Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))\n spike_freq = np.convolve(Ksyn, spike_freq, mode='same')\n print \"NOSYN TEST: convolution finished\"\n else:\n\n if isinstance(self.factor_celltype[j], ( int, long ) ):\n f = self.factor_celltype[j] \n else:\n f = self.factor_celltype[j][0] \n \n spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width\n\n fmean.append(self.fmean_input)\n fmax.append(self.fmax_input) \n fmstd.append(self.fmstd_input) \n fcvm.append(self.fcvm_input) \n fstdm.append(self.fstdm_input)\n\n if self.no_fmean == True:\n fmean.append(ihold)\n \n #plt.figure('spike_freq') \n #plt.plot(freq_times, spike_freq)\n #plt.savefig(\"./figs/Pub/Spike_freq_\" + str(self.pickle_prefix) + \".pdf\", dpi = 300, transparent=True) # save it \n #plt.clf()\n \n fmeanA = fmean[0]\n fmaxA = fmax[0]\n fmstdA = fmstd [0] \n fcvmA = fcvm[0]\n fstdmA = fstdm[0]\n \n \n if self.id == 0: \n \n if any([i<0 for i in inh_factor]):\n \n p0 = []\n inhf_idx = []\n for i, inhf in enumerate(inh_factor):\n if inhf < 0: \n p0.append(0) \n inhf_idx.append(i)\n \n plsq = fmin(self.residuals_compute_Transfer, p0, args=(stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor))\n p = plsq\n \n ip = 0\n for i in inhf_idx:\n inh_factor[i] = p[ip]\n ip += 1\n \n\n print \"Final inh_factor: \", inh_factor\n \n \n results = self.compute_Transfer(stimulus, spike_freq = spike_freq, freq_times = freq_times, \n t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, \n do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor=inh_factor)\n \n mag_vec, pha_vec, ca, freq, freq_used, fmean_all = results.get('mag_mat'), results.get('pha_mat'), results.get('ca_mat'), results.get('freq'), results.get('freq_used'), results.get('fmean') \n SNR_mat, VAFf_mat, Qual_mat, CF_mat, VAF_mat = results.get('SNR_mat'), results.get('VAFf_mat'), results.get('Qual_mat'), results.get('CF_mat'), results.get('VAF_mat') \n stim, resp_mat, stim_re_mat, tk, K_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat'), results.get('tk'), results.get('K_mat') \n \n \n self.barrier() # wait for other nodes\n \n \n if self.id == 0:\n \n if t_qual > 0:\n #print t_startstop[0], t_startstop[0]/self.dt, (t_startstop[0]+t_qual)/self.dt\n current_re = current[int(t_startstop[0]/self.dt):int((t_startstop[0]+t_qual)/self.dt)]\n current_re = current_re[int(len(K_mat[self.a_celltype[0]])):int(len(current_re))-int(len(K_mat[self.a_celltype[0]]))]\n \n if len(self.i_holdrs) > 0:\n ihold1 = self.i_holdrs[self.a_celltype[0]][0]\n else:\n ihold1 = []\n \n for l in range(len(self.method_interpol)): # unwrap \n pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase\n \n # only return fraction of actual signal, it is too long!!! 
\n if time[-1] > self.tmax: \n imax = -1*int(self.tmax/self.dt)\n time = time[imax:]; current = current[imax:]; gsyn = gsyn[imax:]; gsyn_in = gsyn_in[imax:]\n for n in range(self.n_celltypes): \n voltage[n] = voltage[n][imax:]\n \n if freq_times != []: \n if freq_times[-1] > self.tmax:\n imax2 = where(freq_times > self.tmax)[0][0] # for spike frequency \n freq_times = freq_times[0:imax2]; spike_freq = spike_freq[0:imax2] \n \n bvec = [\"_syn\" in st for st in self.method_interpol]\n if np.any(bvec):\n # normalize synaptic integration with others \n mag_vec[1,:]= mag_vec[0,0]*mag_vec[1,:]/mag_vec[1,0] \n \n if self.id == 0: print \"start pickle\"\n \n results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage,'tk':tk,'K_mat':K_mat, 'ihold1': ihold1, 't_startstop':t_startstop, #'stimulus':stimulus,\n 'current':current,'t1':time,'freq_times':freq_times,'spike_freq':spike_freq, 'stim':stim, 'stim_re_mat':stim_re_mat, 'resp_mat':resp_mat, 'current_re':current_re, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fbaseA':fbaseA, 'fbase':fbase, 'fbstdA':fbstdA,\n 'fmean':fmean,'method_interpol':self.method_interpol, 'SNR':SNR_mat, 'VAF':VAFf_mat, 'Qual':Qual_mat, 'CF':CF_mat, 'VAFs':VAF_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'inh_factor':inh_factor, 't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec} \n \n if self.id == 0:\n if self.dumpsave == 1:\n pickle.dump( results, gzip.GzipFile( filepath, \"wb\" ) )\n print \"pickle done\" \n \n \n if self.plot_train:\n \n for a in self.a_celltype:\n\n #i_start = mlab.find(t_all_vec_vec[a] >= 0)[0]\n #i_stop = mlab.find(t_all_vec_vec[a] >= 5)[0]\n \n #t_all_cut = t_all_vec_vec[a][i_start:i_stop]\n #id_all_cut = id_all_vec_vec[a][i_start:i_stop]\n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(use_spikes,use_id,'|', ms=2)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n \n plt.clf()\n \n if len(t_all_cut) > 0:\n \n tbin = 100*ms\n tb = np.arange(0,t[-1],tbin)\n [all_rate, _] = neuronpy.util.spiketrain.get_histogram(t_all_cut, bins = tb)\n all_rate = np.concatenate((np.zeros(1),all_rate)) / self.N[a] / tbin\n \n plt.figure('results_train2') \n plt.plot(tb,all_rate)\n plt.savefig(\"./figs/Pub/PSTH_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n plt.figure('results_noise') \n plt.plot(time,current)\n plt.savefig(\"./figs/Pub/Noise_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n \n if self.plot_input:\n \n if len(t_all_vec_input_sorted[0]) > 0:\n \n i_start = mlab.find(t_all_vec_input_sorted[0] >= 0)[0]\n i_stop = mlab.find(t_all_vec_input_sorted[0] >= 5)[0]\n \n t_all_cut = t_all_vec_input_sorted[0][i_start:i_stop]\n id_all_cut = id_all_vec_input_sorted[0][i_start:i_stop]\n 
\n plt.figure('results_input') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(t_all_cut,id_all_cut,'|', ms=2)\n plt.text(0.5, 1.1, r'fmean=' + str(round(self.fmean_input,1)) + ',fmax=' + str(round(self.fmax_input,1)) + ',fmstd=' + str(round(self.fmstd_input,1)) + ',fcvm=' + str(round(self.fcvm_input,1)) + ',fstdm=' + str(round(self.fstdm_input,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Input_\" + str(self.pickle_prefix) + \"_N\" + str(self.N[self.a_celltype[0]]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n\n else:\n \n if self.id == 0:\n results = pickle.load( gzip.GzipFile( filepath, \"rb\" ) )\n \n #print results\n #print {key:np.shape(value) for key,value in results.iteritems()}\n \n if self.minimal_dir: # save only info needed for plot\n \n print {key:np.shape(value) for key,value in results.iteritems()}\n \n if \"Fig6_pop_transfer_grc_syngr_nsyn4_cn_a1_noisesynlow_inhlow_adjfinh_varih_N100_CFo6.0_results_pop_cnoise.p\" in filename:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = [] \n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_is0.14_CFo9.0_results_pop_cnoise.p\" in filename) \\\n :\n\n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_is0.14_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo14.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo14.0_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig4_pop_transfer_grc_cn_addn100_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in 
filename) \\\n or (\"Fig4_pop_transfer_grc_cn_addn1_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_twopop_N[50, 50]_CF[0.0055, 0.0055]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_N[100]_CF[0.0055]_amod[None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_twopop_N[50, 50]_CF[0.0051, 0.0051]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_N[100]_CF[0.0051]_amod[None]_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig2_pop_transfer_\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n \n else:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n\n print {key:np.shape(value) for key,value in results.iteritems()}\n\n pickle.dump( results, gzip.GzipFile( self.minimal_dir + \"/\" + filename, \"wb\" ) ) \n \n else:\n results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 'tk':[],'K_mat':[], 'ihold1':[], 't_startstop':[], #'stimulus':[],\n 'current':[],'t1':[],'freq_times':[],'spike_freq':[], 'stim':[], 'stim_re_mat':[], 'current_re':[], 'gsyn_in':[], 'fmeanA':[], 'fmaxA':[], 'fmstdA':[], 'fcvmA':[], 'fbaseA':[], 'fbase':[], 'fbstdA':[],\n 'fmean':[],'method_interpol':self.method_interpol, 'SNR':[], 'VAF':[], 'Qual':[], 'CF':[], 'VAFs':[], 'fmax':[], 'fmstd':[], 'fcvm':[], 'inh_factor':[], 't_all_vec_vec':[], 'id_all_vec_vec':[]} \n \n if self.id == 0: \n\n if self.plot_train: \n\n for a in self.a_celltype:\n \n t1 = results.get('t1') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n fmax = results.get('fmax') \n fmstd = results.get('fmstd') \n \n \n if results.has_key('t_all_vec_vec'):\n \n if len(results['t_all_vec_vec']) > 0: \n t_all_vec_vec = results.get('t_all_vec_vec') \n id_all_vec_vec = results.get('id_all_vec_vec') \n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax97 = plt.subplot(1,1,1)\n ax97.plot(use_spikes,use_id,'|', ms=6)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax97.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, 
transparent=True) # save it \n\n \n plt.figure('results_voltage') \n ax99 = plt.subplot(2,1,1)\n ax99.plot(t1,voltage[a])\n \n t_noise = arange(0, t_stim, self.dt)\n noise_data = create_colnoise(t_noise, sexp, cutf, 50, onf = onf)\n stimulus, t, t_startstop = construct_Stimulus(noise_data, 1/self.dt, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) \n ax98 = plt.subplot(2,1,2)\n ax98.plot(t[0:10/self.dt],stimulus[0:10/self.dt],color='k')\n \n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Voltage_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.show()\n plt.clf()\n \n if (self.id == 0) and (do_csd == 1):\n Qual = results.get('Qual') \n for i, ii in enumerate(self.method_interpol):\n print \"\\n[QUAL:] Interpol:\", ii, \"SNR0:\", Qual[i,0,0], \"SNR_cutff:\", Qual[i,0,1], \"SNR_mean:\", Qual[i,0,2], \"\\n VAF0:\", Qual[i,1,0], \"VAF_cutff:\", Qual[i,1,1], \"VAF_mean:\", Qual[i,1,2], \"\\n CF(subtracted):\", Qual[i,2,0], \"VAF(subtracted):\", Qual[i,2,1] \n \n VAF = results.get('VAF')\n freq_used = results.get('freq_used') \n iend = mlab.find(freq_used >= self.xmax)[0] \n print 'm(VAF)=' + str(np.mean(VAF[1][0,0:iend])) \n \n self.barrier() # wait for other nodes\n \n return results", "def plot_sic_sic_timeseries(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/timeseries/SIC/'):\n output_folder = 'processed_data/'\n\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n if detrend:\n dt = 'detrended'\n else:\n dt = 'raw'\n\n filename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n indicies = xr.open_dataset(output_folder + 'ERA5/SIC/' + filename +'.nc')[filename].mean(dim = ('longitude', 'latitude'))\n data = indicies.copy()\n data = data.loc[data.time.dt.year >= 1979]\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + 'SIC/' + seaicename +'.nc')[seaicename]\n\n times = list(set.intersection(set(seaice.time.values), set(data.time.values)))\n\n seaice = seaice_area_mean(seaice.sel(time=times).sortby('time'), 1)\n data = data.sel(time=times).sortby('time')\n\n seaice = seaice_area_mean(seaice,1)\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice.time.values.astype(float), seaice)\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n title += dt + ' '\n\n title += temporal_resolution\n title += f' mean ERA5 and SIC'\n fig, ax = plt.subplots()\n ax2 = plt.twinx(ax)\n ax2.plot([],[])\n\n if anomlous or detrend: ax.axhline(0, alpha = 0.5)\n\n ln1 = ax.plot(data.time, data, label = f'ERA5', color = '#EA1B10')\n ax.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#EA1B10')\n ln2 = ax2.plot(seaice.time, seaice, label = 'SIC', color = '#177E89')\n ax2.plot(seaice.time, seaice_m * seaice.time.values.astype(float) + seaice_b, color = '#177E89')\n\n if anomlous or detrend:\n yabs_max = abs(max(ax2.get_ylim(), key=abs))\n ax2.set_ylim(ymin=-yabs_max, ymax=yabs_max)\n\n yabs_max = abs(max(ax.get_ylim(), key=abs))\n ax.set_ylim(ymin=-yabs_max, ymax=yabs_max)\n\n # 
ylabels\n ax.set_ylabel(f'ECMWF')\n ax2.set_ylabel(f'Mean SIC')\n\n # legend\n lines = ln1 + ln2\n labels = [line.get_label() for line in lines]\n plt.legend(lines,labels,bbox_to_anchor=(0.99, -0.15), ncol = 2, loc = 'upper right')\n\n plt.title(title)\n plt.savefig(imagefolder + f'/SIC_ERA5_{filename}' + '.pdf')\n plt.show()", "def TICwriter(TIC, dataFile, saveDirectory):\n #Create savename from data file name:\n savefile = dataFile.split('/')[-1].split('.')[0] + '_TIC.png'\n #Create ouput directory:\n saveDirectory = os.path.join(saveDirectory, 'output/')\n os.makedirs(os.path.dirname(saveDirectory), exist_ok=True)\n #Plot figure:\n Plot = pl.figure()\n TICplot = Plot.add_subplot(111)\n TICplot.plot([d[0] for d in TIC], [d[1] for d in TIC])\n \n #Save and close plot:\n pl.savefig(saveDirectory + savefile)\n pl.close(Plot)", "def fn_intensitytrace(file_name,folder,data,time,i,x,y):\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n figure_name=file_name+'_intensity_trace'\n for a in range(0,len(data),int(len(data)/2)):\n if i==a:\n x_coord=x[i+1]\n y_coord=y[i+1]\n max_int=np.max(data[i])\n min_int=np.min(data[i])\n #norm_int = [b / max_int for b in data[i]]\n plt.figure()\n #plt.plot(time[0:len(time)-1],norm_int,'g')\n plt.plot(time[0:len(time)-1],data[i],'g')\n plt.xlim(0, 100)\n plt.ylim(min_int, (max_int+100))\n plt.xlabel('Time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Photon counts (photons)', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.png', dpi=500)\n\n return (plt.show())", "def plot_TSNE(fig_name):\n dir = \"log/peps mini\"\n pattern = r'(internal|access|lock)\\\\\\d{1,2}.csv$'\n pattern_valid = r'(3|6|9|12).csv$'\n utils.construct_set(dir, pattern, pattern_valid)\n X, y = utils.load_all()\n utils.plot_TSNE(X, y)\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')", "def process_nitrate(store, site):\n constituent = 'NitrateSurr'\n db_path = '/said/{}/'.format(site['id'])\n iv_path = db_path + 'iv'\n df = store.get(iv_path)\n\n n_error = np.maximum(0.5, df[constituent]*.1)\n df[constituent+'_U90.0'] = df[constituent] + n_error\n df['NitrateSurr_L90.0'] = df.NitrateSurr - n_error\n #clip values below 0\n df['NitrateSurr_L90.0'] = np.maximum(0, df['NitrateSurr_L90.0'])\n\n update_table(store, iv_path, df)", "def snr_stats(\r\n t,\r\n y,\r\n period,\r\n duration,\r\n T0,\r\n transit_times,\r\n transit_duration_in_days,\r\n per_transit_count,\r\n):\r\n\r\n snr_per_transit = numpy.zeros([len(transit_times)])\r\n snr_pink_per_transit = numpy.zeros([len(transit_times)])\r\n intransit = transit_mask(t, period, 2 * duration, T0)\r\n flux_ootr = y[~intransit]\r\n\r\n try:\r\n pinknoise = pink_noise(flux_ootr, int(numpy.mean(per_transit_count)))\r\n except:\r\n pinknoise = numpy.nan\r\n\r\n # Estimate SNR and pink SNR\r\n # Second run because now the out of transit points are known\r\n if len(flux_ootr) > 0:\r\n std = numpy.std(flux_ootr)\r\n else:\r\n std = numpy.nan\r\n for i in range(len(transit_times)):\r\n mid_transit = transit_times[i]\r\n tmin = mid_transit - 0.5 * transit_duration_in_days\r\n tmax = mid_transit + 0.5 * transit_duration_in_days\r\n if numpy.isnan(tmin) or numpy.isnan(tmax):\r\n idx_intransit = []\r\n 
mean_flux = numpy.nan\r\n else:\r\n idx_intransit = numpy.where(numpy.logical_and(t > tmin, t < tmax))\r\n if len(y[idx_intransit]) > 0:\r\n mean_flux = numpy.mean(y[idx_intransit])\r\n else:\r\n mean_flux = numpy.nan\r\n\r\n intransit_points = numpy.size(y[idx_intransit])\r\n try:\r\n snr_pink_per_transit[i] = (1 - mean_flux) / pinknoise\r\n if intransit_points > 0 and not numpy.isnan(std):\r\n std_binned = std / intransit_points ** 0.5\r\n snr_per_transit[i] = (1 - mean_flux) / std_binned\r\n else:\r\n snr_per_transit[i] = 0\r\n snr_pink_per_transit[i] = 0\r\n except:\r\n snr_per_transit[i] = 0\r\n snr_pink_per_transit[i] = 0\r\n\r\n return snr_per_transit, snr_pink_per_transit", "def writetipsy(self, outfile=None, hubble=None):\n from . import analysis\n from . import tipsy\n from .analysis import cosmology\n from snapshot import _new as new\n import math\n s = self._base()\n if outfile is None: outfile = s.filename+'.gtp'\n print \"write tipsy file to \", outfile\n sout = new(star=self._nhalos) # create new tipsy snapshot written as halos.\n sout.properties['a'] = s.properties['a']\n sout.properties['z'] = s.properties['z']\n sout.properties['boxsize'] = s.properties['boxsize']\n if hubble is None: hubble = s.properties['h']\n sout.properties['h'] = hubble\n ### ! dangerous -- rho_crit function and unit conversions needs simplifying\n rhocrithhco = cosmology.rho_crit(s, z=0, unit=\"Msol Mpc^-3 h^2\")\n lboxkpc = sout.properties['boxsize'].ratio(\"kpc a\")\n lboxkpch = lboxkpc*sout.properties['h']\n lboxmpch = lboxkpc*sout.properties['h']/1000.\n tipsyvunitkms = lboxmpch * 100. / (math.pi * 8./3.)**.5\n tipsymunitmsun = rhocrithhco * lboxmpch**3 / sout.properties['h']\n\n print \"transforming \", self._nhalos, \" halos into tipsy star particles\"\n for ii in xrange(self._nhalos):\n h = self[ii+1].properties\n sout.star[ii]['mass'] = h['m']/hubble / tipsymunitmsun\n ## tipsy units: box centered at 0. 
(assume 0<=x<=1)\n sout.star[ii]['x'] = h['pos'][0][0]/lboxmpch - 0.5\n sout.star[ii]['y'] = h['pos'][0][1]/lboxmpch - 0.5\n sout.star[ii]['z'] = h['pos'][0][2]/lboxmpch - 0.5\n sout.star[ii]['vx'] = h['vel'][0][0]/tipsyvunitkms\n sout.star[ii]['vy'] = h['vel'][0][1]/tipsyvunitkms\n sout.star[ii]['vz'] = h['vel'][0][2]/tipsyvunitkms\n sout.star[ii]['eps'] = h['r']/lboxkpch\n sout.star[ii]['metals'] = 0.\n sout.star[ii]['phi'] = 0.\n sout.star[ii]['tform'] = 0.\n print \"writing tipsy outfile %s\"%outfile\n sout.write(fmt=tipsy.TipsySnap, filename=outfile)\n return sout", "def summarize_series(fglob, outfile):\n with open(outfile, mode='w') as of:\n #Iterate over files\n flist = glob(fglob)\n flist = sorted(flist)\n lgrho = [] #list of log(rho) values, parallel to the list of rxnmap maps\n rxns = [] #list of maps of form 'rxn_name' --> energy release [erg/g/s]\n for f in flist:\n rxnmap = {}\n currxn = ''\n eps_nuc = ''\n for line in open(f,mode='r'):\n if not currxn and line.count('reaction name') == 1:\n i1 = line.index('<') + 1\n i2 = line.index('>')\n currxn = line[i1:i2]\n elif currxn and line.count('eps_nuc') == 1:\n eps_nuc = float(line.partition(':')[2].strip())\n rxnmap[currxn] = eps_nuc\n currxn = ''\n elif line.count('log rho') == 1:\n lgrho.append(line.partition('rho')[2].strip())\n srtmap = sorted(rxnmap.items(), key=operator.itemgetter(1), reverse=True) #sort on values\n rxns.append(srtmap)\n\n #Write header\n of.write('log(rho): ' + (' {:3.3s} |'*len(lgrho)).format(*lgrho) + '\\n')\n\n #Write rows of data for each logrho, include top ten rxns\n start = ' '\n for i in range(10):\n of.write(start)\n for tup in rxns:\n of.write('{:23s}'.format(tup[i][0]))\n of.write('\\n')\n of.write(start)\n for tup in rxns:\n of.write('{:<23.8e}'.format(tup[i][1]))\n of.write('\\n\\n')", "def runErdosRenyi(n,p):\n s = z.Optimize()\n g = ig.Graph.Erdos_Renyi(n, p, directed=True, loops=True)\n while g.is_dag():\n g = ig.Graph.Erdos_Renyi(n, p, directed=True, loops=True)\n\n return MFAS_set_cover(s,g), u.get_feedback_arc_set(g)", "def simul_and_export(file, config, i):\n\n simulate_UVSPEC(file, config)\n\n load_skymap(config)\n\n sim = files_sim(config)[i]\n export_sim_rad(sim, config)", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. 
\\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return" ]
[ "0.5856596", "0.58353704", "0.58029324", "0.5760767", "0.57194346", "0.5685501", "0.565635", "0.5610267", "0.5596072", "0.5580801", "0.5550373", "0.55146116", "0.55122143", "0.54595554", "0.53850317", "0.53831476", "0.53568536", "0.5345135", "0.53432506", "0.5322765", "0.53195083", "0.53150904", "0.5302821", "0.52760804", "0.52736145", "0.5260181", "0.5254817", "0.5254216", "0.52344", "0.5229716" ]
0.78557074
0
Empties the models within the bag
def empty_bag(self): if self.peds is not None: for _, model in self.peds.items(): model.reset() self.drone.reset() self.subject.reset()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear():\n\t\tModel.counter = 0", "def ClearModels(self):\n self._modelFileNames = []\n self._models = []\n self.Modified(readAgain=True)", "def clearmodels(self):\n \n dbpath, config = self._start() \n ModelDescriptionTable(dbpath).empty()\n ModelPhenotypeTable(dbpath).empty()\n ModelScoreTable(dbpath).empty() \n self._end()", "def reset_bag(self):", "def clear(self) -> None:\n self.objects = []", "def clear(self):\n self.beginResetModel()\n self.root_item = RootItem()\n self.requests_items = {}\n self.endResetModel()", "def resetmodel(self):\n for key, value in self._dentsvertsdata.items():\n value.free()\n self._dentsvertsdata.clear()", "def clear_batch(self):\n self._batch_idx = 0\n self.variant_states = None\n self.object_specs = None\n self.object_attribute_values = None", "def clear(self):\n for ob in self.obs:\n ob.clear()\n return", "def clear(self):\r\n\t\tself.free_objects[:] = []", "def clear(self) -> None:\n # Creates a new, empty bag and assigns self.da to the new, empty bag.\n new_bag = Bag()\n self.da = new_bag.da", "def clear():\n MIGRATIONS.clear()", "def clear(self):\n self.vars = []", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()", "def _finalize(self):\n for model in self.models:\n model._finalize()", "def clear(self):\n self._items = []", "def clear(self) -> None:\n self.saved.clear()", "def clear(self):\n\n\t\tfor chain in self.chain:\n\t\t\tchain.clear()\n\n\t\tself.chain = []\n\t\tself.remark = []", "def clear(self):\r\n self.orderitem_set.all().delete()", "def reset(self):\n self.vna.write(edit_list(self.model))\n self.vna.write(clear_list(self.model))", "def clear(self):\n self._pkcache = {}\n self._typecache = defaultdict(dict)\n self.init()", "def clear(self):\n self._store = {}", "def reset(self):\n self.entities = set()\n self.frozen = False", "def reset(self):\n logging.info(\"Resetting DINTModel.\")\n if self.classifier:\n self.server.remove_model(self.classifier)\n # for ds in self.server.datasets:\n # self.server.remove_dataset(ds)\n # TODO: remove datasets?\n self.classifier = None", "def reset(self):\n self._setupObjects()", "def reset(self):\n # Clear mutable data, but leave the immutables intact\n self.train_data = {}\n self.val_data = {}\n self.test_data = {}\n self.model_files = []\n self.custom_data = {}\n # Remove all the physical assets\n for item in os.scandir(self.root_path):\n os.remove(item.path)\n # Reserialize\n self.serialize()", "def clear(self):\n self.versions = {}\n self.clearItems()", "def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()", "def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()", "def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()" ]
[ "0.77507436", "0.76478666", "0.7637113", "0.74050826", "0.7388874", "0.73118776", "0.7252325", "0.709604", "0.7079502", "0.6967123", "0.69053054", "0.6871042", "0.68186563", "0.6815605", "0.68100023", "0.68016225", "0.6794553", "0.6774137", "0.67709094", "0.67597353", "0.6744278", "0.6739675", "0.6722856", "0.67019886", "0.6701017", "0.6696451", "0.66916823", "0.6679739", "0.6679739", "0.6679739" ]
0.8674077
0
Determines if an input is a float.
def is_float(self, input): try: float(input) return True except ValueError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_float(x):\r\n try:\r\n float(x)\r\n except ValueError:\r\n return False\r\n return True", "def isfloat(value):\r\n try:\r\n float(value)\r\n return True\r\n except ValueError:\r\n return False", "def isFloat(value): \n try:\n float(value)\n return True\n except ValueError:\n return False", "def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def is_float(self, value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def isfloat(s):\n try:\n x = float(s)\n return True\n except:\n return False", "def is_float(input_string):\r\n try:\r\n float(input_string)\r\n return True\r\n except ValueError:\r\n return False", "def is_float(input_string):\r\n try:\r\n float(input_string)\r\n return True\r\n except ValueError:\r\n return False", "def is_float(value):\n try:\n float(value)\n except ValueError:\n return False\n else:\n return True", "def is_float(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def is_float(val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_float(self, val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def isit_float(s):\r\n try:\r\n int(s)\r\n return False\r\n except ValueError:\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def could_be_float(val):\n if val == None:\n return False\n\n if isinstance(val, float):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n f = float(val)\n if not isinstance(f, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_floatable(value):\n\n try:\n float(value)\n return True\n except:\n return False", "def is_float(string):\n try:\n return float(string)\n except ValueError:\n return False", "def isfloat(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isFloat(string):\n return (True)", "def is_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isFloat(string):\n try: float(string)\n except ValueError: return 0\n else: return 1", "def is_valid_float(input_string):\n assert input_string is not None\n try:\n float(input_string)\n return True\n except ValueError:\n return False", "def is_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def is_float(string: str) -> bool:\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isfloat(string:str) -> bool:\n try:\n float(string)\n return True\n except ValueError:\n return False", "def is_float(self, string):\n try:\n return decimal.Decimal(string)\n except decimal.DecimalException:\n return False", "def is_float(*args): \n try:\n for i in args:\n float(i)\n return True\n except Exception:\n return False", "def is_float_or_int(value):\n if type(value) is float:\n return True\n elif type(value) is int:\n return True\n else:\n return False", "def check_for_float(check):", "def isfloat(str):\n\n try:\n float(str)\n return True\t\t\t#Returns true if the string is a floating point number\n except (ValueError, TypeError):\n return False\t\t\t#Returns false otherwise", "def is_float(value):\n if isinstance(value, float):\n return True\n\n if isinstance(value, np.ndarray):\n return value.dtype == np.float64\n\n return False" ]
[ "0.82458997", "0.8190943", "0.81595534", "0.81558645", "0.81085587", "0.8093256", "0.807079", "0.807079", "0.80703425", "0.8061959", "0.8060793", "0.8011374", "0.7838578", "0.7823464", "0.77867234", "0.77559304", "0.7752695", "0.7747797", "0.7746936", "0.77243704", "0.7696662", "0.7658992", "0.7640902", "0.76312405", "0.76285326", "0.76165867", "0.7559674", "0.7531927", "0.75238013", "0.7520814" ]
0.884747
0
gpu_model_to_scale is a dict from model string to scale.
def avail_gpu_compute(self, gpu_model_to_scale): self._check_spy_stats_available() l = [] for u, model in zip(self._util.gpu_compute, self._capacity.gpu_model): found = False for k, scale in gpu_model_to_scale.items(): if k in model: found = True break if found: l.append(scale * (1 - u)) else: raise Exception('Unknown GPU model %s found on host %s' % (model, self.name)) return l
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale_model(model, scale):\n params = model.named_parameters()\n dict_params = dict(params)\n with torch.no_grad():\n for name, param in dict_params.items():\n dict_params[name].set_(dict_params[name].data * scale)", "def scale_model(model,scaleparname='A',scaleval=1):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant'),operation='*',\n parnames={'C1':scaleparname})\n setattr(res,scaleparname,scaleval)\n return res", "def petab_scale_to_amici_scale(scale_str):\n\n if scale_str == 'lin':\n return amici.ParameterScaling_none\n if scale_str == 'log':\n return amici.ParameterScaling_ln\n if scale_str == 'log10':\n return amici.ParameterScaling_log10\n raise ValueError(\"Invalid pscale \" + scale_str)", "def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)", "def get_scales(min_scale=0.2, max_scale=0.9,num_layers=6):\n\n # this code follows the original implementation of wei liu\n # for more, look at ssd/score_ssd_pascal.py:310 in the original caffe implementation\n min_ratio = int(min_scale * 100)\n max_ratio = int(max_scale * 100)\n step = int(np.floor((max_ratio - min_ratio) / (num_layers - 2)))\n min_sizes = []\n max_sizes = []\n for ratio in xrange(min_ratio, max_ratio + 1, step):\n min_sizes.append(ratio / 100.)\n max_sizes.append((ratio + step) / 100.)\n min_sizes = [int(100*min_scale / 2.0) / 100.0] + min_sizes\n max_sizes = [min_scale] + max_sizes\n\n # convert it back to this implementation's notation:\n scales = []\n for layer_idx in range(num_layers):\n scales.append([min_sizes[layer_idx], np.single(np.sqrt(min_sizes[layer_idx] * max_sizes[layer_idx]))])\n return scales", "def convert_scale(g, op, block):\n\n scale = op.attr(\"scale\")\n bias = op.attr(\"bias\")\n bias_after_scale = op.attr(\"bias_after_scale\")\n x = g.get_node(op.input(\"X\")[0])\n if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):\n out = x\n else:\n if np.isclose(bias, 0.0):\n out = x * _expr.const(np.array(scale).astype(\"float32\"))\n elif np.isclose(scale, 1.0):\n out = x + _expr.const(np.array(bias).astype(\"float32\"))\n else:\n if bias_after_scale:\n out = x * _expr.const(np.array(scale).astype(\"float32\")) + _expr.const(\n np.array(bias).astype(\"float32\")\n )\n else:\n out = (x + _expr.const(np.array(bias).astype(\"float32\"))) * _expr.const(\n np.array(scale).astype(\"float32\")\n )\n g.add_node(op.output(\"Out\")[0], out)", "def scale_module(module, scale):\n for p in module.parameters():\n p.detach().mul_(scale)\n return module", "def _hyperparam_to_scale(self, hyperparam):\n\n # If logscale is used, input hyperparam is log of the scale.\n if self.use_log_scale:\n scale = 10.0**hyperparam\n else:\n scale = numpy.abs(hyperparam)\n\n return scale", "def set_scale(self, motor_model):\n for driver_re, motor_dict in self.__SCALE_FACTORS_BY_MODEL.iteritems():\n if driver_re.match(self._apt.model_number) is not None:\n if motor_model in motor_dict:\n self.scale_factors = motor_dict[motor_model]\n return\n else:\n break\n # If we've made it down here, emit a warning that we didn't find the\n # model.\n logger.warning(\n \"Scale factors for controller {} and motor {} are unknown\".format(\n self._apt.model_number, motor_model\n )\n )", "def 
make_feature_scale_factors():\n X, y = make_X_and_y()\n sqm = make_sqm_X()\n scale_factors = {\n \"indoor_temp\": np.max(X[:,:,0]),\n \"outdoor_temp\": np.max(X[:,:,1]),\n \"gas_kwh\": np.max(X[:,:,2]),\n \"elec_kwh\": np.max(X[:,:,3]),\n \"floor_area\": np.max(sqm),\n \"htc\": np.max(y),\n }\n\n with open(os.path.join(_TRAINING_DATA_PATH, \"scalefactors.json\"), \"w+\") as f:\n json.dump(scale_factors, f)", "def _update_model_params(self, params, model_ID, model, param_grid):\n \n params = params.copy()\n param_grid = param_grid.copy()\n \n params_transform = {}\n \n for key in params.keys():\n \n if 'log10.' in key:\n log10_transform = True\n else:\n log10_transform = False\n \n key = key.replace('log10.','')\n \n type_str = str(type(param_grid[key][0]))\n \n if 'int' in type_str: \n if log10_transform:\n params_transform[key] = int(10**params['log10.'+key])\n else:\n params_transform[key] = int(params[key])\n \n elif 'float' in type_str:\n if log10_transform:\n params_transform[key] = float(10**params['log10.'+key])\n \n else:\n params_transform[key] = float(params[key])\n \n elif 'str' in type_str: #index the param grid for hyperparams using 'choice'\n params_transform[key] = param_grid[key][params[key]]\n \n if 'densenet' not in model_ID.lower(): \n model.__dict__[key] = params_transform[key]\n \n assert(type_str == str(type(params_transform[key]))), 'type(param_grid[key][0]) changed from '+type_str+' to '+str(type(param_grid[key][0]))+' after updating params for key:'+str(key)\n \n if 'str' in type_str:\n assert(params_transform[key] in param_grid[key]), 'params_transform['+key+']='+str(params_transform[key])+' is not in the list of valid parameter choices:'+str(param_grid[key])\n \n else:\n assert(params_transform[key]<=max(param_grid[key]) and params_transform[key]>=min(param_grid[key])), 'params_transform['+key+']='+str(params_transform[key])+' does not lie in the range of valid values:'+str([min(param_grid[key]),max(param_grid[key])] )\n \n if 'densenet' in model_ID.lower(): \n model = model(**params_transform)\n \n return params_transform, model", "def scale_data(x, y, x_scale_f = '../saved_models/scalers/9_params_21_2_x_scaler.pkl', y_scale_f = '../saved_models/scalers/9_params_21_2_y_scaler.pkl', par_slice = range(7) + range(8,9)):\n\tx_scaler = sklearn.externals.joblib.load(x_scale_f)\n\ty_scaler = sklearn.externals.joblib.load(y_scale_f)\n\tx_scaler.transform(x)\n\ty_scaler.transform(y)\n\tx = x[:,par_slice] \n\treturn x, y, x_scaler, y_scaler", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(self):\n return self._gev_bijector.scale", "def scale(self, data: np.ndarray):\n if self.scale_type == \"min_max\":\n scaled_data = (data - self.predictor_min) / (\n self.predictor_max - self.predictor_mean\n )\n elif self.scale_type == \"normalize\":\n scaled_data = (data - self.predictor_mean) / (\n self.predictor_max - self.predictor_min\n )\n elif self.scale_type == \"standardize\":\n scaled_data = (data - self.predictor_mean) / self.predictor_std\n elif self.scale_type == \"scale\":\n scaled_data = data - self.predictor_mean\n else:\n scaled_data = data\n return scaled_data", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def task_scaling(input_array, scaling_factor):\n return(np.multiply(scaling_factor, input_array))", "def colorscale(self):\n return self[\"colorscale\"]", "def any_scale(scale):\n return scale", "def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)", "def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return 
pulumi.get(self, \"scale\")", "def yscale(value):\n impl.yscale(**locals())", "def scale_data(cube, user_input, plot_type):\n\n scaled = False\n for input_pair in user_input:\n scale_factor, user_plot_type = input_pair\n if user_plot_type == plot_type:\n assert scaled == False, \"Multiple scale factors entered for single plot type\"\n cube.data = cube.data / 10**int(scale_factor)\n cube.units = '10^%s %s' %(scale_factor, str(cube.units))\n scaled = True\n\n return cube", "def scale_uv(mesh_name, scale):\n mesh = bpy.data.meshes[mesh_name]\n if len(mesh.uv_layers) == 0:\n return\n uv_layer = mesh.uv_layers[0].data\n for uv_index in range(0, len(uv_layer)):\n uv = uv_layer[uv_index].uv\n uv *= scale", "def scale(self, factor):\n for a in self.symbol_attributes:\n a.scale(factor)", "def _scale_independent_metrics() -> list:\n return ['mape', 'r2', 'nse']", "def scaleProcess(process,scale):\n #print '>>> scaleProcess(\"%s\",%.3f):'%(process.process(),scale)\n #print \">>> rate before = %s\"%(process.rate())\n process.set_rate(process.rate()*scale)\n #print \">>> rate after = %s\"%(process.rate())", "def _scale_to_hyperparam(self, scale):\n\n # If logscale is used, output hyperparam is log of scale.\n if self.use_log_scale:\n hyperparam = numpy.log10(numpy.abs(scale))\n else:\n hyperparam = numpy.abs(scale)\n\n return hyperparam", "def get_scale(units, compartmentId, volume, extracellularVolume):\r\n if compartmentId == 'c':\r\n V = volume\r\n else:\r\n V = extracellularVolume\r\n\r\n if units == 'uM':\r\n return 1. / N_AVOGADRO / V * 1e6\r\n elif units == 'mM':\r\n return 1. / N_AVOGADRO / V * 1e3\r\n elif units == 'molecules':\r\n return 1.\r\n else:\r\n raise Exception('Invalid units \"%s\"' % units)", "def get_scale_op(self):\n\t\treturn self.variables.get('scale')" ]
[ "0.67240673", "0.6272403", "0.5994706", "0.5941544", "0.5833461", "0.5681746", "0.5676147", "0.5382901", "0.5348", "0.53371793", "0.5301205", "0.52537143", "0.52262044", "0.51800525", "0.51736313", "0.5139863", "0.512118", "0.5083435", "0.50683033", "0.50677323", "0.50584626", "0.5052913", "0.5050851", "0.5048136", "0.50284874", "0.50180674", "0.5017098", "0.50109756", "0.5006812", "0.4985893" ]
0.6472911
1
From all the data, it takes the columns TopicID, and count the topic based on the gender
def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame: data_frame_topic = data_frame \ .filter(data_frame["Stratification1"].contains("Male")) \ .distinct() \ .groupBy("TopicID") \ .count() \ .sort("TopicID") print("The following table represent the number of men group by the topic: ") data_frame_topic.show() data_frame_pandas = data_frame.toPandas() return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_male_female_topicsDF(data_dict, gender):\n dataDF = pd.DataFrame.from_dict(data_dict[gender], orient='index')\n outlet_gender_topicsDF = pd.json_normalize(dataDF['topic_mean'])\n outlet_gender_topicsDF.index = dataDF.index\n outlet_gender_topicsDF = outlet_gender_topicsDF.sort_index()\n outlet_gender_topicsDF = outlet_gender_topicsDF.transpose()\n return outlet_gender_topicsDF", "def construct_gender_df(data_dict):\n gender_dict = data_dict['perGenderTopics']\n topics = data_dict['topics']\n # Convert to Pandas DataFrame\n genderDF = pd.DataFrame.from_dict(gender_dict, orient='index').transpose()\n genderDF = genderDF[['female', 'male']]\n genderDF['diff'] = genderDF['female'] - genderDF['male']\n # Sort in order of the sum of mean values for each topic\n genderDF = genderDF.sort_values('diff')\n genderDF['topic'] = [f\"t{i}\" for i in genderDF.index]\n\n # Get a properly ordered list of topics based on prominence for axis labelling\n ordered_topics_dict = {idx: topics[idx] for idx in genderDF.index}\n ordered_names = [topics[idx]['name'] for idx in genderDF.index]\n top_5_words = get_top_n_words(ordered_topics_dict)\n # Get topic names for axis labels if they exist, otherwise return top-5 words per topic\n y_labels = [name if name else top_5_words[i] for i, name in enumerate(ordered_names)]\n genderDF['topic_names'] = y_labels\n return genderDF", "def topic_stats(df_topic_sents_keywords):\n\n # Number of Documents for Each Topic\n topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()\n\n # Percentage of Documents for Each Topic\n topic_contribution = round(topic_counts/topic_counts.sum(), 4)\n\n # Topic Number and Keywords\n topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]\n\n # Concatenate Column wise\n df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)\n\n # Change Column names\n df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']\n\n # Show\n df_dominant_topics", "def user_gender_statistics(df):\n print('Count of gender \\n')\n gender_counts=df['Gender'].value_counts()\n #loop through to print the total number of gender\n for index, gender_count in enumerate(gender_counts):\n print(' {}: {}'.format(gender_counts.index[index],gender_count))\n \n print()", "def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics", "def count_gender(data):\n data = column_to_list(data, -2)\n male = data.count(\"Male\")\n female = data.count(\"Female\")\n return [male, female]", "def gender_word_counts(data):\n\n # We use the stopwords package from NLTK corpus.\n stop_words = set(stopwords.words('english'))\n data['tweet_words'] = data['text_cleaned'].str.split()\n # Ignoring all the stop words\n data['tweet_words'] = data['tweet_words'].apply(lambda tweet: [word for word in tweet if word not in stop_words])\n\n # 
Separating Male, Female and Brand profiles.\n male_profiles = data[data['gender'] == 'male']\n female_profiles = data[data['gender'] == 'female']\n brand_profiles = data[data['gender'] == 'brand']\n\n print(\"Top 20 most frequent words used by Men\")\n all_male_tweets = ' '.join(male_profiles['tweet_words'].astype(str))\n Male_words = pd.Series(all_male_tweets.split(\" \")).value_counts()[:20]\n print(Male_words)\n print()\n\n print(\"Top 20 most frequent words used by Women\")\n all_female_tweets = ' '.join(female_profiles['tweet_words'].astype(str))\n Female_words = pd.Series(all_female_tweets.split(\" \")).value_counts()[:20]\n print(Female_words)\n print()\n\n print(\"Top 20 most frequent words used by Brands\")\n all_brand_tweets = ' '.join(brand_profiles['tweet_words'].astype(str))\n Brand_words = pd.Series(all_brand_tweets.split(\" \")).value_counts()[:20]\n print(Brand_words)\n\n # Plotting horizontal bar graphs showing Top 20 tweet words used Vs. the word frequency.\n mp = Male_words.plot(kind='barh', stacked=True, colormap='plasma', title=\"Top 20 most frequently words used by Men\")\n mp.set_ylabel(\"Tweet words used by Males\")\n mp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n fp = Female_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Women\")\n fp.set_ylabel(\"Tweet words used by Females\")\n fp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n bp = Brand_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Brands\")\n bp.set_ylabel(\"Tweet words used by Brands\")\n bp.set_xlabel(\"Word Frequency\")\n plt.show()", "def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Black, non-Hispanic\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of black ethnicity people group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def count_topic_dist(self):\n if len(self.representants) == 0:\n self.log_writer(\"Representants not set. 
Cannot make topic dist.\")\n return\n for key, value in self.representants.items():\n self.topic_distributions.append(len(value)/len(self.training_docs))\n self.topic_numbers.append(key)", "def construct_outlet_gender_DF(data_dict):\n outlet_gender_dict = data_dict['perOutletGenderTopics']\n topics = data_dict['topics']\n male_outlet_topics = get_male_female_topicsDF(outlet_gender_dict, 'male')\n female_outlet_topics = get_male_female_topicsDF(outlet_gender_dict, 'female')\n # Plot the difference between the male-dominant and female-dominant topics\n diff = female_outlet_topics - male_outlet_topics\n # Calculate sum of all columns to decide sorting order\n diff['net'] = diff[diff.columns].sum(axis=1)\n diff = diff.sort_values('net').drop('net', axis=1)\n # Get a properly ordered list of topics based on prominence for axis labelling\n ordered_topics_dict = {idx: topics[idx] for idx in diff.index}\n ordered_names = [topics[idx]['name'] for idx in diff.index]\n top_5_words = get_top_n_words(ordered_topics_dict)\n # Get topic names for axis labels if they exist, otherwise return top-5 words per topic\n y_labels = [name if name else top_5_words[i] for i, name in enumerate(ordered_names)]\n return diff, y_labels", "def get_data_frame_count_type_of_topic(data_frame: DataFrame) -> pb.DataFrame:\n try:\n data_frame = data_frame \\\n .select(\"TopicID\", \"Question\") \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n except Py4JError:\n raise AnalysisException('One columns is incorrect')\n print(\"The following table represent the number of the type of each topic\")\n data_frame.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def init():\n\n # Reading the data from the CSV file using the latin1 encoding.\n data_read = pd.read_csv(\"gender-classifier-DFE-791531.csv\", encoding='latin1') # Dataset Size = 20050\n\n # If all the attribute values are empty for any of the rows, we drop them.\n data = data_read.dropna(how='all') # After dropping, data set size is still 20050\n\n # Checking the names of the columns/attributes which contains at least one null value\n columns_containing_missing_values = data.columns[data.isnull().any()].tolist()\n print(\"Column names which has missing values\")\n print(columns_containing_missing_values)\n\n # Since 'gender' is our target variable, we would like to have values for it.\n # So, dropping all the rows which have no values for the 'gender' attribute.\n data = data[data['gender'].notnull()] # After dropping, dataset size = 19953 rows\n # Also, dropping all the rows which have values as 'unknown' for the 'gender' attribute\n data = data[data['gender'] != 'unknown'] # After dropping, dataset size = 18836 rows\n\n male_profile_count = len(data[data['gender'] == 'male'])\n print(\"Male Profile Count \" + str(male_profile_count))\n female_profile_count = len(data[data['gender'] == 'female'])\n print(\"Female Profile Count \" + str(female_profile_count))\n brand_profile_count = len(data[data['gender'] == 'brand'])\n print(\"Brand Profile Count \" + str(brand_profile_count))\n\n return data", "def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n 
assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def get_diseases(self):\n self.diseases = self.data.groupby('topic')['topic'].count()", "def user_stats_gender(df):\n # Display counts of gender\n print(\"Counts of gender:\\n\")\n start_time = time.time()\n gender_counts = df['Gender'].value_counts()\n # iteratively print out the total numbers of genders \n # in this loop , it will iterative over the user_counts and its numbering\n for index,gender_count in enumerate(gender_counts):\n print(\" {}: {}\".format(gender_counts.index[index], gender_count))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*50)", "def count_mentioned_countries(data):\n countries_mentioned = {}\n countries = get_countries()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for country in countries:\n if country in (subject_words + message_words):\n if country in countries_mentioned:\n countries_mentioned[country] += 1\n else:\n countries_mentioned[country] = 1\n\n return pd.DataFrame.from_dict(countries_mentioned, orient=\"index\")", "def count_gender(dictionary, gender_variable):\r\n boy = 0\r\n girl = 0\r\n for num in dictionary[gender_variable]:\r\n if num == 1:\r\n boy += 1\r\n elif num == 2:\r\n girl += 1\r\n return (boy, girl)", "def count_authors_by_gender(self, gender):\n count = 0\n for document in self.documents:\n try:\n if document.author_gender.lower() == gender.lower():\n count += 1\n except AttributeError:\n raise MissingMetadataError(['author_gender'])\n\n return count", "def test_extract_topics(base_bertopic):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(documents)\n c_tf_idf = base_bertopic._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def count_male_teams(self):\n return len(self.df['Adult male 11v11 (16-45)'].dropna())", "def index():\n import numpy as np\n import random\n\n total_gender = {}\n total_gender['Male'] = db(db.patient.sex == 'Male').count()\n total_gender['Female'] = db(db.patient.sex == 'Female').count()\n total_gender['Undeclared'] = db(db.patient.sex == 'Undeclared').count()\n\n groups = db(db.groups).select()\n freq_groups = {}\n grp_gender = {}\n for g in groups:\n freq_groups[g.code] = db(db.patient.groups.contains(g.id)).count()\n grp_gender[g.code] = {}\n grp_gender[g.code]['Male'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Male')).count()\n grp_gender[g.code]['Female'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Female')).count()\n grp_gender[g.code]['Undeclared'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Undeclared')).count()\n\n experiments = db(db.experiments).select()\n freq_experiments = {}\n exp_gender = {}\n for e in experiments:\n freq_experiments[e.code] = db(db.patient.experiments.contains(e.id)).count()\n exp_gender[e.code] = {}\n exp_gender[e.code]['Male'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex 
== 'Male')).count()\n exp_gender[e.code]['Female'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Female')).count()\n exp_gender[e.code]['Undeclared'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Undeclared')).count()\n\n grp_exp = {}\n for e in experiments:\n grp_exp[e.code] = {}\n for g in groups:\n grp_exp[e.code][g.code] = db(db.patient.experiments.contains(e.id) & db.patient.groups.contains(g.id)).count()\n\n return dict(message=T('Pain Network: A web-based tool for diagnosis of the Chronic Pain.'),\n freq_gender=total_gender,freq_groups=freq_groups,freq_experiments=freq_experiments,\n exp_gender=exp_gender,grp_gender=grp_gender,grp_exp=grp_exp)", "def uncategorized(df):\n\n counter = 0\n for movie in df.index:\n if len(df.loc[movie, 'imdbGenres']) == 1 and\\\n df.loc[movie, 'Political'] == 0:\n counter += 1\n\n return counter", "def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types:\\n\")\n user_counts = df['User Type'].value_counts()\n # printing out the total numbers of user types\n for index, user_count in enumerate(user_counts):\n print(\" {}: {}\".format(user_counts.index[index], user_count))\n\n # Display counts of gender", "def test_extract_topics_custom_cv():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n\n cv = CountVectorizer(ngram_range=(1, 2))\n model = 
BERTopic(vectorizer=cv)\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def assign_topics_to_sentences(self):\n \n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # Read the data - id and reponse column\n dt = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n\n\n \n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n \n # Split into sentences\n dt['sentences'] = self.getSentences(dt[self.col_name])\n \n \n \n\n \n \n\n # Store number of sentences in each response as a column\n dt['num_sent'] = dt['sentences'].apply(lambda x: len(x))\n\n # Split each row into multiple rows - one row for each sentence\n dt = (dt\n .set_index([self.id_col_name, self.col_name, 'num_sent'])['sentences']\n .apply(pd.Series)\n .stack()\n .reset_index()\n .drop('level_3', axis = 1)\n .rename(columns = {0:'sentences'}))\n\n\n # Clean the sentences\n dt['sentences_cleaned'] = self.cln.clean(dt['sentences'], typo = self.typo_ind)\n\n # Remove useless sentences\n dt['sentences_cleaned'] = self.getValid(dt['sentences_cleaned'])\n\n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n\n # Tokenize words in the cleaned sentences\n responses = list(self.sent_to_words(dt['sentences_cleaned'].values.tolist()))\n\n\n # Call the lexicon function\n topic_lexicons = self.prepare_lexicons()\n\n # Lists to store results\n count_topic_all = []\n actual_topic_all = []\n\n # Tag each response into a topic\n for response in responses:\n\n count_topic = []\n actual_topic = []\n\n for topic in topic_lexicons:\n\n # Count occurance of each word in word stock in the response\n temp = sum(dict((x, response.count(x)) for x in topic).values())\n count_topic.append(temp)\n\n\n for index, value in enumerate(count_topic):\n\n # Consider the topic if atleast one(?) 
word from its word-stock occurs in the response\n if value > 0:\n actual_topic.append(topic_dict[index])\n\n\n # If more than 3 topics are tagged for single sentence, refine by increasing\n # cutoff to at least 2 words instead of 1\n if len(actual_topic) > 3:\n\n actual_topic = []\n for index, value in enumerate(count_topic):\n\n if value > 1: # Increase cutoff\n actual_topic.append(topic_dict[index])\n\n count_topic_all.append(count_topic)\n actual_topic_all.append(actual_topic)\n\n\n dt['tags'] = actual_topic_all\n dt['num_tags'] = count_topic_all\n\n\n # Select only the most important columns\n dt_less = dt[[self.id_col_name, 'sentences', 'tags']]\n\n return dt, dt_less", "def calc_topic_mode_log_stats(user_exercise_graph, topic_id,\n just_earned_proficiency):\n topic = topic_models.Topic.get_by_id(topic_id)\n topic_exercises = topic.get_exercises()\n\n total_exercises = len(topic_exercises)\n count_proficient = len(set(ex.name for ex in topic_exercises) &\n set(user_exercise_graph.proficient_exercise_names()))\n just_completed = (just_earned_proficiency and total_exercises ==\n count_proficient)\n\n return {\n 'total_exercises': total_exercises,\n 'count_proficient': count_proficient,\n 'just_completed': just_completed,\n }", "def pre_process_data(df):\n # setting `passengerID` as Index since it wont be necessary for the analysis\n df = df.set_index(\"PassengerId\")\n\n # convert 'Sex' values\n df['gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n # We see that 2 passengers embarked data is missing, we fill those in as the most common Embarked value\n df.loc[df.Embarked.isnull(), 'Embarked'] = df['Embarked'].mode()[0]\n\n # Replace missing age values with median ages by gender\n for gender in df['gender'].unique():\n median_age = df[(df['gender'] == gender)].Age.median()\n df.loc[(df['Age'].isnull()) & (df['gender'] == gender), 'Age'] = median_age\n\n # convert 'gender' values to new columns\n df = pd.get_dummies(df, columns=['gender'])\n\n # convert 'Embarked' values to new columns\n df = pd.get_dummies(df, columns=['Embarked'])\n\n # bin Fare into five intervals with equal amount of values\n df['Fare-bin'] = pd.qcut(df['Fare'], 5, labels=[1, 2, 3, 4, 5]).astype(int)\n\n # bin Age into seven intervals with equal amount of values\n # ('baby','child','teenager','young','mid-age','over-50','senior')\n bins = [0, 4, 12, 18, 30, 50, 65, 100]\n age_index = (1, 2, 3, 4, 5, 6, 7)\n df['Age-bin'] = pd.cut(df['Age'], bins, labels=age_index).astype(int)\n\n # create a new column 'family' as a sum of 'SibSp' and 'Parch'\n df['family'] = df['SibSp'] + df['Parch'] + 1\n df['family'] = df['family'].map(lambda x: 4 if x > 4 else x)\n\n # create a new column 'FTicket' as the first character of the 'Ticket'\n df['FTicket'] = df['Ticket'].map(lambda x: x[0])\n # combine smaller categories into one\n df['FTicket'] = df['FTicket'].replace(['W', 'F', 'L', '5', '6', '7', '8', '9'], '4')\n # convert 'FTicket' values to new columns\n df = pd.get_dummies(df, columns=['FTicket'])\n\n # get titles from the name\n df['title'] = df.apply(lambda row: re.split('[,.]+', row['Name'])[1], axis=1)\n\n # convert titles to values\n df['title'] = df['title'].map({' Capt': 'Other', ' Master': 'Master', ' Mr': 'Mr', ' Don': 'Other',\n ' Dona': 'Other', ' Lady': 'Other', ' Col': 'Other', ' Miss': 'Miss',\n ' the Countess': 'Other', ' Dr': 'Other', ' Jonkheer': 'Other', ' Mlle': 'Other',\n ' Sir': 'Other', ' Rev': 'Other', ' Ms': 'Other', ' Mme': 'Other', ' Major': 'Other',\n ' Mrs': 'Mrs'})\n # convert 
'title' values to new columns\n df = pd.get_dummies(df, columns=['title'])\n\n df = df.drop(['Name', 'Ticket', 'Cabin', 'Sex', 'Fare', 'Age'], axis=1)\n\n return df", "def get_paper_counter_per_topic_id(all_topic_assignments):\n counter = {}\n for topic_assignment in all_topic_assignments:\n for topic_index, topic_value in topic_assignment:\n if topic_index not in counter:\n counter[topic_index] = 0\n\n counter[topic_index] += 1\n\n return counter", "def topic(df, num_topics=5):\r\n# X, y = df[df.columns[:-1]], df[df.columns[-1]]\r\n lda = LatentDirichletAllocation(n_topics=num_topics,\r\n max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)\r\n return lda.fit_transform(df)" ]
[ "0.6474987", "0.63504976", "0.6320572", "0.62746453", "0.627228", "0.6258495", "0.61637425", "0.6114557", "0.5977991", "0.58644605", "0.5836324", "0.5831244", "0.5673213", "0.5647648", "0.5610889", "0.5574155", "0.5492381", "0.54264843", "0.53719985", "0.53717935", "0.5295619", "0.52562594", "0.5255951", "0.52543795", "0.522742", "0.52179205", "0.519302", "0.51659757", "0.51119727", "0.51042306" ]
0.80123305
0
From all the data, it takes the columns TopicID, and count the topic based on the ethnicity
def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame: data_frame_topic = data_frame \ .filter(data_frame["Stratification1"].contains("Black, non-Hispanic")) \ .distinct() \ .groupBy("TopicID") \ .count() \ .sort("TopicID") print("The following table represent the number of black ethnicity people group by the topic: ") data_frame_topic.show() data_frame_pandas = data_frame.toPandas() return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Male\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of men group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics", "def topic_stats(df_topic_sents_keywords):\n\n # Number of Documents for Each Topic\n topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()\n\n # Percentage of Documents for Each Topic\n topic_contribution = round(topic_counts/topic_counts.sum(), 4)\n\n # Topic Number and Keywords\n topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]\n\n # Concatenate Column wise\n df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)\n\n # Change Column names\n df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']\n\n # Show\n df_dominant_topics", "def get_data_frame_count_type_of_topic(data_frame: DataFrame) -> pb.DataFrame:\n try:\n data_frame = data_frame \\\n .select(\"TopicID\", \"Question\") \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n except Py4JError:\n raise AnalysisException('One columns is incorrect')\n print(\"The following table represent the number of the type of each topic\")\n data_frame.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def count_topic_dist(self):\n if len(self.representants) == 0:\n self.log_writer(\"Representants not set. 
Cannot make topic dist.\")\n return\n for key, value in self.representants.items():\n self.topic_distributions.append(len(value)/len(self.training_docs))\n self.topic_numbers.append(key)", "def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def get_diseases(self):\n self.diseases = self.data.groupby('topic')['topic'].count()", "def get_paper_counter_per_topic_id(all_topic_assignments):\n counter = {}\n for topic_assignment in all_topic_assignments:\n for topic_index, topic_value in topic_assignment:\n if topic_index not in counter:\n counter[topic_index] = 0\n\n counter[topic_index] += 1\n\n return counter", "def get_rdd_count_type_of_topy(rdd: list) -> pb.DataFrame:\n data_frame_pandas = pb.DataFrame(rdd, columns=['Topic', 'Question'])\n print(data_frame_pandas)\n return data_frame_pandas", "def test_extract_topics(base_bertopic):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(documents)\n c_tf_idf = base_bertopic._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def connect_topic_id_to_topics(self, model):\n confidence = []\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key, tp_num, val / len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2), reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n 
associated_topics.append(conf[0])\n self.log_writer.add_log(\n 'Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],\n conf[1],\n conf[2]))\n self.topic_indexes[conf[0]] = conf[1]\n\n for key, value in self.topic_indexes.items():\n self.topics_of_index[value] = [key]", "def ngram_detection(self, min_topic_count=5, min_text_id_count=4):\n\n for text_id, text in self.texts.items():\n # single-word topics act a bit different (no zips or comprehensions)\n # store data in self.topics, not zip_grams\n for word in text['doc']:\n word_lemma = word.text.lower() if word.lemma_ == '-PRON-' else word.lemma_\n\n if {word.text}.intersection(self.punct) or {word.lemma_}.intersection(self.stop_words):\n continue\n\n if not (word.pos in self.nouns or word.ent_type in self.entities):\n continue\n\n if word_lemma in self.topics:\n self.topics[word_lemma][\"count\"] += 1\n self.topics[word_lemma][\"textIDs\"] |= {text_id}\n self.topics[word_lemma][\"verbatims\"] |= {word.text.lower()}\n else:\n self.topics[word_lemma] = {\"name\": word_lemma,\n \"count\": 1,\n \"textIDs\": {text_id},\n \"verbatims\": {word.text.lower()},\n \"subtopics\": {}}\n\n # Populate self.ngrams and self.topics\n for text_id, text in self.texts.items():\n doc = text['doc']\n\n # Find pentagrams - ngrams with 5 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:], doc[4:]):\n self._ngram_counter(ngram, 5, text_id, doc)\n\n # Find pentagrams - ngrams with 4 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:]):\n self._ngram_counter(ngram, 4, text_id, doc)\n\n for ngram in zip(doc, doc[1:], doc[2:]):\n self._ngram_counter(ngram, 3, text_id, doc)\n\n for ngram in zip(doc, doc[1:]):\n self._ngram_counter(ngram, 2, text_id, doc)\n\n\n # Add text_id_count (the number of texts that the topic occurs in; so a topic might occur 50 times,\n # but it's only mentioned in 3 different texts, we'd show 3.\n for _, topic in self.topics.items():\n topic['textIDCount'] = len(topic['textIDs'])\n for _, ngram in self.ngrams.items():\n ngram['textIDCount'] = len(ngram['textIDs'])\n\n # Eliminate rarely occurring topics and ngrams.\n self.topics = {k: v for k, v in self.topics.items() if\n v['textIDCount'] >= min_text_id_count and v['count'] >= min_topic_count}\n self.ngrams = {k: v for k, v in self.ngrams.items() if\n v['textIDCount'] >= min_text_id_count}\n\n # Loop through each ngram pair: outer loop is all ngrams, inner loop is all ngrams\n for ngram_lemma, ngram in self.ngrams.items():\n for ngram_plus_lemma, ngram_plus in self.ngrams.items():\n # only stay in this loop if the inner ngram is one word longer than the outer loop and if the\n # inner loop lemma contains the outer group lemma (avoid partial word matches like man in woman)\n # r'\\b' + ngram_lemma + r'\\b' --> does the ngram lemma fit in ngram_plus lemma (\\b is word boundary)\n if ngram['n'] + 1 != ngram_plus['n']:\n continue\n\n if not re.search(r'\\b' + ngram_lemma + r'\\b', ngram_plus_lemma):\n continue\n\n # Is the absolute count of occurrences and the count of text_id occurrences both big enough to use it\n # instead of the other loop?\n if ngram_plus['count'] + 3 >= ngram['count'] and ngram_plus['textIDCount'] + 3 >= ngram['textIDCount']:\n # TODO: Is this the right action (deleting shorter, but not much more explanatory) phrase?\n # TODO: Is this enough? 
Or will I end up double explaining things sometimes?\n ngram['count'] = -1\n\n # Eliminate newly demoted items\n self.ngrams = {ngram_lemma: ngram for ngram_lemma, ngram in self.ngrams.items() if ngram['count'] > 0}", "def test_topics_for_products(self):\n desktop_topics = topics_for(product=self.desktop)\n eq_(len(desktop_topics), 3)\n\n mobile_topics = topics_for(product=self.mobile)\n eq_(len(mobile_topics), 2)", "def assign_topics_to_sentences(self):\n \n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # Read the data - id and reponse column\n dt = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n\n\n \n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n \n # Split into sentences\n dt['sentences'] = self.getSentences(dt[self.col_name])\n \n \n \n\n \n \n\n # Store number of sentences in each response as a column\n dt['num_sent'] = dt['sentences'].apply(lambda x: len(x))\n\n # Split each row into multiple rows - one row for each sentence\n dt = (dt\n .set_index([self.id_col_name, self.col_name, 'num_sent'])['sentences']\n .apply(pd.Series)\n .stack()\n .reset_index()\n .drop('level_3', axis = 1)\n .rename(columns = {0:'sentences'}))\n\n\n # Clean the sentences\n dt['sentences_cleaned'] = self.cln.clean(dt['sentences'], typo = self.typo_ind)\n\n # Remove useless sentences\n dt['sentences_cleaned'] = self.getValid(dt['sentences_cleaned'])\n\n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n\n # Tokenize words in the cleaned sentences\n responses = list(self.sent_to_words(dt['sentences_cleaned'].values.tolist()))\n\n\n # Call the lexicon function\n topic_lexicons = self.prepare_lexicons()\n\n # Lists to store results\n count_topic_all = []\n actual_topic_all = []\n\n # Tag each response into a topic\n for response in responses:\n\n count_topic = []\n actual_topic = []\n\n for topic in topic_lexicons:\n\n # Count occurance of each word in word stock in the response\n temp = sum(dict((x, response.count(x)) for x in topic).values())\n count_topic.append(temp)\n\n\n for index, value in enumerate(count_topic):\n\n # Consider the topic if atleast one(?) 
word from its word-stock occurs in the response\n if value > 0:\n actual_topic.append(topic_dict[index])\n\n\n # If more than 3 topics are tagged for single sentence, refine by increasing\n # cutoff to at least 2 words instead of 1\n if len(actual_topic) > 3:\n\n actual_topic = []\n for index, value in enumerate(count_topic):\n\n if value > 1: # Increase cutoff\n actual_topic.append(topic_dict[index])\n\n count_topic_all.append(count_topic)\n actual_topic_all.append(actual_topic)\n\n\n dt['tags'] = actual_topic_all\n dt['num_tags'] = count_topic_all\n\n\n # Select only the most important columns\n dt_less = dt[[self.id_col_name, 'sentences', 'tags']]\n\n return dt, dt_less", "def test_extract_topics_custom_cv():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n\n cv = CountVectorizer(ngram_range=(1, 2))\n model = BERTopic(vectorizer=cv)\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics", "def uncategorized(df):\n\n counter = 0\n for movie in df.index:\n if len(df.loc[movie, 'imdbGenres']) == 1 and\\\n df.loc[movie, 'Political'] == 0:\n counter += 1\n\n return counter", "def group_topics(sent_topics_sorteddf):\n new_topics=pd.concat([sent_topics_sorteddf.groupby('Topic_Num').head()[['Keywords']],\n topic_contribution.sort_index(),\n pd.Series(['Economy','Immigration','Environment','Event',\n 'Civil Rights','Civil Rights','Healthcare',\n 'Defense','Trump','Community','Event','Event',\n 'Thanks','Legislation','Trump','Community',\n 'Community','Trump','Defense',\n 'Legislation','Thanks','Economy','Thanks','Healthcare',\n 'Legislation'])],axis=1).groupby(0).sum()\n plt.pie(new_topics,labels=new_topics.index,autopct='%.0f',pctdistance=.8)\n plt.title('Topic Share %');\n\n new_topic_words = pd.concat([sent_topics_sorteddf.groupby('Topic_Num').head()[['Keywords']],\n topic_contribution.sort_index(),\n pd.Series(['Economy','Immigration','Environment','Event',\n 'Civil Rights','Civil Rights','Healthcare',\n 'Defense','Trump','Community','Event','Event',\n 'Thanks','Legislation','Trump','Community',\n 'Community','Trump','Defense',\n 'Legislation','Thanks','Economy','Thanks','Healthcare',\n 'Legislation'])],axis=1).groupby(0)['Keywords'].sum()\n [print(f'{topic}: ' + words) for topic,words in 
zip(new_topic_words.index,new_topic_words)]", "def test_get_topics(self):\n\n for m in self.models:\n\n topics = m.topics\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertEqual(topics.num_rows(), 25)\n self.assertEqual(topics.num_columns(), 2)\n z = m.topics[\"topic_probabilities\"]\n for k in range(m.num_topics):\n self.assertTrue(\n abs(sum(z.vector_slice(k)) - 1) < DELTA,\n \"Returned probabilities do not sum to 1.\",\n )\n\n # Make sure returned object is an SFrame of the right size\n topics = m.get_topics()\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertTrue(\n topics.num_columns() == 3,\n \"Returned SFrame should have a topic, word, and probs.\",\n )\n\n # Make sure that requesting a single topic returns only that topic\n num_words = 8\n topics = m.get_topics([5], num_words=num_words)\n self.assertTrue(\n all(topics[\"topic\"] == 5), \"Returned topics do not have the right id.\"\n )\n self.assertEqual(topics.num_rows(), num_words)\n topics = m.get_topics([2, 4], num_words=num_words)\n self.assertEqual(set(list(topics[\"topic\"])), set([2, 4]))\n self.assertEqual(topics.num_rows(), num_words + num_words)\n\n # Make sure the cumulative probability of the returned words is\n # is less than the cutoff we provided.\n # A cutoff of 1.0 should return num_words for every topic.\n cutoff = 1.0\n topics = m.get_topics(cdf_cutoff=cutoff, num_words=len(m.vocabulary))\n totals = topics.groupby(\n \"topic\", {\"total_score\": turicreate.aggregate.SUM(\"score\")}\n )\n self.assertTrue(\n all(totals[\"total_score\"] <= (cutoff + DELTA)),\n \"More words were returned than expected for this cutoff.\",\n )\n\n # Make sure we raise errors for bad input\n with self.assertRaises(ValueError):\n m.get_topics([-1])\n with self.assertRaises(ValueError):\n m.get_topics([10000])\n with self.assertRaises(ToolkitError):\n topics = m.get_topics(output_type=\"other\")\n\n # Test getting topic_words\n topic_words = m.get_topics(output_type=\"topic_words\", num_words=5)\n self.assertEqual(type(topic_words), turicreate.SFrame)\n\n # Test words are sorted correctly for the first topic\n # TODO: Make this more deterministic.\n\n # topic_probs = m.get_topics(num_words=5)\n # expected = [w for w in topic_probs['word'][:5]]\n # observed = topic_words['words'][0]\n # self.assertEqual(observed[0], expected[0])", "def test_topic_reduction_edge_cases():\n model = BERTopic()\n nr_topics = 5\n model.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents)\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents)\n new_freq = model.get_topic_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def export_topics(self):\n\n # format as a list (for json output), then sort descending by textIDCount\n topics = [{'name': topic['name'], 'count': topic['count'],\n 'verbatims': list(topic['verbatims']), 'textIDs': list(topic['textIDs']),\n 'textIDCount': topic['textIDCount'], 'rank': topic['rank'],\n 'children': '' if 'children' not in topic else topic['children']}\n for topic_id, topic in self.topics.items()]\n topics = sorted(topics, key=lambda topic: topic['textIDCount'], reverse=True)\n\n for i, topic in 
enumerate(topics):\n # Note that 'rank' is from topic, not child.\n topic['children'] = [{'name': child['name'], 'count': child['count'], 'rank': topic['rank'],\n 'verbatims': list(child['verbatims']), 'textIDs': list(child['textIDs']),\n 'textIDCount': child['textIDCount']}\n for _, child in topic['children'].items()]\n\n topic['children'] = sorted(topic['children'], key=lambda lemma: lemma['textIDCount'], reverse=True)\n\n # If the subtopic count is greater than the topic count, than calc a multiplier to size each subtopic\n child_count = sum([child['textIDCount'] for child in topic['children']])\n child_count_multiplier = 1 if child_count < topic['textIDCount'] else topic['textIDCount'] / child_count\n\n for child in topic['children']:\n child['size'] = child['textIDCount'] * child_count_multiplier\n\n topic['size'] = topic['textIDCount'] - (child_count * child_count_multiplier)\n\n # Prune topics over max_topics (default ~40): we stopped calc'ing rank over the max_topics\n self.model_output[\"children\"] = [topic for topic in topics]\n\n # Build file name and save\n if self.data_date:\n date = datetime.strptime(self.data_date, \"%Y-%m-%d\").strftime('%d') # from YYYY-MM-DD to DD\n file_name = '{}-{}-Topics.txt'.format(self.corpus_name, date)\n else:\n file_name = '{}-Topics.txt'.format(self.corpus_name)\n\n with open(config.OUTPUT_DIR + file_name, 'w') as file:\n json.dump(self.model_output, file)", "def find_dominant_topic(df_topic_sents_keywords):\n\n # Format\n df_dominant_topic = df_topic_sents_keywords.reset_index()\n df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n\n # Group top 5 sentences under each topic\n sent_topics_sorteddf = pd.DataFrame()\n\n sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n start = time.time()\n for i, grp in sent_topics_outdf_grpd:\n sent_topics_sorteddf = pd.concat([sent_topics_sorteddf, \n grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], \n axis=0)\n print(f'Group done. 
Total time {time.time() - start} seconds.')\n\n # Reset Index \n sent_topics_sorteddf.reset_index(drop=True, inplace=True)\n\n # Format\n sent_topics_sorteddf.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Text\"]\n return sent_topics_sorteddf", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1", "def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")", "def __rank_topics(self, found_topics, explanation):\n max_value = 0\n scores = []\n for _,topic in found_topics.items():\n topic[\"score\"] = topic[\"times\"] * len(topic['grams'].keys())\n 
scores.append(topic[\"score\"])\n if topic[\"score\"] > max_value:\n max_value = topic[\"score\"]\n\n for _,topic in found_topics.items():\n if \"syntactic\" in topic:\n topic[\"score\"] = max_value\n\n\n\n\n # Selection of unique topics\n unique_topics = {}\n for t_p,topic in found_topics.items():\n prim_label = self.cso.get_primary_label_wu(t_p)\n if prim_label in unique_topics:\n if unique_topics[prim_label] < topic[\"score\"]:\n unique_topics[prim_label] = topic[\"score\"]\n else:\n unique_topics[prim_label] = topic[\"score\"]\n\n # ranking topics by their score. High-scored topics go on top\n sort_t = sorted(unique_topics.items(), key=lambda v: v[1], reverse=True)\n #sort_t = sorted(found_topics.items(), key=lambda k: k[1]['score'], reverse=True)\n\n\n # perform\n vals = []\n for t_p in sort_t:\n vals.append(t_p[1]) #in 0, there is the topic, in 1 there is the info\n\n\n #### suppressing some warnings that can be raised by the kneed library\n warnings.filterwarnings(\"ignore\")\n try:\n x_vals = range(1,len(vals)+1)\n t_kn = KneeLocator(x_vals, vals, direction='decreasing')\n if t_kn.knee is None:\n #print(\"I performed a different identification of knee\")\n t_kn = KneeLocator(x_vals, vals, curve='convex', direction='decreasing')\n except ValueError:\n pass\n\n ##################### Pruning\n\n try:\n knee = int(t_kn.knee)\n except TypeError:\n knee = 0\n except UnboundLocalError:\n knee = 0\n\n if knee > 5:\n try:\n knee += 0\n except TypeError:\n print(\"ERROR: \",t_kn.knee,\" \",knee, \" \", len(sort_t))\n\n else:\n try:\n if sort_t[0][1] == sort_t[4][1]:\n top = sort_t[0][1]\n test_topics = [item[1] for item in sort_t if item[1]==top]\n knee = len(test_topics)\n\n else:\n knee = 5\n except IndexError:\n knee = len(sort_t)\n\n final_topics = []\n final_topics = [self.cso.get_topic_wu(sort_t[i][0]) for i in range(0,knee)]\n self.reset_explanation()\n self.explanation = {self.cso.topics_wu[sort_t[i][0]]: explanation[sort_t[i][0]] for i in range(0,knee)}\n\n return final_topics", "def get_result_table_and_info(cls):\n winning_dict = cls.get_winning_topics()\n winning_topics = winning_dict['winning_topics']\n runoff_poll_warning = winning_dict['runoff_poll_warning']\n\n # Create table\n result_table = []\n all_categories = sorted(Category.objects.all(), key=attrgetter('sum_of_votes', 'weight'), reverse=True)\n for category in all_categories:\n category_hoechstzahls = filter(lambda hoechstzahl: hoechstzahl.topic.category == category, cls.all_hoechstzahls)\n category_hoechstzahls.sort(key=lambda hoechstzahl: hoechstzahl.value, reverse=True)\n runoff_poll_warning = second_runoff_poll_check(runoff_poll_warning, category_hoechstzahls, winning_topics)\n category_hoechstzahls += (max(config['openslides_topicvoting_posts'], 3) - len(category_hoechstzahls)) * [None]\n result_table.append(category_hoechstzahls)\n\n # Return table and flags as dictionary\n return {'result_table': result_table,\n 'winning_topics': winning_topics,\n 'runoff_poll_warning': runoff_poll_warning,\n 'topic_post_warning': winning_dict['topic_post_warning']}", "def test_extract_topics_custom_cv(base_bertopic_custom_cv):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic_custom_cv._update_topic_size(documents)\n c_tf_idf = base_bertopic_custom_cv._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic_custom_cv.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 
5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def display_topics(df, n_rows=10, n_cols=12):\n\n exemplar_scores, hovers = topic_exemplars(df)\n top_columns = sorted(range(len(exemplar_scores)),\n key=lambda i: exemplar_scores[i],\n reverse=True)[:n_cols]\n #I comented this line Im not 100% sure what was the purpuse of this\n # topics = df.pivot(index='pos', columns='topic',values='word*').replace([None], [''], regex=True)\n topics = df.pivot(index='pos', columns='topic',values='word*')\n\n topics_display = topics[top_columns].head(n_rows)\n\n return topics_display, top_columns", "def count_mentioned_countries(data):\n countries_mentioned = {}\n countries = get_countries()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for country in countries:\n if country in (subject_words + message_words):\n if country in countries_mentioned:\n countries_mentioned[country] += 1\n else:\n countries_mentioned[country] = 1\n\n return pd.DataFrame.from_dict(countries_mentioned, orient=\"index\")" ]
[ "0.670414", "0.6682198", "0.6473323", "0.63052845", "0.6284073", "0.5883223", "0.58540165", "0.5768975", "0.56314296", "0.5595589", "0.55580306", "0.5541051", "0.5484164", "0.54825205", "0.5471465", "0.5443077", "0.53968257", "0.5374345", "0.5362987", "0.5333066", "0.5320533", "0.5308043", "0.53065175", "0.52841324", "0.5240766", "0.52356154", "0.52329373", "0.52273995", "0.52240163", "0.521626" ]
0.7382751
0
Plot a data frame as a bar chart
def plot_type_of_topic(data_frame: pb.DataFrame) -> None: plt.interactive(False) plt.figure() data_frame.plot(kind='bar', x= data_frame['TopicID']) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def bar_plot(df, data_pt):\n \n x=df.loc[data_pt]\n y= df.columns.tolist()\n sorte=x.tolist()\n a=sorted(zip(sorte, y))[-10:]\n y=[y for _, y in a]\n ## soru burda yapıp altı ona göre duzeliyecegim birde\n \n x = df[y].loc[data_pt]\n \n # Here we modify the tickangle of the xaxis, resulting in rotated labels.\n #title={'text': \"<b>Comparing features with Golden for Cycle {}\".format(cycle),\n # 'y':0.9,'x':0.5,'xanchor': 'center','yanchor': 'top'}\n\n \n trace = {'type': 'bar',\n 'orientation':'h',\n 'x' : x,\n 'y' : y}\n data = Data([trace])\n layout = {'title' : \"<b>Reconstruction error in each dimension for cycle{}\".format(data_pt),\n 'titlefont':{'size' : 20},\n 'xaxis' : {'title': '<b>Reconstruction Error',\n 'titlefont':{'size' : 20},\n 'tickangle': -45, 'tickfont': {'size':15} ,},\n \n 'yaxis' : {'title': '<b>Features',\n 'titlefont':{'size' : 20},\n 'tickfont': {'size':15},},\n 'margin' : {'l':100, 'r' : 1, 'b': 200, 't': 100, 'pad' : 1},\n 'height' : 600, 'width' : 800,\n }\n \n fig = Figure(data = data, layout = layout)\n \n return pyo.iplot(fig)", "def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)", "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def bar_plot(df, field_name, graph_title, threshold_value, x_axis_label, y_axis_label):\n\n x = df[field_name].value_counts().sort_values()\n x[x > threshold_value].plot(kind='barh', figsize=(12, 8), title=graph_title, x=x_axis_label, y=y_axis_label)\n return", "def bar(\n df,\n x=None,\n y=\"value\",\n bars=\"variable\",\n order=None,\n bars_order=None,\n orient=\"v\",\n legend=True,\n title=True,\n ax=None,\n cmap=None,\n **kwargs,\n):\n\n # default x-axis to time-col attribute from an IamDataFrame, else use \"year\"\n x = x or time_col_or_year(df)\n\n # cast to DataFrame if necessary\n # TODO: select only relevant meta columns\n if not isinstance(df, pd.DataFrame):\n df = df.as_pandas()\n\n for col in set(SORT_IDX) - set([x, bars]):\n if len(df[col].unique()) > 1:\n msg = \"Can not plot multiple {}s in bar plot with x={}, bars={}\"\n raise ValueError(msg.format(col, x, bars))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # long form to one column per bar group\n _df = reshape_mpl(df, x, y, bars, **{x: order, bars: bars_order})\n\n # explicitly get colors\n defaults = default_props(reset=True, 
num_colors=len(_df.columns), colormap=cmap)[\n \"color\"\n ]\n rc = run_control()\n color = []\n for key in _df.columns:\n c = next(defaults)\n if \"color\" in rc and bars in rc[\"color\"] and key in rc[\"color\"][bars]:\n c = rc[\"color\"][bars][key]\n color.append(c)\n\n # change year to str to prevent pandas/matplotlib from auto-ordering (#474)\n if _df.index.name == \"year\":\n _df.index = map(str, _df.index)\n\n # plot data\n kind = \"bar\" if orient.startswith(\"v\") else \"barh\"\n _df.plot(kind=kind, color=color, ax=ax, **kwargs)\n\n # add legend\n ax.legend(loc=\"center left\", bbox_to_anchor=(1.0, 0.5))\n if not legend:\n ax.legend_.remove()\n\n # add default labels if possible\n if orient == \"v\":\n ax.set_xlabel(x.capitalize())\n else:\n ax.set_ylabel(x.capitalize())\n units = df[\"unit\"].unique()\n if len(units) == 1 and y == \"value\":\n if orient == \"v\":\n ax.set_ylabel(units[0])\n else:\n ax.set_xlabel(units[0])\n\n # build a default title if possible\n _title = []\n for var in [\"model\", \"scenario\", \"region\", \"variable\"]:\n values = df[var].unique()\n if len(values) == 1:\n _title.append(\"{}: {}\".format(var, values[0]))\n if title and _title:\n title = \" \".join(_title) if title is True else title\n ax.set_title(title)\n\n return ax", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()", "def featuresBarPlot(barNames,barValues):\n plt.bar(range(0,len(barNames)),barValues)\n plt.xticks(range(0,len(barNames)), barNames,rotation='vertical')\n plt.show()", "def plot_class_balances(df, col):\n\n ser_counts = df[col].value_counts()\n ser_counts.plot.bar()\n plt.title(col + ' Counts \\n(classes={})'.format(ser_counts.shape[0]))\n \n plt.show()", "def plot_bv_bar(df, xcolname, ycolname, icol=0):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... 
box\n sns.barplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()", "def BarPlot(data,colormap='Paired',ax=None,headers='show',value_max=None,x_ticklabels_rotation=90,**kws):\r\n if ax is None:\r\n ax=plt.subplot(111)\r\n\r\n if value_max is None:\r\n value_max=data.sum(1).max()\r\n\r\n data.plot(kind='bar', stacked=True,colormap=colormap, ax=ax,**kws)\r\n ax.set_ylim((0,value_max))\r\n\r\n\r\n #reverse legend order\r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(reversed(handles),reversed(data.columns),bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\n #AXES\r\n if (headers is None or headers=='hide'):\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_xaxis().set_ticks([])\r\n elif headers=='show':\r\n plt.setp(ax.get_xticklabels(),rotation=x_ticklabels_rotation)\r\n ax.set_xlabel(None,visible=False)\r\n\r\n\r\n #plt.tight_layout()\r\n\r\n\r\n return ax", "def bar_chart(\n df,\n orientation='v',\n bar_width=None,\n opacity=0.9,\n textpos=None,\n linewidth=1,\n linecolor='#2C3347',\n marker_color=None,\n **kwargs):\n\n traces = []\n rng = df.index.size if orientation == 'v' else df.columns.size\n otn = orientation\n for i in range(rng):\n x = [str(x) for x in df.columns] if otn == 'v' else df.iloc[:, i]\n y = df.iloc[i] if otn == 'v' else [str(x) for x in df.index]\n text = df.iloc[i] if otn == 'v' else df.iloc[:, i]\n name = df.iloc[i].name if otn == 'v' else df.columns[i]\n\n preset_args = dict(\n x=x,\n y=y,\n text=text,\n textposition=textpos,\n marker=dict(\n opacity=opacity,\n color=marker_color,\n line=dict(\n color=linecolor,\n width=linewidth)),\n name=name,\n width=bar_width,\n orientation=orientation\n )\n\n all_args = {**preset_args, **kwargs}\n bar = go.Bar(all_args)\n traces.append(bar)\n\n return traces", "def plot_bar_chart(objects, data, title='', ylabel='', bar_color = 'blue'):\n y_pos = np.arange(len(objects))\n\n plt.bar(y_pos, data, align='center', alpha=0.5)\n plt.xticks(y_pos, objects, rotation='vertical')\n plt.ylabel(ylabel, fontsize=12)\n plt.title(title, fontsize=12)\n plt.ylim([0,1300])\n plt.bar(range(len(data)), data, color=bar_color)\n\n return plt.show()", "def bar_plot(data, xtitle, title):\n label = list(set(data))\n height = count_elements(data)\n height = [height[i] for i in label]\n plt.bar(label, height=height, width=0.8)\n plt.ylabel('frequency')\n plt.xlabel(xtitle)\n plt.xticks(label)\n plt.savefig('./figures/{}.png'.format(title))\n plt.close()", "def drawStackedBarPlot(df, column, hue):\n plt.style.use('default')\n plt.style.use('dark_background')\n p_table = pd.pivot_table(df, index=column, \n columns=hue, aggfunc='size')\n p_table = p_table.div(p_table.sum(axis=1), axis=0)\n p_table.plot.bar(stacked=True, figsize=(14,7))\n plt.xlabel('Spekraltyp')\n plt.ylabel('Anteil')\n plt.show()", "def barplot(bars, title='', upColor='blue', downColor='red'):\n import pandas as pd\n import matplotlib.pyplot as plt\n from matplotlib.lines import Line2D\n from matplotlib.patches import Rectangle\n\n if isinstance(bars, pd.DataFrame):\n ohlcTups = [\n tuple(v) for v in bars[['open', 'high', 'low', 'close']].values]\n elif bars and hasattr(bars[0], 'open_'):\n ohlcTups = [(b.open_, b.high, b.low, b.close) for b in bars]\n else:\n ohlcTups = [(b.open, b.high, b.low, b.close) for b in 
bars]\n\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.grid(True)\n fig.set_size_inches(10, 6)\n for n, (open_, high, low, close) in enumerate(ohlcTups):\n if close >= open_:\n color = upColor\n bodyHi, bodyLo = close, open_\n else:\n color = downColor\n bodyHi, bodyLo = open_, close\n line = Line2D(\n xdata=(n, n),\n ydata=(low, bodyLo),\n color=color,\n linewidth=1)\n ax.add_line(line)\n line = Line2D(\n xdata=(n, n),\n ydata=(high, bodyHi),\n color=color,\n linewidth=1)\n ax.add_line(line)\n rect = Rectangle(\n xy=(n - 0.3, bodyLo),\n width=0.6,\n height=bodyHi - bodyLo,\n edgecolor=color,\n facecolor=color,\n alpha=0.4,\n antialiased=True\n )\n ax.add_patch(rect)\n\n ax.autoscale_view()\n return fig", "def plot_data_stats(data):\n sns.set_style(\"dark\")\n f, ax = plt.subplots(figsize=(6, 15))\n\n ax = sns.barplot(x='tag', y='count', data=tags_freqs)\n\n ax.axes.set_title(\"POS Tags Frequencies\",fontsize=20)\n ax.set_xlabel(\"POS Tags\", fontsize=16)\n ax.set_ylabel(\"Counts\", fontsize=16)\n ax.tick_params(labelsize=12)\n\n plt.show()", "def barplot(data, field_name, field_categories):\n\n\tcategories, counts = np.unique(data[field_name], return_counts=True)\n\n\tfig = plt.figure(figsize=(4, 3))\n\taxes = fig.add_axes([0, 0, 1, 1]) # left, bottom, width, height (range 0 to 1)\n\taxes.bar(range(len(categories)), counts, fc=\"gray\") # fc is the face color\n\n\taxes.set_xlabel(\"\")\n\taxes.set_ylabel('Count')\n\taxes.set_title(field_name)\n\tfig.autofmt_xdate(rotation=45)\n\n\taxes.set_xticks(range(len(categories)))\n\taxes.set_xticklabels([field_categories[c] for c in categories]);", "def _bar_example_4(quantity_by_fruit):\n ch = chartify.Chart(x_axis_type=\"categorical\", blank_labels=True)\n ch.set_title(\"Vertical bar plot with labels\")\n ch.set_subtitle(\"Hidden y-axis\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.style.color_palette.reset_palette_order()\n ch.plot.text(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n text_column=\"quantity\",\n color_column=\"fruit\",\n )\n # Adjust the axis range to prevent clipping of the text labels.\n ch.axes.set_yaxis_range(0, 1200)\n ch.axes.hide_yaxis()\n ch.show(_OUTPUT_FORMAT)", "def plotify_bar(title, data):\n\n x, y, z, labels = [], [], [], []\n\n for d in reversed(data[:len(data) - 1]):\n x.append(f\"{d['settimana_del']:%d-%b}\\n{d['settimana_fino_al']:%d-%b}\")\n y.append(d['nuovi_positivi'])\n z.append(\"lightgrey\" if d['giorni'] < 7 else 'green' if d['delta'] <= 0 else 'red' )\n labels.append(human_format(d['nuovi_positivi']) if d['giorni'] == 7 else f\"{human_format(d['nuovi_positivi'])}\\n(in corso)\" )\n\n x_pos = np.arange(len(x))\n\n # create a new figure\n plt.figure()\n\n plt.title(title)\n\n # Create bars with different colors\n plt.bar(x_pos, y, color=z)\n\n # Create names on the x-axis\n plt.xticks(x_pos, x, rotation=40)\n\n\n # Text on the top of each bar\n x_ticks = plt.gca().get_xticks()\n for i in range(len(y)):\n text = data[i]\n plt.text(x = x_ticks[i], y = y[i]+5, s = labels[i], size = 9, horizontalalignment='center', verticalalignment='bottom')\n\n # prettify y values\n current_values = plt.gca().get_yticks()\n plt.gca().set_yticklabels(['{:n}'.format(int(x)) for x in current_values])\n\n # responsive layout\n plt.tight_layout()\n\n\n\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n\n ### Release memory\n # Clear the current axes.\n 
plt.cla() \n # Clear the current figure.\n plt.clf() \n # Closes all the figure windows.\n plt.close('all') \n # plt.close(fig)\n gc.collect()\n\n return buf", "def _bar_example_1(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot\")\n ch.set_subtitle(\"Automatically sorts by value counts.\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n )\n ch.show(_OUTPUT_FORMAT)", "def value_counts_plot(df):\n \n plt.figure(figsize=(15,10))\n \n #get rid of sort_index() to change the graph\n return df.value_counts().sort_index().plot(kind='bar')", "def message_genre_bar_chart(df):\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n return {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }", "def draw_bar(x_index, data_list, xticks, title, x_label, y_label):\n pyplot.bar(x_index, data_list)\n pyplot.xlabel(x_label)\n pyplot.ylabel(y_label)\n pyplot.xticks(x_index, xticks)\n pyplot.title(title)\n pyplot.show()\n pyplot.savefig()", "def plot(*args, **params):\n if len(args) == 1: # only support data for now\n if isinstance(args[0], list):\n bar_2d = Bar2D(data=args[0])\n bar_2d.plot()\n bar_2d.fig.show()\n else:\n bar_2d = Bar2D(**params)\n bar_2d.plot()\n bar_2d.fig.show()", "def _bar_example_3(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"categorical\")\n ch.set_title(\"Horizontal bar plot\")\n ch.set_subtitle(\"Horizontal with color grouping\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.show(_OUTPUT_FORMAT)", "def BarOverview(data):\n return dcc.Graph(id=\"BarOverview\", className=\"bar\", figure=dict(\n data=[go.Bar(\n x=data[\"frequencies\"],\n y=data[\"names\"],\n orientation='h',\n marker={\n 'color': '#ff4058'\n },\n )],\n layout=dict(\n title=\"<b>Most common Persons</b>\",\n font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),\n margin=dict(l=10, r=20, t=50, b=30),\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n xaxis=dict(tick0=0, dtick=max(data[\"frequencies\"])),\n yaxis=dict(ticks='outside',\n showgrid=True,\n showline=False,\n showticklabels=False),\n annotations=[dict(xref='paper', yref='y',\n x=0, y=yd,\n font=dict(\n color=\"#000000\",\n size=19\n ),\n text=str(yd),\n showarrow=False) for xd, yd in zip(data[\"frequencies\"], data[\"names\"])]\n )\n ))", "def plot_norm_bar(df, title, figsize=(12,7)):\n fig, ax = plt.subplots(ncols=1, figsize=figsize)\n fig.suptitle(title)\n cat_value_counts = df.fillna('missing').value_counts(normalize=True)\n sns.barplot(y = cat_value_counts.index, x= cat_value_counts.values*100)\n ax.set(xlabel= 'percentage', ylabel=str(df.name))\n \n plt.plot()\n\n return" ]
[ "0.72341543", "0.7051117", "0.7032588", "0.702026", "0.6910697", "0.6906154", "0.68671536", "0.67882943", "0.6782081", "0.6735545", "0.6699668", "0.6685477", "0.66498506", "0.663799", "0.6616477", "0.6604195", "0.6479067", "0.6469803", "0.64694345", "0.64263964", "0.6424513", "0.6419261", "0.640694", "0.64018095", "0.63064665", "0.63012624", "0.6273125", "0.6266547", "0.6259449", "0.6240214" ]
0.7253609
0
Returns the account for the given client. If it does not exist, a new one is created and returned
def get_account(self, client: int): try: return self.accounts[client] except KeyError: return self._create_account(client)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]", "def get_client(self, user_id: int, client_name: str) -> Client:\n return self.clients[user_id][client_name]", "def client(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/users/clients/{params['id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n client = result[\"response\"][\"result\"][\"client\"]\n client_obj = FreshbooksClient(\n accounting_systemid=client['accounting_systemid'], \n first_name=client['fname'],\n last_name=client['lname'],\n email=client['email'],\n vat_name=client['vat_name'],\n vat_number=client['vat_number'],\n home_phone=client['home_phone'],\n organization=client['organization'],\n username=client['username']\n )\n return client_obj.__dict__", "def get_account_for_user(cls, user):\n email = user.email()\n assert email\n key = '<%s>' % email\n # Since usually the account already exists, first try getting it\n # without the transaction implied by get_or_insert().\n account = cls.get_by_key_name(key)\n if account is not None:\n return account\n nickname = cls.create_nickname_for_user(user)\n return cls.get_or_insert(key, user=user, email=email, nickname=nickname,\n fresh=True)", "def account(self, account_code):\r\n return acc.Account(self, account_code)", "def get_client_by_id(self, client_id=None):\n # search client_id in list and return the client object\n for client in self.client_list:\n if client_id == client.client_id:\n return client.copy()\n\n # return empty client otherwise\n return Client()", "def get_account():\n\n bus = session_bus()\n\n goa_manager = bus.get_object(GOA_NAME, GOA_PATH)\n\n goa_objects = goa_manager.GetManagedObjects(dbus_interface=OBJECT_MANAGER)\n\n accounts = [\n obj for obj in goa_objects\n if obj != GOA_MANAGER_PATH\n ]\n\n if len(accounts) > 1:\n sys.exit(\"More than one account found.\")\n\n (account_path,) = accounts\n\n return bus.get_object(GOA_NAME, account_path)", "def get_account(self, account):\n \n pass", "def account(self, account_id):\r\n return resources.Account(self, account_id)", "def _GetAccountFromUser(self):\n name = self._GetAccountNameFromUser()\n number = self._GetAccountNumberFromUser()\n # Validate that the number is a number (assumes no alphabet characters in\n # the account number).\n if re.match(\"^[0-9]*$\", number) is None:\n raise ValueError(\"Account number is invalid: %r\" % number)\n return accounts_lib.Account(name, int(number))", "def getaccount(self, vergeaddress):\n return self.proxy.getaccount(vergeaddress)", "def get_account(self):\n return self._account", "def get_account(self):\n return self._account", "def client(self, id):\n return self.query(Client).filter(Client.id == id).one()", "def get_account(self, *args):\n\n account_data = api.get_account(\n *args,\n api_key=self.__creds.api_key_v2)\n\n return en.Account(creds=self.__creds, **account_data)", "def get_account(self, name):\n return self._accounts[name]", "def account(self, account_id: str):\n return get_from_list(self.accounts, \"id\", account_id)", "def get_cached_account(username, registry):\n cache_key = get_account_cache_key(username, registry)\n cache = registry.cache\n cached_account = cache.get(cache_key)\n return cached_account", "def account(cls, key, code):\n\n if key and code:\n utable = 
current.auth.settings.table_user\n query = (utable.registration_key == cls.keyhash(key, code))\n account = current.db(query).select(utable.ALL, limitby=(0, 1)).first()\n else:\n account = None\n\n return account", "def account_id():\n return client.get_caller_identity()['Account']", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def get_account():\n\n # get user\n user = g.user\n\n # response\n return jsonify({'user_account': UserAccountAdminSchema().dump(user)}), 200", "def __get_account(self, address):\n\t\tfor acct in self.wallet:\n\t\t\tif acct[\"address\"] == address:\n\t\t\t\treturn acct\n\t\traise ValueError(\"The given address does not exist in the bunkr-wallet\")", "def get_account_by_name(self, account_name):\n accounts = self.service_old.management().accounts().list().execute()\n\n account = None\n if accounts.get('items'):\n account = next(acnt for acnt in accounts.get('items') if acnt[\"name\"] == account_name)\n\n if account is None:\n log_msg = \"The account named \" + account_name + \" does not exist!\"\n print(log_msg)\n\n return account", "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def get_client(self, service, region, account):\n\n client = AwsApi.CLIENTS_CACHE.get((service, region, account))\n if client:\n return client # from cache\n\n if region == '*':\n eprint(\"warn: unknown region ('*'), using the default ('{}')\", self.default_region)\n region = self.default_region\n\n if account == '*':\n eprint(\"warn: unknown account ('*'), using default session\")\n client = self.session.client(\n service,\n region_name=region\n )\n elif account == self.default_account:\n client = self.session.client(\n service,\n region_name=region\n )\n elif self.args.no_input:\n eprint(\"warn: unknown account ('{}') and --no-input set, using default session\", account)\n client = self.session.client(\n service,\n region_name=region\n )\n else:\n account_config = self.config.setdefault('aws', {}).setdefault('accounts', {}).setdefault(account, {})\n if not 'profile' in account_config:\n account_config['profile'] = input(\"Enter configured AWS profile for {}: \".format(account))\n client = boto3.Session(profile_name=account_config['profile']).client(service, region_name=region)\n\n AwsApi.CLIENTS_CACHE[(service, region, account)] = client\n return client", "def get_client_id():\n\n return str(get_account().Get(GOA_ACCOUNT_OAUTH2, 'ClientId',\n dbus_interface=PROPERTIES))", "def get_or_create(account, account_name):\n if account.account == account_name:\n return account\n return realization.get_or_create(account, account_name)", "def account(self, account_id):\r\n return Account(self, account_id)", "def account(self):\n return Account(self)" ]
[ "0.66133344", "0.6345957", "0.6296915", "0.62183553", "0.6195077", "0.61447585", "0.6143067", "0.61124474", "0.6101005", "0.6082482", "0.6080354", "0.60321856", "0.60321856", "0.60195756", "0.5999535", "0.59913605", "0.5953246", "0.5937958", "0.5926711", "0.59228164", "0.58934146", "0.5786306", "0.575765", "0.57551455", "0.573389", "0.56322896", "0.5619452", "0.55649114", "0.55547", "0.55461085" ]
0.9063625
0
Calculate Linke turbidity using Kasten pyrheliometric formula. Note that broadband aerosol optical depth (AOD) can be approximated by AOD measured at 700 nm according to Molineaux [4]. Bird and Hulstrom offer an alternate approximation using AOD measured at 380 nm and 500 nm. Based on original implementation by Armel Oumbe.
def kasten96_lt(airmass_absolute, precipitable_water, aod_bb): # "From numerically integrated spectral simulations done with Modtran # (Berk, 1989), Molineaux (1998) obtained for the broadband optical depth # of a clean and dry atmospshere (fictitious atmosphere that comprises only # the effects of Rayleigh scattering and absorption by the atmosphere gases # other than the water vapor) the following expression" # - P. Ineichen (2008) delta_cda = -0.101 + 0.235 * airmass_absolute ** (-0.16) # "and the broadband water vapor optical depth where pwat is the integrated # precipitable water vapor content of the atmosphere expressed in cm and am # the optical air mass. The precision of these fits is better than 1% when # compared with Modtran simulations in the range 1 < am < 5 and # 0 < pwat < 5 cm at sea level" - P. Ineichen (2008) delta_w = 0.112 * airmass_absolute ** (-0.55) * precipitable_water ** 0.34 # broadband AOD delta_a = aod_bb # "Then using the Kasten pyrheliometric formula (1980, 1996), the Linke # turbidity at am = 2 can be written. The extension of the Linke turbidity # coefficient to other values of air mass was published by Ineichen and # Perez (2002)" - P. Ineichen (2008) lt = -(9.4 + 0.9 * airmass_absolute) * np.log( np.exp(-airmass_absolute * (delta_cda + delta_w + delta_a)) ) / airmass_absolute # filter out of extrapolated values return lt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimate_lwdown(tairK, rh):\n zeroC = 273.15\n\n sat_vapress = 611.2 * np.exp(17.67 * ((tairK - zeroC) / (tairK - 29.65)))\n vapress = np.maximum(5.0, rh) / 100. * sat_vapress\n lw_down = 2.648 * tairK + 0.0346 * vapress - 474.0\n\n return lw_down", "def derive_RiekeLebofsky(wavelength):\n filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', \n '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', \n '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]']\n #wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.25, 1.635, 2.2, \n # 3.77, 4.68, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n # 11.5, 12.0, 12.5, 13.0])\n \n # Wavelengths from Nishiyama+09 plot of RL+85 law...slightly different than standard, \n # drop N filter\n wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.17, 1.57, 2.12, \n 3.40, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n 11.5, 12.0, 12.5, 13.0])\n A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112,\n 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083,\n 0.074, 0.060, 0.047, 0.037, 0.030, 0.027])\n # Want to change this from A/Av to A/AK\n k_ind = np.where(np.array(filters) == 'K')\n Ak_Av = A_Av[k_ind]\n Av_Ak = 1.0 / Ak_Av\n\n A_Ak = A_Av * Av_Ak\n \n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_Ak, k=3, s=0)\n A_Ak_at_wave = interpolate.splev(wavelength, spline_interp)\n\n return A_Ak_at_wave", "def getTauLos(losdens, kap):\n rhocell = losdens['valcell']\n nzs =len(rhocell)\n nz = nzs + 1\n zlos = losdens['zlos'] # cell\n zlosi = losdens['zlosi'] # wall\n\n tau, dtaustg, taustg = getTauz(zlosi, zlos, rhocell, kap)\n\n\n# dzlos = zlosi[1:] - zlosi[:-1] # zlos is in the same direction as tau\n# dtaustg = kap * rhocell * dzlos\n\n# tau = np.zeros(nz, dtype=np.float64)\n# for iz in range(nzs):\n# tau[iz+1] = sum(dtaustg[:iz+1])\n\n# taustg = 0.5*(tau[1:] + tau[:-1])\n\n return tau, dtaustg, taustg", "def calc_h_lat(dry_bulb_C, humidity_ratio_out_kgperkg):\n\n h_kJ_kg = humidity_ratio_out_kgperkg * (dry_bulb_C * CPW_kJ_kgC + h_we_kJ_kg)\n\n return h_kJ_kg", "def planckwavelen(wavel,Temp):\n wavel=wavel*1.e-6 #convert to meters\n c1=2.*h*c**2.\n c2=h*c/kb\n Blambda=1.e-6*c1/(wavel**5.*(np.exp(c2/(wavel*Temp)) -1))\n return Blambda", "def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n 
dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. / 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. + (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) 
...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if 
domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. * np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. 
* np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd", "def k_Wa92(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.31 * U2) * (660 / Sc) ** 0.5\n\n return k", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def test_klauder(self):\n ideal = np.array([0.14899879, -0.16633309, -0.42806931, 0.16605633,\n 0.70769336, 0.16605633, -0.42806931, -0.16633309])\n actual = misc.klauder(8)\n np.testing.assert_allclose(ideal, actual, atol=1e-8, rtol=1e-8)", "def calc_TEB(Imap_name='HFI_SkyMap_353_2048_R2.02_full.fits',\n Pmap_name='HFI_SkyMap_353_2048_R2.02_full.fits',\n nus=None, fwhm=0.063, nside=16, lmax=100,\n lmaps_only=False, filename=None):\n\n # read from file if it's there\n if filename is None:\n filename = 'bispectrum_lmax{}'.format(lmax)\n if nus is not None:\n filename += '_{}-{}-{}GHz.npy'.format(nus[0],nus[1],nus[1])\n else:\n filename += '_{}'.format(Imap_name[-5])\n if Imap_name != Pmap_name:\n filename += '_{}.npy'.format(Pmap_name[-5])\n else:\n filename += '.npy'\n print 'looking for {} ...'.format(filename)\n if os.path.exists(filename) and not lmaps_only:\n bispectrum = np.load(filename, 'r')\n return bispectrum\n\n # compute it, if the file doesn't exist\n if nus is not None:\n Imap_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nus[0])\n Pmap_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nus[1])\n title = '$I_{%i} P^2_{%i}$ (equilateral)' % (nus[0],nus[1])\n \n Imap = prepare_map( Imap_name, field=0,\n nside_out=nside, fwhm=fwhm )\n Tlm = hp.map2alm( Imap, lmax=lmax )\n\n Qmap, Umap = prepare_map( Pmap_name, field=(1,2),\n nside_out=nside, fwhm=fwhm )\n \n \n Elm,Blm = hp.map2alm_spin( (Qmap,Umap), 2, lmax=lmax )\n\n if lmax is None:\n lmax = hp.sphtfunc.Alm.getlmax(len(Tlm))\n ls, ms = hp.sphtfunc.Alm.getlm(lmax,np.arange(len(Tlm)))\n lmin = ls.min()\n mapsize = len(Imap)\n pixelsize = hp.pixelfunc.nside2pixarea(nside)\n \n Ylm = calc_Ylm(Imap, ls, ms)\n\n\n #return Ylm, Tlm, ls, ms\n print 'calculating Tl,El,Bl ...'\n \n Tl = sum_over_m(Tlm, Ylm, ls,\n lmax=lmax, lmin=lmin, mapsize=mapsize)\n El = sum_over_m(Elm, Ylm, ls,\n lmax=lmax, lmin=lmin, mapsize=mapsize)\n Bl = sum_over_m(Blm, Ylm, ls,\n lmax=lmax, lmin=lmin, mapsize=mapsize)\n\n if lmaps_only:\n return Tl,El,Bl\n \n hs = get_hs(lmin=lmin, lmax=lmax)\n\n print 'calculating bispectrum ...'\n bispectrum = calc_bispectrum(Tl, El, Bl, hs,\n pixelsize,\n lmax=lmax, lmin=lmin,\n mapsize=mapsize)\n clean_bispectrum_of_naninf(bispectrum, hs, inplace=True)\n np.save(filename, bispectrum)\n return bispectrum", "def PlankFunction(wavelen,T=5778.):\n\n c1=1.191042E8\n c2=1.4387752E4\n L=c1/(wavelen**5*(np.exp(c2/(wavelen*T))-1))\n return L", "def k_Li86(wind_ms, temp_C):\n from numpy import zeros_like\n\n U = wind_ms\n T 
= temp_C\n\n Sc = schmidt_number(T)\n k = zeros_like(temp_C)\n\n i1 = U <= 3.6\n i2 = (U > 3.6) & (U < 13.0)\n i3 = U >= 13.0\n\n k[i1] = (0.17 * U[i1]) * (Sc[i1] / 600) ** (-2.0 / 3.0)\n k[i2] = ((U[i2] - 3.4) * 2.8) * (600 / Sc[i2]) ** 0.5\n k[i3] = ((U[i3] - 8.4) * 5.9) * (600 / Sc[i3]) ** 0.5\n\n return k", "def wind_chill(T_a, v):\r\n return 13.12 + 0.6215*(T_a) - 11.37*(v)**0.16 + 0.3965*(T_a)*(v)**0.16", "def moonlongitude(time):\n B0 = 481267.8809\n C0 = 218.3162\n # fmt: off\n A = np.array([62888.e-4, 12740.e-4, 6583.e-4, 2136.e-4, 1851.e-4, \\\n 1144.e-4, 588.e-4, 571.e-4, 533.e-4, 458.e-4, 409.e-4, \\\n 347.e-4, 304.e-4, 154.e-4, 125.e-4, 110.e-4, 107.e-4, \\\n 100.e-4, 85.e-4, 79.e-4, 68.e-4, 52.e-4, 50.e-4, 40.e-4, \\\n 40.e-4, 40.e-4, 38.e-4, 37.e-4, 28.e-4, 27.e-4, 26.e-4, \\\n 24.e-4, 23.e-4, 22.e-4, 21.e-4, 21.e-4, 21.e-4, 18.e-4, \\\n 16.e-4, 12.e-4, 11.e-4, 9.e-4, 8.e-4, 7.e-4, 7.e-4, \\\n 7.e-4, 7.e-4, 6.e-4, 6.e-4, 5.e-4, 5.e-4, 5.e-4, \\\n 4.e-4, 4.e-4, 3.e-4, 3.e-4, 3.e-4, 3.e-4, 3.e-4, \\\n 3.e-4, 3.e-4])\n B = np.array([477198.868, 413335.35, 890534.22, 954397.74, \\\n 35999.05, 966404.0, 63863.5, 377336.3, \\\n 1367733.1, 854535.2, 441199.8, 445267.1, \\\n 513197.9, 75870, 1443603, 489205, 1303870, \\\n 1431597, 826671, 449334, 926533, 31932, \\\n 481266, 1331734, 1844932, 133, 1781068, \\\n 541062, 1934, 918399, 1379739, 99863, \\\n 922466, 818536, 990397, 71998, 341337, \\\n 401329, 1856938, 1267871, 1920802, 858602, \\\n 1403732, 790672, 405201, 485333, 27864, \\\n 111869, 2258267, 1908795, 1745069, 509131, \\\n 39871, 12006, 958465, 381404, 349472, \\\n 1808933, 549197, 4067, 2322131.])\n C = np.array([44.963, 10.74, 145.70, 179.93, 87.53, 276.5, \\\n 124.2, 13.2, 280.7, 148.2, 47.4, 27.9, 222.5, \\\n 41, 52, 142, 246, 315, 111, 188, \\\n 323, 107, 205, 283, 56, 29, 21, \\\n 259, 145, 182, 17, 122, 163, 151, \\\n 357, 85, 16, 274, 152, 249, 186, \\\n 129, 98, 114, 50, 186, 127, 38, \\\n 156, 90, 24, 242, 223, 187, 340, \\\n 354, 337, 58, 220, 70, 191])\n # fmt: on\n RAD = 0.0174532925199433\n tempb = (B * time + C) * RAD\n amp = A * np.cos(tempb)\n moonlon = np.sum(amp)\n moonlon = (moonlon + B0 * time + C0) * RAD\n return moonlon", "def ReadTinker():\n # Total Potential Energy : {f} Kcal/mole\n total_line = \" Total Potential Energy :\"\n with open('LICHM_TINKEREnergy_0.log') as f:\n for line in f:\n if line.startswith(total_line):\n # print(line)\n TinkE = re.findall(r'\\-\\d+\\.*\\d*', line)\n TinkE = float(TinkE[0])\n # if AMOEBA == True:\n # if line.startswith(\"Polarization\"):\n # Epol = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Epol = float(Epol[0])\n # elif line.startswith(\"Implicit Solvation\")\n # Esolv = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Esolv = float(Esolv[0])\n f.close()\n # if AMOEBA == True:\n # TINKERPolForces = EPol + ESolv\n # TinkE += TINKERPolForces\n #\n TinkE *= kcal2ev\n return TinkE", "def earth_tide(theta, lamda, gtime):\n\n global dsz, dcz, dsl, dcl, ssz, scz, ssl, scl, dpar, sdist # bpos common block\n global h, k, l # love common block\n h = [0.6114, 0.2891, 0.175]\n k = [0.304, 0.09421, 0.043]\n l = [0.0832, 0.0145, 0.0103]\n\n global azt, azs # azimut common block\n global etmut # tdiff common block\n global moon # sunny common block\n moon = 0\n # hardwire these - you can only send it ONE droptime\n deltat = 1\n NPT = 1\n\n temp_time = num2date(gtime)\n\n YY = temp_time.year\n MO = temp_time.month\n DD = temp_time.day\n HH = temp_time.hour\n MM = temp_time.minute\n SS = temp_time.second\n # Initialize variables\n irl = 
1\n iflag = 0\n ntotl = 1\n iget = [0, 0, 0, 0, 0, 0, 0] # ' !!!\n ispc = [0, 0, 0, 0] # ' !!!\n ntw = [1, 0, 0] # ' !!!\n ioptn = 't'\n ielement = 0\n # \tdata statements for input and output unit numbers (on terminal I/O)\n inun = 5\n ioun = 6\n nptpb = 6\n\n yr1 = YY - 1900\n day1 = date2num(datetime(YY, MO, DD))\n # \tfind times in hours from 0 hr, 1 jan 1900\n # matlab:\n ts = (\n SS / 3600\n + MM / 60\n + HH\n + 24 * (day1 - 1)\n + 8760 * yr1\n + 24 * np.fix((yr1 - 1) / 4)\n )\n # python:\n dj = date_to_julian_day(datetime(YY, MO, DD))\n djref = date_to_julian_day(datetime(1899, 12, 31, 0, 0, 0))\n delta_dj = (\n dj - djref\n ) # difference in days from current date (0hr) to 0hr, 1 jan 1900\n delta_djhr = float(delta_dj) * 24.0 + HH - 12.0 + MM / 60.0 + SS / 3600.0\n te = ts + (NPT - 1) * deltat / 3600\n d = deltat / 3600\n # terms=(te-ts)/d + 1\n terms = NPT\n\n # done asking questions - begin execution\n i = 1\n tt = ts\n sph(theta, lamda, 0)\n etmut = 41.184 + yr1 - 70\n # matlab:\n # t = (tt+12 + (etmut/3600))/876600\n t = (delta_djhr + etmut / 3600) / 876600\n # t is ephemeris time in julian centuries from 12 hr 0 jan 1900\n ephem(t)\n\n # calculate normalized gravity tides\n [grav, tilt, strain, gdc] = elastd(ntw)\n\n gravtide = 1.0e5 * grav\n # convert m/s² to mgal: 1m/s² = 100 gal = 100 000 mgal\n\n iflag = 1\n\n iterms = np.fix(terms)\n i = 1\n return gravtide", "def derive_Hosek18b(wavelength):\n # Extinction law definition\n wave = np.array([0.8059, 0.962, 1.25, 1.53, 2.14, 3.545])\n A_AKs = np.array([7.943, 5.715, 3.142, 2.04, 1.0, 0.50])\n \n # Following Hosek+18, Interpolate over the curve with cubic spline interpolation\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_AKs_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # This curve already assumes A_Ks = 1.0, so we can go straight to\n # output \n return A_AKs_at_wave", "def halflife(self, humidity: _VectorisedFloat, inside_temp: _VectorisedFloat) -> _VectorisedFloat:\n # Updated to use the formula from Dabish et al. with correction https://doi.org/10.1080/02786826.2020.1829536\n # with a maximum at hl = 6.43 (compensate for the negative decay values in the paper). 
\n # Note that humidity is in percentage and inside_temp in °C.\n # factor np.log(2) -> decay rate to half-life; factor 60 -> minutes to hours\n hl_calc = ((np.log(2)/((0.16030 + 0.04018*(((inside_temp-273.15)-20.615)/10.585)\n +0.02176*(((humidity*100)-45.235)/28.665)\n -0.14369\n -0.02636*((inside_temp-273.15)-20.615)/10.585)))/60)\n \n return np.where(hl_calc <= 0, 6.43, np.minimum(6.43, hl_calc))", "def kth_func(Th, ThS, lbd, ksat):\n if Th < 0.0:\n # rwarn(\"water content < 0 IN kth_func\")\n Th = 0.0\n kth = ksat * (Th / ThS) ** (3 + (2 / lbd))\n\n return kth", "def calc_h_sen(dry_bulb_C):\n\n h_kJ_kg = dry_bulb_C * CPA_kJ_kgC\n\n return h_kJ_kg", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def stockdon2006(H,L,B):\n \n # Make sure parameters are double\n H = np.double(H)\n L = np.double(L)\n B = np.double(B)\n \n # Compute incident swash (equation 11) \n incSwash = 1.1 / 2 * 0.75 * B * (H*L)**0.5\n \n # Infragravity swash (Equation 12)\n igSwash = 1.1 / 2 * 0.06 * (H*L)**0.5\n \n # Compute R2% (Equation 19)\n setup = 1.1 * 0.35 * B * ((H * L)**0.5)\n swash = 1.1 / 2.0 * (H*L * (0.563 * B**2 + 0.004))**0.5 \n r2 = setup + swash\n \n return r2,setup,incSwash,igSwash", "def planck_w(lam, T):\n return ((2*h*c**2)/(lam**5))*(1./(np.exp((h*c)/(lam*k*T))-1))", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 
19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def Tloken(self, x):\n return 11.2 * 
(self.r500*0.7)**2. * ( 1. + 0.75*x)**(-1.6)", "def B_func(Th33, Th1500):\n\n D = ln(Th33) - ln(Th1500)\n B = (ln(1500) - ln(33)) / D\n\n def lbd_func(C):\n \"\"\"return the slope of logarithmic tension-moisture curve\"\"\"\n if C == 0:\n return 0.0\n lbd = 1 / C\n return lbd\n\n return lbd_func(B)", "def planckian(temp, wavelength):\n if wavelength==560: return 100.0\n if temp<60: temp=60 # For simplicity, in very low temperature\n num = wavelength**(-5)\n try:\n v=num / (math.exp(0.0143877687750393/(wavelength*(10**(-9))*temp)) - 1)\n except:\n print(temp)\n print(wavelength)\n raise ValueError\n v2=(560.0**(-5)) / (math.exp(0.0143877687750393/(560.0*(10**(-9))*temp)) - 1)\n return v*100.0/v2", "def kervella(magB=None, magV=None, magK=None):\n if magB is None or np.isnan(magB) or magB > 49:\n magB = np.nan\n if magV is None or np.isnan(magV) or magV > 49:\n magV = np.nan\n if magK is None or np.isnan(magK) or magK > 49:\n magK = np.nan\n const1 = np.array([0.0755, 0.0535])\n const2 = np.array([0.5170, 0.5159])\n mag = np.array([magV, magB])\n vals = 10**(const1*(mag-magK)+const2-0.2*magK)\n diameter = {}\n if not np.isnan(vals[0]):\n diameter['V'] = vals[0]*u.mas\n if not np.isnan(vals[1]):\n diameter['B'] = vals[1]*u.mas\n return diameter", "def ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n # g = gbar * n * h\n v = voltage_clamp_func(t,voltage_clamp_params)\n n = Y[0]\n h = Y[1]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4\n ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10\n\t\t\n hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))\n htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10\n \n # Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms\n if htau < 5.0:\n htau = 5.0\n\n dn = (ninf-n)/ntau\n dh = (hinf-h)/htau\n \n return [dn,dh]", "async def wetbulb(self, temp, humidity, pressure):\n t = float(temp)\n rh = float(humidity)\n p = float(pressure)\n\n # Variables\n edifference = 1\n twguess = 0\n previoussign = 1\n incr = 10\n es = 6.112 * math.exp(17.67 * t / (t + 243.5))\n e2 = es * (rh / 100)\n\n while (abs(edifference) > 0.005):\n ewguess = 6.112 * math.exp((17.67 * twguess) / (twguess + 243.5))\n eguess = ewguess - p * (t - twguess) * 0.00066 * (1 + (0.00115 * twguess))\n edifference = e2 - eguess\n if edifference == 0:\n break\n\n if edifference < 0:\n cursign = -1\n if (cursign != previoussign):\n previoussign = cursign\n incr = incr / 10\n else:\n incr = incr\n else:\n cursign = 1\n if (cursign != previoussign):\n previoussign = cursign\n incr = incr / 10\n else:\n incr = incr\n\n twguess = twguess + incr * previoussign\n\n return await self.temperature(twguess)" ]
[ "0.6074756", "0.60489684", "0.6021683", "0.58620036", "0.5819184", "0.5817391", "0.5701168", "0.56542647", "0.5622918", "0.5620226", "0.5618999", "0.55917037", "0.55595964", "0.5550017", "0.5537136", "0.5530169", "0.5526478", "0.5487843", "0.5477624", "0.5460396", "0.54503703", "0.54500014", "0.54438895", "0.54244804", "0.54152185", "0.5406566", "0.54061073", "0.53903854", "0.5387455", "0.5368951" ]
0.64853966
0
input h (meters) and the coefficients for the linear profile for the free troposphere theta (ft_intercept (K) and slope gamma (K/m)) return the free troposphere theta at height h
def theta_ft(h,ft_intercept,gamma):
    theta_top = ft_intercept + h*gamma
    return theta_top
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_theta_surf_hex_h(theta_hs_in_h: float, theta_hs_out_h: float, v_hs: float) -> float:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n # sensible heating capacity of heat source for heating, W\n q_hs_h = (theta_hs_out_h - theta_hs_in_h) * c * rho * v_hs / 3600\n\n # sensible heat transfer coefficient on the surface of the heat exchanger of the internal unit, W/m2K\n alpha_c_hex_h = get_alpha_c_hex_h(v_hs)\n\n # effective area for heat exchange of the surface area of heat exchanger or the internal unit, m2\n a_e_hex = get_a_e_hex()\n\n return (theta_hs_in_h + theta_hs_out_h) / 2 + q_hs_h / (a_e_hex * alpha_c_hex_h)", "def trapezoidal(f, x0, h):\n return (h/2.0 * (f(x0)+f(x0+h)))", "def get_theta_surf_hex_test_h(theta_hs_in_h: float, q_hs_h: float, v_hs: float) -> float:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n theta_hs_out_h = theta_hs_in_h + q_hs_h / (c * rho * v_hs) * 3600\n\n # sensible heat transfer coefficient on the surface of the heat exchanger of the internal unit, W/m2K\n alpha_c_hex_h = get_alpha_c_hex_h(v_hs)\n\n # effective area for heat exchange of the surface area of heat exchanger or the internal unit, m2\n a_e_hex = get_a_e_hex()\n\n return (theta_hs_in_h + theta_hs_out_h) / 2 + q_hs_h / (a_e_hex * alpha_c_hex_h)", "def two_theta_hkl(self, H, K, L):\n return self.unit_cell.two_theta((H, K, L), self.wavelength, deg=True)", "def curve_with_hillcoef(ph, pka, hillcoef):\n# return hillcoef * ph - pka\n return 1/(1+10**(hillcoef*(pka-ph)))", "def get_metrics(H):\n theta = np.arctan2(H[0,1], H[0,0])\n scale = H[0,0] / np.cos(theta)\n tx = H[0,2]\n ty = H[1,2]\t\n return tx,ty,theta", "def phitheta(loc):\n x = loc[0]\n y = loc[1]\n z = loc[2]\n r = sqrt(x**2 + y**2 + z**2)\n theta = arcsin(z/r)\n phi = arctan2(y,x)\n return(phi, theta)", "def _step(self, t, y, h):\n # We must use solvers / implicit form\n f_pn1 = lambda a_n1: (y + h*self.v + (h**2 / 2.0) * \\\n ((1.0 - 2.*self.beta)*self.a + 2.*self.beta*a_n1))\n f_vn1 = lambda a_n1: (self.v + h*((1.0-self.gamma)*self.a + self.gamma*a_n1))\n def f_an1(a_n1):\n f_n1 = self.f(t+h,f_pn1(a_n1),f_vn1(a_n1))\n f_n = self.f(t,y,self.v,)\n return a_n1 - ((1.0+self.alpha)*f_n1 - self.alpha*f_n)\n\n a = self.solver(f_an1, self.a)\n y = f_pn1(a) # Calculate and store new variables. 
\n self.v = f_vn1(a)\n self.a = a\n return t+h, y", "def __s_polynomial(g, h):\n\n deg_g = __multidegree(g)\n deg_h = __multidegree(h)\n max_deg = map(max, zip(deg_g, deg_h))\n R = g.parent()\n\n # Builds a polynomial with the variables raised to max_deg, in order\n vars = map(R, R.variable_names())\n x_pow_max_deg = reduce(operator.mul, [x ** d for (d, x) in zip(max_deg, vars)], R(1))\n\n quo_g, _ = x_pow_max_deg.quo_rem(g.lt())\n quo_h, _ = x_pow_max_deg.quo_rem(h.lt())\n return quo_g * g - quo_h * h", "def ab2rhotheta(a, b):\n \"\"\" also : y - ax - b = 0 \"\"\"\n \"\"\" y*sin(theta) + x*cos(theta) - rho = 0 \"\"\"\n #print(\"a: %f b: %f\" % (a, b))\n theta = math.atan(a) + math.pi/2.0\n rho = b*math.sin(theta)\n #print(\"a: %f b: %f rho: %f theta: %f\" % (a, b, rho, theta))\n return (rho, theta)", "def alkTphosfac(hguess,ks):\n #mick - first estimate of contribution from phosphate\n #mick based on Dickson and Goyet\n h3po4g,h2po4g,hpo4g,po4g = phosfracs(hguess,ks)\n return h3po4g-hpo4g-2*po4g", "def get_desired_heading(self, h, p):\n heading_vector = [self.alpha*h[0] + self.beta*p[0],\n self.alpha*h[1] + self.beta*p[1]]\n a = [(heading_vector[0]/linalg.norm(heading_vector)),\n (heading_vector[1]/linalg.norm(heading_vector))]\n return a", "def H1(self,kx,ky):\n return -2.*self.t2*np.cos(self.phi)*(np.cos(3.*kx/2.+np.sqrt(3.)*ky/2.)+np.cos(-3.*kx/2.+np.sqrt(3.)*ky/2.)+np.cos(-np.sqrt(3.)*ky))", "def get_theta_hs_out_max_h(\n theta_d_hs_in: np.ndarray, q_hs_max_h: np.ndarray, v_d_supply: np.ndarray) -> np.ndarray:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n return np.minimum(theta_d_hs_in + q_hs_max_h / (c * rho * np.sum(v_d_supply, axis=0)) * 10 ** 6, 45.0)", "def cost(self, h_theta, y):\n \n return -y*np.log(h_theta) - (1. - y)*np.log(1. - h_theta)", "def mort(self, h):\n return(self.mu_bg +\\\n (1.0 - self.mu_bg) * self.pr_P * self.p_att *\\\n (1 - h**self.ap))", "def forward(self, h, r, t):\n h_e, r_e, t_e = self.embed(h, r, t)\n\n norm_h_e = F.normalize(h_e, p=2, dim=-1)\n norm_r_e = F.normalize(r_e, p=2, dim=-1)\n norm_t_e = F.normalize(t_e, p=2, dim=-1)\n\n r_theta = self.theta[r]\n\n if self.l1_flag:\n return r_theta*torch.norm(norm_h_e + norm_r_e - norm_t_e, p=1, dim=-1)\n\n return r_theta*torch.norm(norm_h_e + norm_r_e - norm_t_e, p=2, dim=-1)", "def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n # Using u = ubar to avoid algeabric loops\n \n y = self.plant.h( x , self.plant.ubar , t )\n \n return y", "def lambert_eqarea(khi,phi):\n r = 2 * np.sin(khi/2.)\n th = phi\n return r, th", "def get_h0(self, t):\n return self.h0 * np.sin(2 * np.pi * t / self.Pmod + self.Pmod_phi)", "def Psi(l,m,theta,phi):\n if numpy.isscalar(theta): \n theta=numpy.array([[theta]])\n phi=numpy.array([[phi]])\n Psilm_th=numpy.zeros(theta.shape,dtype=complex)\n Psilm_ph=numpy.zeros(theta.shape,dtype=complex)\n x=numpy.cos(theta)\n thetaNonZerosIdx=numpy.where(theta!=0.0)\n if len(thetaNonZerosIdx[0]) != 0:\n Ylm=scipy.special.sph_harm(m,l,phi[thetaNonZerosIdx],theta[thetaNonZerosIdx])\n #Compute derivative of sphrHarm function w.r.t. 
theta:\n if l>=numpy.abs(m):\n Plmpo=legendreLM(l,m+1,x[thetaNonZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/float(math.factorial(l+m)))*Plmpo*numpy.exp(1j*m*phi[thetaNonZerosIdx])\n #YlmPmpo=sqrt((l-m)*(l+m+1))*spharm(l,m+1,theta,phi)*exp(-i*phi) %Should be equivalent to above formula.\n dtYlm=+YlmPmpo+m*x[thetaNonZerosIdx]*Ylm/numpy.sin(theta[thetaNonZerosIdx])\n # thetZerInd=[find(theta==0); find(theta==pi)]\n # dtYlm(thetZerInd)=0; %This is a fudge to remove NaNs\n else:\n dtYlm=numpy.zeros(theta[thetaNonZerosIdx].shape,dtype=complex)\n\n #dtYlm=spharmDtheta(l,m,theta,phi)\n\n Psilm_ph[thetaNonZerosIdx]=+1j*m/numpy.sin(theta[thetaNonZerosIdx])*Ylm\n Psilm_th[thetaNonZerosIdx]=+dtYlm\n #Ref: http://mathworld.wolfram.com/VectorSphericalHarmonic.html\n\n thetaZerosIdx=numpy.where(theta==0.0)\n if len(thetaZerosIdx[0]) != 0:\n if numpy.abs(m)==1:\n Yl1B=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*PBl1(l,m)*numpy.exp(1j*m*phi[thetaZerosIdx])\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+m*Yl1B\n Psilm_ph[thetaZerosIdx]=+1j*m*Yl1B\n Psilm_th[thetaZerosIdx]=+dtYlm\n else:\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+0\n Psilm_ph[thetaZerosIdx]=0\n Psilm_th[thetaZerosIdx]=+dtYlm\n return Psilm_th,Psilm_ph", "def phireturn(xhat0, tof):\n\t\n\t\tstoptime = tof\n\t\tnumpoints = 2\n\t\t#Integration time array:\n\t\tt = [stoptime * float(i) / (numpoints - 1) for i in range(numpoints)]\n\t\t\n\t\txsol = twomode.intfull(xhat0, t, abserror=1.0e-14, relerror=1.0e-12)\n\t\t#Phase of the first mode is the slice phase\n\t\tphi = np.angle(xsol[1,0] + 1j*xsol[1,1]) \t\n\t\t\n\t\treturn -phi", "def statsi(h):\n\n # Define constants\n zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e20])\n Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65,252.65, 180.65, 180.65])\n g = 9.80665\n R = 287.0528\n Re = 6346766.0\n Psa = 101325.0\n\n # Calculate geopotential altitude\n z = Re*h/(Re+h)\n\n # Loop through atmosphere layers\n for i in range(8):\n \n # Calculate layer temperature gradient\n Lt = -(Tsa[i+1]-Tsa[i])/(zsa[i+1]-zsa[i])\n\n # If no temperature gradient\n if Lt == 0.0:\n\n # Are we in this layer of the atmosphere?\n if z <= zsa[i+1]:\n t = Tsa[i] # Temp isn't changing\n p = Psa*np.exp(-g*(z-zsa[i])/R/Tsa[i])\n d = p/R/t\n break\n\n # We need to go higher\n else:\n Psa *= np.exp(-g*(zsa[i+1]-zsa[i])/R/Tsa[i])\n\n # Temperature gradient\n else:\n ex = g/R/Lt\n if z <= zsa[i+1]:\n t = Tsa[i]-Lt*(z-zsa[i])\n p = Psa*(t/Tsa[i])**ex\n d = p/R/t\n break\n else:\n Psa *= (Tsa[i+1]/Tsa[i])**ex\n\n # We have left the atmosphere...\n else:\n t = Tsa[-1]\n p = 0.0\n d = 0.0\n\n return z, t, p, d", "def calcTrh(N, rh, m, G, gamma=0.02):\n return 0.138*N**0.5*rh**1.5/(m**0.5*np.log(gamma*N)*G**0.5)", "def Hstep_cost_function(H): \n U = Wold - Yold\n #cost = -np.trace(H.T@K@H) + (self.admm_rho/2)*(norm(H.T@D - Wold + self.Y, 'fro')**2) \n cost = -np.trace(H.T@K@H)/nsamples + (rho/2)*np.trace((H.T@D - U)@(H.T@D-U).T) \n return cost", "def ISA_trop(h):\n\tT = 288.15 - 0.0065*h;\n\tp = 101325*(T/288.15)**(-g/(-0.0065*287));\n\trho = 1.225*(T/288.15)**(-g/(-0.0065*287) - 1);\n\ta = np.sqrt(1.4*287*T);\n\treturn T, p, rho, a;", "def get_hessian(phi, pred, 
t, dot_product, reg= 1, regression= \"logistic\"):\n R = np.eye(pred.shape[0])\n if regression == \"logistic\":\n for i in range(pred.shape[0]):\n R[i,i] = pred[i,0] * (1- pred[i,0])\n elif regression == \"probit\":\n for i in range(pred.shape[0]):\n y_n = pred[i,0]\n t_n = t[i,0] \n dotp = dot_product[i, 0]\n pdf = norm.pdf(dotp)\n\n term1 = 1/ (y_n * (1- y_n) + TOLERANCE)\n term2 = (y_n - t_n)/(y_n**2 * (1- y_n) + TOLERANCE)\n term3 = (y_n - t_n)/((1- y_n)**2 * y_n + TOLERANCE)\n term4 = (y_n - t_n)* dotp/(y_n * (1- y_n) * pdf + TOLERANCE)\n\n R[i,i] = (term1 - term2 + term3 - term4)*(pdf**2)\n\n # Add regularization\t\t\t\n hessian = np.matmul(np.matmul(phi.T, R), phi) + np.eye(phi.shape[1])/reg\n return hessian", "def sph_harm_diff_theta(m, l, theta, phi):\n check_degree_and_order(m, l)\n return (m * _sph_harm(m, l, theta, phi) / np.tan(theta) +\n np.sqrt( (l - m) * (l + m + 1) ) * np.exp( -1j*phi ) *\n _sph_harm(m+1, l, theta, phi))", "def qFelder(h):\n\treturn (0.92 + 0.153 * h/1.01) * math.sqrt(9.8 * (2/3.0 * h)**3)", "def decode(self, h):\n return self.tanh(self.linearD(h))" ]
[ "0.5862929", "0.5850773", "0.57733494", "0.5686483", "0.56282175", "0.5592743", "0.55332625", "0.55067706", "0.55043215", "0.5491298", "0.5484844", "0.5470747", "0.5449616", "0.5431975", "0.5429191", "0.5417069", "0.54054326", "0.5379685", "0.53791565", "0.5375629", "0.5361891", "0.53573406", "0.5336242", "0.532041", "0.5293915", "0.52920234", "0.5254426", "0.5241588", "0.5235897", "0.5231546" ]
0.75179046
0
the_vars[0]= thetabar the_vars[1] = h the_vars[2] = qv surface flux from drag law with subsidence and diagnosed deltheta
def dmixed_vars(the_vars,tstep,coeffs):
    deltheta = theta_ft(the_vars[1],coeffs.ft_intercept,coeffs.ft_gamma) - the_vars[0]
    F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0])  #surface heat flux
    Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2])  #surface vapor flux
    Fint = -coeffs.k*F0  #entrainment heat flux
    if coeffs.use_NT:
        # use NT parameterization by calculating we using function
        went = calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0)  # Nicholls-Turton parameterization
    else:
        # use simple we parameterization
        went = -Fint/deltheta  #simple entrainment parameterization
    # calculate delta_Fr
    delta_Frstar = 82.0  # Wm^-2
    Frlambda = 7.9  # Wm^-2, using with CTL from Gesso
    delta_Fr = delta_Frstar - Frlambda*coeffs.ft_qv*1000  # convert qt_ft to g kg^-1
    Fqvent = -went*( coeffs.ft_qv - the_vars[2])
    wsubs = -coeffs.D*the_vars[1]
    rho=1.
    cp=1004.
    derivs=np.empty_like(the_vars)
    # higher delta_Fr from drier air at mixed-layer top...hence cloudy air results in less radiative cooling
    derivs[0]=(F0 - Fint)/(the_vars[1]*rho) - delta_Fr/1004./the_vars[1]
    derivs[1] = went + wsubs
    derivs[2] = (Fqv0 - Fqvent)/the_vars[1]
    return derivs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ha(env, cstate=0):\n T1 = 10\n T2 = 10\n thM = 20\n thm = 5\n vr = 10.5\n v1 = -1.3\n v2 = -2.7\n assert(T1 == T2)\n\n delta = None # None to cause failure\n # The continous variables used in this ha\n x = T1 # clock1 variable\n y = T2 # clock2 variable\n th = 11.5 # The reactor temperature\n\n # You need vtol here, because of floating point error.\n loc0_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(vr),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc0_FT = False\n\n loc1_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v1),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc1_FT = False\n\n loc2_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v2),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc2_FT = False\n\n # Location 3 is reactor shutdown\n loc3_FT = False\n\n # Location 0\n def location0(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thM and x >= T1:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 1, 0, x, y, th, None, True, None, None, curr_time\n elif th == thM and y >= T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 2, 0, x, y, th, None, None, True, None, curr_time\n elif th == thM and x < T1 and y < T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 3, 0, x, y, th, None, None, None, True, curr_time\n # The invariant\n elif th <= thM:\n if not loc0_FT:\n x = loc0_ode_x.compute(vals, curr_time-prev_time)\n y = loc0_ode_y.compute(vals, curr_time-prev_time)\n th = loc0_ode_th.compute(vals, curr_time-prev_time)\n loc0_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thM) > loc0_ode_th.vtol:\n deltath = loc0_ode_th.delta(vals, quanta=(thM-th))\n else:\n th = thM\n deltath = 0\n return 0, deltath, x, y, th, False, None, None, None, curr_time\n else:\n # print('th:', th)\n raise RuntimeError('Reached unreachable branch'\n ' in location 0')\n\n def location1(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n x = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc1_FT:\n x = loc1_ode_x.compute(vals, curr_time-prev_time)\n y = loc1_ode_y.compute(vals, curr_time-prev_time)\n th = loc1_ode_th.compute(vals, curr_time-prev_time)\n loc1_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc1_ode_th.vtol:\n deltath = loc1_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 1, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached 
unreachable branch'\n ' in location 1')\n\n def location2(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n y = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc2_FT:\n x = loc2_ode_x.compute(vals, curr_time-prev_time)\n y = loc2_ode_y.compute(vals, curr_time-prev_time)\n th = loc2_ode_th.compute(vals, curr_time-prev_time)\n loc2_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc2_ode_th.vtol:\n deltath = loc2_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 2, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 2')\n\n def location3(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n global step\n # print('total steps: ', step)\n # Done\n print(time.time()-start)\n sys.exit(1)\n\n # The dictionary for the switch statement.\n switch_case = {\n 0: location0,\n 1: location1,\n 2: location2,\n 3: location3\n }\n\n prev_time = env.now\n while(True):\n (cstate, delta, x, y, th,\n loc0_FT, loc1_FT, loc2_FT, loc3_FT,\n prev_time) = switch_case[cstate](x, y, th,\n loc0_FT,\n loc1_FT,\n loc2_FT,\n loc3_FT,\n prev_time)\n # This should always be the final statement in this function\n global step\n step += 1\n yield env.timeout(delta)", "def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):\n thetal_m = the_vars[0]\n qt_m = the_vars[2]\n zi = the_vars[1]\n dth = deltheta\n \n thetal_ft = thetal_m + dth\n qt_ft = coeffs.ft_qv\n \n dqt = qt_ft - qt_m\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-zi)\n LTS = thetal_3000 - coeffs.sst # lower tropospheric stability\n\n # calculate coefficients\n press=tf.find_press(zi)\n Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)\n Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)\n \n invert= tf.t_uos_thetal(thetal_m,qt_m,press)\n T_0 = invert.temp\n lv=tf.L_t(invert.temp)\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n del_thv_dry = Ad * dth + Bd * dqt\n del_thv_sat = Aw * dth + Bw * dqt\n \n # account for evaporative cooling (increases we)\n ql_max = invert.ql\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n Del_thv = del_thv_dry - Cl * ql_max\n \n # calculate buoyancy integral terms\n rho = 1.\n lcl_press=tf.LCL_thetal(thetal_m,qt_m)\n zb=tf.find_height(lcl_press)\n\n T1 = zb/zi\n T2 = 0.5 * zb**2 / zi**2\n T3 = (zi-zb)/zi\n T4 = 0.5 * (zi**2 - zb**2) / zi**2\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n wtl_0=F0\n wqt_0=Fqv0\n Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 3\n term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))\n term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))\n term3 = Del_F * (Ad * T2 + Aw * T4)\n\n Theta_NE = term1 + term2 + term3\n \n # calculate w*\n wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)\n \n # calculate chi*\n chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)\n \n # calculate del_m\n Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)\n \n # calculate we\n a2=15.\n Del_thv_NT = Del_thv / (1. + a2 * (1. 
- Del_m/Del_thv))\n \n A_NT = 0.2\n fac_NT = 2.5\n\n term4 = Del_thv_NT\n term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)\n denominator = term4 + term5\n\n we = A_NT * fac_NT * Theta_NE / denominator\n \n return we", "def dewT_2_q_magnus(ds, var):\n A1, B1, C1 = 17.625, 243.04, 610.94\n vpsl = C1 * np.exp(A1 * (ds[var['dew']] - 273.15) / (B1 + (ds[var['dew']] - 273.15)))\n wsl = eps0 * vpsl / (ds[var['pressure']] - vpsl)\n ds[var['spec_h']] = wsl / (1 + wsl)\n return ds", "def _redef_sp1_vars(self):\r\n\r\n if len(self.fq_list) == 0:\r\n no_rad = True\r\n lst_tmp = np.matrix(np.reshape(self.lst_tmp, \r\n (self.lst_tmp.size, 1)))\r\n else: no_rad = False\r\n # The practically constants...\r\n # Big Epsilon:\r\n if self.cond == True:\r\n self.Epsilon = self.d_T * self.thermal_conductivity\r\n else:\r\n self.Epsilon = (self.diff_scale ** 2) / \\\r\n (3.0 * self.absorb_coeffs[self.rad] ** 2)\r\n # Beta:\r\n if self.cond == True:\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff)\r\n else:\r\n self.Beta = (1.0 + 3.0 * self.r2) * (2.0 * self.diff_scale) / \\\r\n ((1.0 - 2.0 * self.r1) * (\r\n 3.0 * self.absorb_coeffs[self.rad]))\r\n\r\n # The feild solutions at the last timestep.\r\n # The integral vF:\r\n if self.cond == True:\r\n # The horrifically complicated F:\r\n def F_func_cond(elem, eta):\r\n F = 0.0\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F += Tn\r\n for k in range(0, len(self.fq_list)):\r\n vk = self.fq_list[k]\r\n try:\r\n vk_m = self.fq_list[k - 1]\r\n except:\r\n vk_m = self.v0_frequency\r\n absorbtion = self.absorb_coeffs[k]\r\n phi = elem.eval_elem(self.node_map, self.lst_rad[k],\r\n [eta])[0]\r\n inter1 = phi - 4.0 * sconst.pi * \\\r\n self.B_int_function(Tn, self.refr_idx_vol,\r\n vk, vk_m)\r\n inter2 = absorbtion * self.d_T / (self.diff_scale ** 2)\r\n F += inter2 * inter1\r\n return elem.funcs(eta) * F\r\n if not no_rad:\r\n # We're integrating something non-linear for SP1\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_cond,\r\n self.node_map)\r\n else:\r\n # Or something easier if we're only looking at heat.\r\n self.vF_vect_vol = np.array(self.uv_vol * lst_tmp).reshape(-1)\r\n else:\r\n def F_func_radiative(elem, eta):\r\n T = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n F = 4.0 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_radiative,\r\n self.node_map)\r\n # The path integral vf:\r\n if self.cond == True:\r\n def f_func_cond(elem, eta):\r\n Tb = self.background_temperature\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n n = self.refr_idx_background\r\n vk = self.v0_frequency\r\n vk_minus = 0\r\n Bb0 = self.B_int_function(Tb, n, vk, vk_minus)\r\n Bn0 = self.B_int_function(Tn, n, vk, vk_minus)\r\n B_coeff = (self.alpha * sconst.pi) / self.convect_coeff\r\n f = Tb + B_coeff * (Bb0 - Bn0)\r\n return elem.funcs(eta) * f\r\n if not no_rad:\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_cond,\r\n self.node_map)\r\n else:\r\n try:\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n except AttributeError:\r\n def elem_functor(elem, eta): return elem.funcs(eta)\r\n self.cache_tb_integral_array = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n elem_functor,\r\n 
self.node_map)\r\n self.cache_tb_integral_array *= self.background_temperature\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n \r\n else:\r\n # Radiation f = 4*pi*B^{(k)}(T_b, n_g)\r\n def f_func_radiative(elem, eta):\r\n T = self.background_temperature\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n f = 4 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_radiative,\r\n self.node_map)\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vf_vect_bound.shape[0] == \\\r\n self.vF_vect_vol.shape[0])", "def getdelta(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"delta\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tadgradred,hydrograd,my_nu,my_alpha,self.data[\"delta\"][i],my_gamma1,my_cp,my_cph,my_c_s,failtrig = myhmag.gethelmgrads(self.data[\"T\"][i], self.data[\"rho\"][i], 0.,abar,zbar,True)", "def __init__(self,b,u,v,hbls_old,hbbl_old,Kv_old,Kt_old,srflx,sustr,svstr,f,grid_dict,tstep_mode,dt):\n \n # INPUTS FROM TTTW SYSTEM\n self.b = b #buoyancy field: [Ly,N]\n self.u = u # x-component of velocity [Ly,N]\n self.v = v # y-component of velocity [Ly+1,N]\n self.hbls_old = hbls_old #boundary layer depth from previous time step [Ly]\n self.hbbl_old = hbbl_old # bottom boundary layer depth from previous time step [Ly]\n self.Kv_old = Kv_old # momentum mixing coefficeint from previous time step [Ly,N+1]\n self.Kt_old = Kt_old # tracer mixing coefficient from previous time step [Ly,N+1]\n self.srflx = srflx #solar heat flux [Ly] (degC * (m/s))\n self.sustr = sustr # x-component surface wind stress [Ly] (N/m^2) \n self.svstr = svstr # y-component surface wind stress [Ly+1] (N/m^2)\n self.grid_dict = grid_dict #gridded data\n self.f = f #coriolis parameter\n # KPP-SPECIFIC VARIABLES \n self.hbls = np.zeros([self.b.shape[0]])\n self.hbbl = np.zeros([self.b.shape[0]])\n self.ustar = []\n self.bvf = [] \n self.kmo = []\n self.C_h_MO = []\n self.kbl = []\n self.Cr = [] \n self.Fc = []\n self.ghat = [] #NONLOCAL TERM: TO BE USED IN TIME STEPPING\n self.tstep_mode = tstep_mode# if in time steppign mode, turn on HBL_RATE_LIMIT\n self.dt = dt", "def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No 
need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)", "def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)", "def dynamics(self,eta,nu,u_actual,u_control,sampleTime): \n \n # Current velocities\n u_c = self.V_c * math.cos(self.beta_c - eta[5]) # current surge velocity\n v_c = self.V_c * math.sin(self.beta_c - eta[5]) # current sway velocity \n \n nu_c = np.array([u_c,v_c,0,0,0,0],float) # current velocity vector\n nu_r = nu - nu_c # relative velocity vector\n \n U_r = math.sqrt( nu_r[0]**2 + nu_r[1]**2 ) # relative speed\n \n # Rudder command and actual rudder angle\n delta_c = u_control[0]\n delta = u_actual[0]\n \n # Rudder forces and moment (Fossen 2021, Chapter 9.5.1)\n b = 0.7 * self.T # rudder height\n AR = b**2 / self.Lambda # aspect ratio: Lamdba = b**2/AR \n CN = 6.13 * self.Lambda / ( self.Lambda + 2.25 ) # normal coefficient\n t_R = 1 - 0.28 * self.Cb - 0.55\n a_H = 0.4\n x_R = -0.45 * self.L\n x_H = -1.0 * self.L\n\n Xdd = -0.5 * ( 1 - t_R ) * self.rho * U_r**2 * AR * CN\n Yd = -0.25 * ( 1 + a_H ) * self.rho * U_r**2 * AR * CN \n Nd = -0.25 * ( x_R + a_H * x_H ) * self.rho * U_r**2 * AR * CN \n \n # Control forces and moment\n delta_R = -delta # physical rudder angle (rad)\n T = self.tau_X # thrust (N)\n t_deduction = 0.1 # thrust deduction number\n tau1 = ( 1 - t_deduction ) * T - Xdd * math.sin( delta_R )**2 \n tau2 = -Yd * math.sin( 2 * delta_R ) \n tau6 = -Nd * math.sin( 2 * delta_R ) \n tau = np.array( [ tau1, tau2, tau6 ],float) \n \n # Linear maneuvering model\n T_surge = self.L # approx. time constant in surge (s)\n xg = 0 # approx. 
x-coordinate, CG (m) \n \n # 3-DOF ship model\n [M,N] = clarke83(U_r,self.L, self.B, self.T,self.Cb,self.R66,xg,T_surge)\n Minv = np.linalg.inv(M)\n nu3 = np.array( [ nu_r[0], nu_r[1], nu_r[5] ]) \n nu3_dot = np.matmul( Minv, tau - np.matmul(N,nu3) ) \n \n # 6-DOF ship model\n nu_dot = np.array( [ nu3_dot[0],nu3_dot[1],0,0,0,nu3_dot[2] ]) \n\n # Rudder angle saturation\n if ( abs(delta) >= self.deltaMax * math.pi / 180 ):\n delta = np.sign(delta) * self.deltaMax * math.pi / 180\n \n # Rudder dynamics\n delta_dot = (delta_c - delta) / self.T_delta \n\n # Forward Euler integration [k+1]\n nu = nu + sampleTime * nu_dot\n delta = delta + sampleTime * delta_dot\n\n u_actual = np.array([delta],float) \n\n return nu, u_actual", "def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = 
(vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel", "def fluid_reynolds(uu, param, grid, lnrho=list(), shock=list(), nghost=3,\n lmix=True):\n #viscous forces\n th2 = 2./3\n th1 = 1./3\n fvisc = np.zeros_like(uu)\n #molecular viscosity contribution\n ldel2, lshock, lhyper3 = False, False, False\n for ivisc in param.ivisc:\n if not 'shock' in ivisc and not 'hyper' in ivisc\\\n and not '\\n' in ivisc:\n ldel2 = True\n if 'shock' in ivisc:\n lshock = True\n if 'hyper3' in ivisc:\n lhyper3 = True\n \n if ldel2:\n if lhyper3:\n lhyper3 = lhyper3==lmix\n del2u = np.zeros_like(uu)\n for j in range(0,3):\n del2u[j] = del2(uu[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n del2u[j, : nghost, nghost:-nghost, nghost:-nghost] = del2u[j,-2*nghost: -nghost, nghost: -nghost, nghost: -nghost]\n del2u[j,-nghost: , nghost:-nghost, nghost:-nghost] = del2u[j, nghost:2*nghost, nghost: -nghost, nghost: -nghost]\n del2u[j, nghost:-nghost, : nghost, nghost:-nghost] = del2u[j, nghost: -nghost,-2*nghost: -nghost, nghost: -nghost]\n del2u[j, nghost:-nghost,-nghost: , nghost:-nghost] = del2u[j, nghost: -nghost, nghost:2*nghost, nghost: -nghost]\n del2u[j, nghost:-nghost, nghost:-nghost, : nghost] = del2u[j, nghost: -nghost, nghost: -nghost,-2*nghost: -nghost]\n del2u[j, nghost:-nghost, nghost:-nghost,-nghost: ] = del2u[j, nghost: -nghost, nghost: -nghost, nghost:2*nghost]\n for ivisc in param.ivisc:\n ivisc = str.strip(ivisc,'\\n')\n if 'nu-const' not in ivisc and 'shock' not in ivisc\\\n and 'hyper' not in ivisc and len(ivisc) > 0:\n print('fluid_reynolds WARNING: '+ivisc+' not implemented\\n'+\n 'terms may be missing from the standard rate of strain tensor')\n fvisc = fvisc + param.nu*del2u\n del(del2u)\n tmp0 = grad(uu[0],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp0[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp0[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp0[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp0[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp0[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp0[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp0[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp0[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp0[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n tmp1 = grad(uu[1],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp1[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp1[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp1[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp1[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp1[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp1[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp1[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp1[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp1[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n 
tmp2 = grad(uu[2],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp2[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp2[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp2[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp2[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp2[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp2[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp2[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp2[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp2[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n #effect of compressibility \n if len(lnrho) > 0:\n divu = div(uu,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n divu[ :nghost,nghost:-nghost,nghost:-nghost] = divu[-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n divu[-nghost:,nghost:-nghost,nghost:-nghost] = divu[ nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n divu[nghost:-nghost, :nghost,nghost:-nghost] = divu[nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n divu[nghost:-nghost,-nghost:,nghost:-nghost] = divu[nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n divu[nghost:-nghost,nghost:-nghost, :nghost] = divu[nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n divu[nghost:-nghost,nghost:-nghost,-nghost:] = divu[nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n gradlnrho = grad(lnrho,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n gradlnrho[j, :nghost,nghost:-nghost,nghost:-nghost] = gradlnrho[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n gradlnrho[j,-nghost:,nghost:-nghost,nghost:-nghost] = gradlnrho[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost, :nghost,nghost:-nghost] = gradlnrho[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost,-nghost:,nghost:-nghost] = gradlnrho[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost,nghost:-nghost, :nghost] = gradlnrho[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n gradlnrho[j,nghost:-nghost,nghost:-nghost,-nghost:] = gradlnrho[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n Sglnrho = np.zeros_like(uu)\n Sglnrho[0] = dot(tmp0,gradlnrho) +\\\n (tmp0[0]+tmp1[0]+tmp2[0]-th2*divu)*gradlnrho[0] \n Sglnrho[1] = dot(tmp1,gradlnrho) +\\\n (tmp0[1]+tmp1[1]+tmp2[1]-th2*divu)*gradlnrho[1]\n Sglnrho[2] = dot(tmp2,gradlnrho) +\\\n (tmp0[2]+tmp1[2]+tmp2[2]-th2*divu)*gradlnrho[2]\n graddivu = grad(divu,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n graddivu[j, :nghost,nghost:-nghost,nghost:-nghost] = graddivu[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n graddivu[j,-nghost:,nghost:-nghost,nghost:-nghost] = graddivu[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost, :nghost,nghost:-nghost] = graddivu[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost,-nghost:,nghost:-nghost] = graddivu[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost,nghost:-nghost, :nghost] = graddivu[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n graddivu[j,nghost:-nghost,nghost:-nghost,-nghost:] = graddivu[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n fvisc = fvisc + param.nu*(th1*graddivu+Sglnrho)\n del(Sglnrho)\n 
elif param.ldensity:\n print('fluid_reynolds WARNING: no lnrho provided\\n'+\n 'rate of strain tensor likely incomplete')\n #shock contribution\n if lshock:\n if len(shock) == 0:\n print('fluid_reynolds WARNING: no shock provided\\n'+\n 'rate of strain tensor likely incomplete')\n else:\n shock[ :nghost,nghost:-nghost,nghost:-nghost] = shock[-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n shock[-nghost:,nghost:-nghost,nghost:-nghost] = shock[ nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n shock[nghost:-nghost, :nghost,nghost:-nghost] = shock[nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n shock[nghost:-nghost,-nghost:,nghost:-nghost] = shock[nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n shock[nghost:-nghost,nghost:-nghost, :nghost] = shock[nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n shock[nghost:-nghost,nghost:-nghost,-nghost:] = shock[nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n divugradlnrho = np.zeros_like(uu)\n gradshock = grad(shock,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n gradshock[j, :nghost,nghost:-nghost,nghost:-nghost] = gradshock[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n gradshock[j,-nghost:,nghost:-nghost,nghost:-nghost] = gradshock[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost, :nghost,nghost:-nghost] = gradshock[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost,-nghost:,nghost:-nghost] = gradshock[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost,nghost:-nghost, :nghost] = gradshock[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n gradshock[j,nghost:-nghost,nghost:-nghost,-nghost:] = gradshock[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n for j in range(0,3):\n divugradlnrho[j] = param.nu_shock*divu*gradshock[j] +\\\n param.nu_shock*shock*(divu*gradlnrho[j] + graddivu[j])\n del(divu,gradshock,gradlnrho,graddivu)\n fvisc = fvisc + divugradlnrho\n del(divugradlnrho)\n if lhyper3:\n #deluij5 = np.zeros_like([uu,uu,uu])\n #uij5glnrho to be included\n del6u = np.zeros_like(uu)\n for j in range(0,3):\n del6u[j] = del6(uu[j],grid.dx,grid.dy,grid.dz)\n del6u[j, :nghost,nghost:-nghost,nghost:-nghost] = del6u[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n del6u[j,-nghost:,nghost:-nghost,nghost:-nghost] = del6u[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n del6u[j,nghost:-nghost, :nghost,nghost:-nghost] = del6u[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n del6u[j,nghost:-nghost,-nghost:,nghost:-nghost] = del6u[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n del6u[j,nghost:-nghost,nghost:-nghost, :nghost] = del6u[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n del6u[j,nghost:-nghost,nghost:-nghost,-nghost:] = del6u[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n #del6 for non-cartesian tba\n #del6u[j] = del6(uu[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n # coordinate_system=param.coord_system)\n fvisc = fvisc + param.nu_hyper3*del6u\n del(del6u)\n fvisc2 = np.sqrt(dot2(fvisc))\n #advective forces\n advec = np.zeros_like(uu)\n advec[0] = dot(uu,tmp0)\n advec[1] = dot(uu,tmp1)\n advec[0] = dot(uu,tmp2)\n del(tmp0,tmp1,tmp2)\n advec2 = np.sqrt(dot2(advec))\n del(advec)\n #avoid division by zero\n if fvisc2.max() > 0:\n fvisc2[np.where(fvisc2==0)] = fvisc2[np.where(fvisc2>0)].min()\n Re = advec2/fvisc2\n #set minimum floor to exclude zero-valued Re \n Re[np.where(Re==0)] = Re[np.where(Re>0)].min()\n else:\n Re = advec2\n print('Re undefined')\n 
return Re", "def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl", "def trajectories(t_upper=3600*24*687, h=100, m1=5.972e+24, m2=6.417e+23,\n m3=1.989e+30, a1=1.0*1.496e+11, a2=1.52*1.496e+11):\n\n # We check if parameters are all positive\n\n list_parameters = [t_upper, h, m1, m2, m3,\n a1, a2]\n\n for parameters in list_parameters:\n\n if parameters < 0:\n print(f'You have entered a negative parameter')\n\n # initial values for planet 1 in x, y and z direction\n x_i1 = a1\n y_i1 = 0\n v_x1i = 0\n v_y1i = 29779.301841746023\n z_i1 = 0\n v_z1i = 0\n\n # initial values for planet 2 in x, y and z direction\n x_i2 = a2\n y_i2 = 0\n v_x2i = 0\n v_y2i = 24154.203325249873\n z_i2 = 0\n v_z2i = 0\n\n # initial values for Sun in x, y and z direction\n x_i3 = 0\n y_i3 = 0\n v_x3i = 0\n v_y3i = 0\n z_i3 = 0\n v_z3i = 0\n\n# Initial positions and velocities\n r = np.array([x_i1, y_i1, v_x1i, v_y1i, x_i2,\n y_i2, v_x2i, v_y2i, x_i3, y_i3, v_x3i, v_y3i,\n z_i1, z_i2, z_i3, v_z1i, v_z2i, v_z3i])\n\n # We create vectors which will contains the trajectories\n # and velocities of each bodies\n x_pnts1 = [x_i1]\n y_pnts1 = [y_i1]\n v_x_pnts1 = [v_x1i]\n v_y_pnts1 = [v_y1i]\n\n x_pnts2 = [x_i2]\n y_pnts2 = [y_i2]\n v_x_pnts2 = [v_x2i]\n v_y_pnts2 = [v_y2i]\n\n x_pnts3 = [x_i3]\n y_pnts3 = [y_i3]\n v_x_pnts3 = [v_x3i]\n v_y_pnts3 = [v_y3i]\n\n x_pnts3 = [x_i3]\n y_pnts3 = [y_i3]\n v_x_pnts3 = [v_x3i]\n v_y_pnts3 = [v_y3i]\n\n z_pnts1 = [z_i1]\n z_pnts2 = [z_i2]\n z_pnts3 = [z_i3]\n\n v_z_pnts1 = [v_z1i]\n v_z_pnts2 = [v_z2i]\n v_z_pnts3 = [v_z3i]\n\n m1 = m1\n m2 = m2\n m3 = m3\n a1 = a1\n a2 = a2\n\n # We create a vector which will contain the time\n # Initial value\n t_i = 0.0\n t_values = [t_i]\n\n for t in range(0, t_upper, h):\n\n # We used the RK4 formula here\n k1 = h*derivative(r=r, t=0, m1=5.972e+24, m2=m2, m3=1.989e+30,\n a1=a1, a2=1.52*1.496e+11)\n k2 = h*derivative(r=r + 0.5*k1, t=t + (h/2), m1=5.972e+24,\n m2=6.417e+23, m3=1.989e+30, a1=1.0*1.496e+11,\n a2=1.52*1.496e+11)\n k3 = h*derivative(r=r + 0.5*k2, t=t + (h/2), m1=5.972e+24,\n m2=6.417e+23, m3=1.989e+30, a1=1.0*1.496e+11,\n a2=1.52*1.496e+11)\n k4 = h*derivative(r=r + h*k3, t=t+h, m1=5.972e+24, m2=6.417e+23,\n m3=1.989e+30, a1=1.0*1.496e+11, a2=1.52*1.496e+11)\n\n # We calculate the new vector r\n r += (k1 + 2*k2 + 2*k3 + k4)*(1.0/6.0)\n\n # We add the new points calculated\n x_pnts1.append(r[0])\n y_pnts1.append(r[1])\n\n v_x_pnts1.append(r[2])\n v_y_pnts1.append(r[3])\n\n x_pnts2.append(r[4])\n y_pnts2.append(r[5])\n v_x_pnts2.append(r[6])\n v_y_pnts2.append(r[7])\n\n 
x_pnts3.append(r[8])\n y_pnts3.append(r[9])\n v_x_pnts3.append(r[10])\n v_y_pnts3.append(r[11])\n\n z_pnts1.append(r[12])\n z_pnts2.append(r[13])\n z_pnts3.append(r[14])\n\n v_z_pnts1.append(r[15])\n v_z_pnts2.append(r[16])\n v_z_pnts3.append(r[17])\n\n t_values.append(t)\n\n # We return all the trajectories\n return x_pnts1, y_pnts1, x_pnts2, y_pnts2, x_pnts3, y_pnts3, z_pnts1, z_pnts2, z_pnts3", "def rhs(y, t, l, m, g):\n # Unpack the states so you can use the variable names in the\n # sympy.physics.mechanics equations\n q1 = y[0]\n q2 = y[1]\n u1 = y[2]\n u2 = y[3]\n # or you can make use of python's tuple unpacking for a one liner\n # q1, q2, u1, u2 = y\n\n # Initialize a vector for the derivatives.\n dydt = zeros((len(y)))\n\n # Compute the derivatives, these are pasted in from the\n # sympy.physics.mechanics results.\n dydt[0] = u1\n dydt[1] = u2\n dydt[2] = (-g*sin(q1)*sin(q2)**2 + 2*g*sin(q1) -\n g*sin(q2)*cos(q1)*cos(q2) + 2*l*u1**2*sin(q1)*cos(q1)*cos(q2)**2 -\n l*u1**2*sin(q1)*cos(q1) - 2*l*u1**2*sin(q2)*cos(q1)**2*cos(q2) +\n l*u1**2*sin(q2)*cos(q2) + l*u2**2*sin(q1)*cos(q2) -\n l*u2**2*sin(q2)*cos(q1))/(l*(sin(q1)**2*sin(q2)**2 +\n 2*sin(q1)*sin(q2)*cos(q1)*cos(q2) + cos(q1)**2*cos(q2)**2 - 2))\n dydt[3] = (-sin(q1)*sin(q2)/2 - cos(q1)*cos(q2)/2)*(2*g*l*m*sin(q1) -\n l**2*m*(-sin(q1)*cos(q2) +\n sin(q2)*cos(q1))*u2**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2)) -\n l**2*m) + (g*l*m*sin(q2) - l**2*m*(sin(q1)*cos(q2) -\n sin(q2)*cos(q1))*u1**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2))\n - l**2*m)\n\n # Return the derivatives.\n return dydt", "def main():\r\n #Drag Force Equation: 1/2 * rho * Cd * A * v^2\r\n\r\n #User-Defined Constants\r\n global m\r\n global v0\r\n global theta\r\n global rho #Fluid Density\r\n global A #Cross-sectional Area\r\n global Cd #Drag coefficient\r\n global tStep\r\n global g\r\n\r\n m = 1\r\n v0 = 30\r\n theta = math.radians(45)\r\n rho = 1.225\r\n A = 0.05\r\n Cd = 0.5 #A ball is approx. 
0.5\r\n tStep = 0.005\r\n g = 9.8\r\n\r\n\r\n #Data Structures\r\n global tHist\r\n global xHist\r\n global yHist\r\n global thetaHist\r\n global vHist\r\n global vXHist\r\n global vYHist\r\n tHist = [] #list for all time steps\r\n xHist = [] #list for all x position steps\r\n yHist = [] #list for all y position steps\r\n thetaHist = [] #List for all theta at every time step\r\n vHist = [] #list for all velocities at every time step\r\n vXHist = [] #list for all x-axis velocities at every time step\r\n vYHist = [] #list for all y-axis velocities at every time step\r\n\r\n #Initialize intial values\r\n tHist.append(0.0)\r\n xHist.append(0.0)\r\n yHist.append(0.0)\r\n thetaHist.append(theta)\r\n vHist.append(v0)\r\n vXHist.append(v0 * math.cos(theta))\r\n vYHist.append(v0 * math.sin(theta))\r\n vTheta = math.atan(vYHist[0] / vXHist[0])\r\n # print(\"t: \" + str(tHist[0]))\r\n # print(\"x: \" + str(xHist[0]))\r\n # print(\"y: \" + str(yHist[0]))\r\n # print(\"v: \" + str(vHist[0]))\r\n # print(\"Vx: \" + str(vXHist[0]))\r\n # print(\"Vy: \" + str(vYHist[0]))\r\n\r\n #Convenience variables\r\n global k\r\n\r\n counter = 1\r\n #Loop until the y-displacement becomes negative (projectile reaches ground again)\r\n while True:\r\n tHist.append(counter * tStep) #increment time\r\n print(\"t: \" + str(tHist[counter]))\r\n\r\n #This large hunk is the solution to the net force differential equation in the x-axis\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep)) #STABLE\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd)/(2*m))*(tStep))\r\n # oneOverVX = (1/vHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep))\r\n oneOverVX = (1/vXHist[counter-1]) + ((rho*A*Cd)/(2*m*math.cos(thetaHist[counter-1]))*(tStep)) #This is one over the solution for velocity in the x-axis net force differential equation\r\n vXHist.append(1 / oneOverVX) #Adding the velocity to the list of velocities\r\n\r\n vY0 = vYHist[counter-1] #Convenience variable\r\n # k = 0.5 * rho * A * Cd * math.sin(abs(thetaHist[counter-1])) #STABLE\r\n # k = 0.5 * rho * A * Cd\r\n k = (rho * A * Cd) / (2 * math.sin(abs(thetaHist[counter-1]))) #Convenience variable\r\n print(\"k: \" + str(k))\r\n print(\"vX: \" + str(vXHist[counter]))\r\n rootGMK = math.sqrt(g*m*k) #Convenience variable\r\n if vYHist[counter-1] > 0.0: #If the projectile is going upwards\r\n #Solving the y-axis differential equation for velocity\r\n equationRight = -rootGMK * ((tStep/m) - (math.atan((k*vY0)/(rootGMK))/rootGMK))\r\n vYHist.append((math.tan(equationRight) * rootGMK) / k)\r\n elif vYHist[counter-1] < 0.0: #If the projectile is going downwards\r\n #Solving the y-axis differential equation for velocity\r\n\r\n # Hand-solved integral\r\n # exponent = -(2*tStep*rootGMK)/m\r\n # numerator = g*m*math.exp(exponent) - math.exp(exponent)*vY0*rootGMK - vY0*rootGMK - g*m\r\n # denominator = math.exp(exponent)*(vY0-rootGMK) - vY0*k - rootGMK\r\n # vYHist.append(numerator / denominator)\r\n\r\n #Wolfram Alpha arctanh integral\r\n arctanh =(vY0*math.sqrt(k))/(math.sqrt(g*m))\r\n print(\"arctanh: \" + str(arctanh))\r\n equationRight = (np.arctanh(arctanh))/(rootGMK) - (tStep/m)\r\n vYHist.append(np.tanh(rootGMK * equationRight) * ((math.sqrt(g*m))/(math.sqrt(k))))\r\n else: #If current y velocity is 0\r\n vYHist.append(vY0 - g*tStep)\r\n print(\"vY: \" + str(vYHist[counter]))\r\n\r\n vHist.append(math.hypot(vXHist[counter], vYHist[counter])) #Calculate the net velocity and add it to the velocities list\r\n 
print(\"v: \" + str(vHist[counter]))\r\n thetaHist.append(math.atan(vYHist[counter]/vXHist[counter])) #Calculate the current angle based on the velocities and add it to the theta list\r\n print(\"0: \" + str(math.degrees(thetaHist[counter])))\r\n\r\n x0 = xHist[counter-1]\r\n y0 = yHist[counter-1]\r\n\r\n # yIntegral = trigintegrate()\r\n\r\n \"\"\"\r\n Note: What I wanted to do here was to integrate the velocity functions over the time interval to find the exact\r\n changes in position. Unfortunately, I was running short of time and decided it was not worth it to move forward with\r\n this final step, and instead worked on the presentation and testing different cases.\r\n \"\"\"\r\n xHist.append(x0 + vXHist[counter]*tStep) #Calculate new x position using x = x0 + vt\r\n yHist.append(y0 + vYHist[counter]*tStep) #Calculate new y position using y = y0 + vt\r\n print(\"x: \" + str(xHist[counter]))\r\n print(\"y: \" + str(yHist[counter]))\r\n print()\r\n\r\n # xHist.append(xHist[counter-1] + vXHist[counter-1]*tStep + 0.5*aXHist[counter-1]*tStep**2)\r\n # yHist.append(yHist[counter-1] + vYHist[counter-1]*tStep + 0.5*aYHist[counter-1]*tStep**2)\r\n # vXHist.append(vXHist[counter-1] + aXHist[counter-1]*tStep)\r\n # vYHist.append(vYHist[counter-1] + aYHist[counter-1]*tStep)\r\n # vHist.append(math.hypot(vXHist[counter], vYHist[counter]))\r\n #\r\n # vTheta = math.atan(vYHist[counter] / vXHist[counter])\r\n # xDragAccel = -0.5*rho*Cd*A*vHist[counter]**2*math.cos(vTheta) / m\r\n # yDragAccel = -math.copysign(0.5*rho*Cd*A*vHist[counter]**2*math.sin(vTheta) / m, vYHist[counter])\r\n #\r\n # aXHist.append(xDragAccel)\r\n # aYHist.append(-g*tStep + yDragAccel)\r\n\r\n if vYHist[counter-1] > 0.0 and vYHist[counter] < 0.0: #Check if the projectile has reached it's peak by checking for a critical point\r\n print(\"max height reached at time=\" + str(tHist[counter]))\r\n # break\r\n\r\n # print(\"t: \" + str(tHist[counter]))\r\n # print(\"x: \" + str(xHist[counter]))\r\n # print(\"y: \" + str(yHist[counter]))\r\n # print(\"Vx: \" + str(vXHist[counter]))\r\n # print(\"Vy: \" + str(vYHist[counter]))\r\n # print(\"Ax: \" + str(aXHist[counter]))\r\n # print(\"Ay: \" + str(aYHist[counter]))\r\n\r\n if yHist[counter] < 0 or counter > 99999: #End the loop if the projectile has reached the ground (or limit the number of iterations to avoid computer death)\r\n break\r\n\r\n counter += 1\r\n\r\n plotData()", "def hydro_solver(self):\n u_dx = self.central_x(self.u)\n w_dy = self.central_y(self.w)\n P_dx = self.central_x(self.P)\n P_dy = self.central_y(self.P)\n\n rho_dx_upwind = self.upwind_x(self.rho, self.u)\n rho_dy_upwwind = self.upwind_y(self.rho, self.w)\n rho_udx_upwind = self.upwind_x(self.rho * self.u, self.u)\n rho_udy_upwind = self.upwind_y(self.rho * self.u, self.w)\n rho_wdx_upwind = self.upwind_x(self.rho * self.w, self.u)\n rho_wdy_upwind = self.upwind_y(self.rho * self.w, self.w)\n u_dx_uu = self.upwind_x(self.u, self.u)\n u_dx_uw = self.upwind_x(self.u, self.w)\n w_dy_uu = self.upwind_y(self.w, self.u)\n w_dy_uw = self.upwind_y(self.w, self.w)\n e_dx = self.upwind_x(self.e, self.u)\n e_dy = self.upwind_y(self.e, self.w)\n\n self.rho_dt = (\n -self.rho * (u_dx + w_dy)\n - self.u * rho_dx_upwind\n - self.w * rho_dy_upwwind\n )\n self.e_dt = (\n -(self.e + self.P) * (u_dx + w_dy) - self.u * e_dx - self.w * e_dy\n )\n self.rho_udt = (\n -self.rho * self.u * (u_dx_uu + w_dy_uu)\n - self.u * rho_udx_upwind\n - self.w * rho_udy_upwind\n - P_dx\n )\n self.rho_wdt = (\n -self.rho * self.w * (u_dx_uw + 
w_dy_uw)\n - self.u * rho_wdx_upwind\n - self.w * rho_wdy_upwind\n - P_dy\n + self.rho * self.g\n )\n\n self.time_step()\n rho_previous = np.zeros_like(self.rho)\n rho_previous[:, :] = self.rho\n self.rho[:, :] = self.rho + self.rho_dt * self.dt\n self.e[:, :] = self.e + self.e_dt * self.dt\n self.u[:, :] = (\n rho_previous * self.u + self.rho_udt * self.dt\n ) / self.rho\n self.w[:, :] = (\n rho_previous * self.w + self.rho_wdt * self.dt\n ) / self.rho\n\n self.boundary_conditions()\n self.T[:, :] = (\n (self.Y - 1) * self.e * self.mu * self.m_u / (self.kb * self.rho)\n )\n self.P[:, :] = (self.Y - 1) * self.e\n uw = (self.u, self.w)\n v = np.linalg.norm(uw)\n dt = self.dt\n\n return dt", "def forward(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=0, eps=0, gamma=0, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # forward euler and forward/backward timestep\n beta = np.float32(beta)\n mu = np.float32(mu)\n \n du1, du0 = du[:2]\n dv1, dv0 = dv[:2]\n dn0 = dn[0]\n \n dndt_x(h, n, u, v, dx, dy, dn0) # calculate dndt and put it into dn0\n \n n1 = n + ( dn0 )*dt\n \n dudt_x(h, n, f, u, v, dx, dy, du0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n, f, u, v, dx, dy, dv0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dudt_x(h, n1, f, u, v, dx, dy, du1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n1, f, u, v, dx, dy, dv1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n u1 = u + ( beta*du1 + (one-beta)*du0 )*dt\n v1 = v + ( beta*dv1 + (one-beta)*dv0 )*dt\n \n n, u, v = n1, u1, v1\n \n du = [du1, du0, du0, du0]\n dv = [dv1, dv0, dv0, dv0]\n dn = [dn0, dn0, dn0]\n return n1, u1, v1, du, dv, dn", "def t_rh_2_dewT(ds, var):\n ds['dew'] = 243.04 * (np.log(ds[var['rh']] / 100) + ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))/\\\n (17.625-np.log(ds[var['rh']] / 100) - ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))\n return ds", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. 
Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. * np.pi)", "def main(data, setup):\n # input check \n varnames = ('vm_raw', 'vm_raw_theo')\n for varname in varnames:\n if varname not in data.keys():\n raise LookupError('data must contain variable %s.' %s)\n\n # display info message\n chrono = setup['chrono']\n chrono.issue('target velocity: correct for sensor motion...')\n\n # retrieve varialbes\n vnys = data['nqv']\n v_sensor_r = data['v_sensor_r']\n\n # ========== main =================================== #\n for key_raw in ('vm_raw', 'vm_raw_theo'):\n key_c = key_raw.replace('raw', 'raw_c')\n\n # sum\n vm_raw = data[key_raw]\n v_sum = (vm_raw + np.expand_dims(v_sensor_r, 1))\n\n # mod\n data[key_c] = symmod(v_sum, vnys)\n # ==================================================== #\n\n return data", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n 
self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. * dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension", "def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0,0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = 
np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda t: np.interp(t, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0, 0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda tt: np.interp(tt, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def forward(self, z_t_1, h_x, phi_table):\n z_category = self.gen_z_t_dist_now(z_t_1, h_x)\n \n if self.use_gumbel_softmax:\n \n# device = z_category.device\n \n averaged_z_t = 0\n \n log_prob = Variable(torch.log(z_category))\n \n for k in range(self.sampling_times): \n curr_z_t = F.gumbel_softmax(log_prob, tau = 0.1)\n \n averaged_z_t += curr_z_t\n \n del curr_z_t\n \n# averaged_z_t = averaged_z_t.to(device)\n \n z_t = averaged_z_t/self.sampling_times\n else:\n z_t = z_category\n \n phi_z = torch.mm(z_t, torch.t(phi_table))\n# mu = self.h_to_mu(h_combined)\n# logvar = self.h_to_logvar(h_combined)\n# std = torch.exp(0.5 * logvar) \n# epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n# z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, z_category, phi_z", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, 
wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# 
observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)", "def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])", "def main():\n \n # Particle in SHO - c.f. Mocz & Succi (2015) Fig. 2\n # parameters\n n = 100 # number of particles\n dt = 0.02 # timestep\n nt = 100 # number of timesteps\n nt_setup = 400 # number of timesteps to set up simulation\n n_out = 25 # plot solution every nout steps\n b = 4 # velocity damping for acquiring initial condition\n m = 1/n # mass of SPH particle ( m * n = 1 normalizes |wavefunction|^2 to 1)\n h = 40/n # smoothing length\n t = 0. 
# time\n\n # plot potential\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n fig = plt.plot(xx, 0.5*xx**2, linewidth=5, color=[0.7, 0.7, 0.9])\n \n # initialize\n x = np.linspace(-3.0, 3.0, num=n)\n x = np.reshape(x,(n,1))\n u = np.zeros((n,1))\n \n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h )\n\n # get v at t=-0.5*dt for the leap frog integrator using Euler's method\n u_mhalf = u - 0.5 * dt * a\n\n # main loop (time evolution)\n for i in np.arange(-nt_setup, nt): # negative time (t<0, i<0) is used to set up initial conditions\n\n # leap frog\n u_phalf = u_mhalf + a*dt\n x = x + u_phalf*dt\n u = 0.5*(u_mhalf+u_phalf)\n u_mhalf = u_phalf\n if (i >= 0):\n t = t + dt\n print(\"%.2f\" % t)\n \n if (i == -1 ): # switch off damping before t=0\n u = np.zeros((n,1)) + 1.0\n u_mhalf = u\n b = 0 # switch off damping at time t=0\n \n # update densities, pressures, accelerations\n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h)\n \n # plot solution every n_out steps\n if( (i >= 0) and (i % n_out) == 0 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n rr_exact = 1./np.sqrt(np.pi) * np.exp(-(xx-np.sin(t))**2/2.)**2\n fig = plt.plot(xx, rr_exact, linewidth=2, color=[.6, .6, .6])\n fig = plt.plot(xx, rr, linewidth=2, color=[1.*i/nt, 0, 1.-1.*i/nt], label='$t='+\"%.2f\" % t +'$')\n # plot the t<0 damping process for fun\n if( i==-nt_setup or i==-nt_setup*3/4 or i==-nt_setup/2 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n fig = plt.plot(xx, rr, linewidth=1, color=[0.9, 0.9, 0.9])\n \n plt.legend()\n plt.xlabel('$x$')\n plt.ylabel('$|\\psi|^2$')\n plt.axis([-2, 4, 0, 0.8])\n plt.savefig('solution.pdf', aspect = 'normal', bbox_inches='tight', pad_inches = 0)\n plt.close()" ]
[ "0.6345493", "0.6289429", "0.6155934", "0.6074104", "0.60056967", "0.5953673", "0.59231526", "0.5861008", "0.5852607", "0.5842901", "0.58290935", "0.58032346", "0.57665646", "0.576412", "0.57412446", "0.57161164", "0.5708992", "0.5680603", "0.56741995", "0.5667289", "0.5651036", "0.5636091", "0.56326026", "0.5630486", "0.56118804", "0.56004745", "0.560004", "0.55921143", "0.5578695", "0.55510914" ]
0.7249938
0
Nicholls-Turton entrainment parameterization: the_vars and coeffs are inputs into dmixed_vars; deltheta, F0, Fqv0 are calculated in dmixed_vars
def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):
    thetal_m = the_vars[0]
    qt_m = the_vars[2]
    zi = the_vars[1]
    dth = deltheta

    thetal_ft = thetal_m + dth
    qt_ft = coeffs.ft_qv
    dqt = qt_ft - qt_m

    # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)
    gamma = 6e-3
    thetal_3000 = thetal_ft + gamma*(3000-zi)
    LTS = thetal_3000 - coeffs.sst  # lower tropospheric stability

    # calculate coefficients
    press = tf.find_press(zi)
    Ad, Bd, issat = tf.calc_ABcoeffs(thetal_ft, qt_ft, press)
    Aw, Bw, issat = tf.calc_ABcoeffs(thetal_m, qt_m, press)

    invert = tf.t_uos_thetal(thetal_m, qt_m, press)
    T_0 = invert.temp
    lv = tf.L_t(invert.temp)
    Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)
    del_thv_dry = Ad * dth + Bd * dqt
    del_thv_sat = Aw * dth + Bw * dqt

    # account for evaporative cooling (increases we)
    ql_max = invert.ql
    Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)
    Del_thv = del_thv_dry - Cl * ql_max

    # calculate buoyancy integral terms
    rho = 1.
    lcl_press = tf.LCL_thetal(thetal_m, qt_m)
    zb = tf.find_height(lcl_press)

    T1 = zb/zi
    T2 = 0.5 * zb**2 / zi**2
    T3 = (zi-zb)/zi
    T4 = 0.5 * (zi**2 - zb**2) / zi**2

    # calculate delta_Fr
    delta_Frstar = 82.0  # Wm^-2
    Frlambda = 7.9  # Wm^-2, using with CTL from Gesso
    delta_Fr = delta_Frstar - Frlambda*qt_ft*1000  # convert qt_ft to g kg^-1

    wtl_0 = F0
    wqt_0 = Fqv0
    Del_F = delta_Fr/(tc.CPD*rho)  # use sensitivity to radiation a la Gesso Fig. 3

    term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))
    term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))
    term3 = Del_F * (Ad * T2 + Aw * T4)

    Theta_NE = term1 + term2 + term3

    # calculate w*
    wstar = (2.5*9.8/T_0*zi*Theta_NE)**(1/3.)

    # calculate chi*
    chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)

    # calculate del_m
    Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)

    # calculate we
    a2 = 15.
    Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv))

    A_NT = 0.2
    fac_NT = 2.5
    term4 = Del_thv_NT
    term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)
    denominator = term4 + term5

    we = A_NT * fac_NT * Theta_NE / denominator

    return we
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dmixed_vars(the_vars,tstep,coeffs):\n\n deltheta = theta_ft(the_vars[1],coeffs.ft_intercept,coeffs.ft_gamma) - the_vars[0]\n F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0]) #surface heat flux\n Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2]) #surface vapor flux\n Fint = -coeffs.k*F0 #entrainment heat flux\n \n if coeffs.use_NT: # use NT parameterization by calculating we using function\n went = calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0) # Nicholls-Turton parameterization\n \n else: # use simple we parameterization\n went = -Fint/deltheta #simple entrainment parameterization\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*coeffs.ft_qv*1000 # convert qt_ft to g kg^-1\n \n Fqvent = -went*( coeffs.ft_qv - the_vars[2])\n wsubs = -coeffs.D*the_vars[1]\n rho=1.\n cp=1004.\n \n derivs=np.empty_like(the_vars)\n \n # higher delta_Fr from drier air at mixed-layer top...hence cloudy air results in less radiative cooling\n derivs[0]=(F0 - Fint)/(the_vars[1]*rho) - delta_Fr/1004./the_vars[1] \n derivs[1] = went + wsubs\n derivs[2] = (Fqv0 - Fqvent)/the_vars[1]\n return derivs", "def update_model_parameters(phi, T, nz, coord, SWVD, form=\"Calonne\"):\r\n D_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n D_eff = phi * (1 - phi) * D0 + D0\r\n elif form == \"Calonne\": # Calonne et al. (2014)\r\n x = 2 / 3 - phi\r\n b = np.heaviside(x, 1)\r\n D_eff = D0 * (1 - 3 / 2 * phi) * b\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective thermal conductivity W/m/K\r\n k_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a\r\n elif form == \"Calonne\": # Calonne et al. (2011)\r\n k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective heat capacity - similar forumla in Hansen and Foslien (2015) and Löwe et al. 
(2019)\r\n rhoC_eff = np.zeros(nz)\r\n rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a\r\n\r\n ## Water Vapor density rho_v and its derivative rho_v_dT:\r\n [rho_v, rho_v_dT] = sat_vap_dens(nz, T, SWVD)\r\n\r\n return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT", "def solveFluNet(T,Ntime,a,b0,b1,g,k,w,y0,P,N,RHS=3):\n #I have added the variables P the transport matrix \n #and N the network size because they are needed\n #in the RHS.\n #I have added the variable RHS to be able to \n #choose which RHS method we want to use when running\n #solveFluNet\n \n #add input variables to RHS functions if needed\n def RHSnet(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\"\"\"\n S = y[:N]\n E = y[N:2*N]\n C = y[2*N:3*N]\n b = b0 + b1*(1+np.cos(2*np.pi*t))\n dy = np.zeros(3*N)\n dy[:N]= k*(1-S)-b*C*S+w*np.dot(P,S)-w*S\n dy[N:2*N]= b*C*S-(k+a)*E+w*np.dot(P,E)-w*E\n dy[2*N:3*N]= a*E-(g+k)*C+w*np.dot(P,C)-w*C\n return dy\n \n def RHSnetF(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\"\n Calculations carried out by fn.rhs\n \"\"\"\n dy = fn.rhs(P,y,t,a,b0,b1,g,k,w)\n return dy\n \n def RHSnetFomp(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\n Calculations carried out by fn.rhs_omp\n \"\"\"\n dy = fn.rhs_omp(P,y,t,a,b0,b1,g,k,w,2)\n return dy\n\n #Add code here and to RHS functions above to simulate network flu model\n t = np.linspace(0,T,Ntime)\n if (RHS==1):\n sol = odeint(RHSnet,y0,t,args=(a,b0,b1,g,k,w))\n if (RHS==2):\n sol = odeint(RHSnetF,y0,t,args=(a,b0,b1,g,k,w))\n if (RHS==3):\n sol = odeint(RHSnetFomp,y0,t,args=(a,b0,b1,g,k,w))\n S = sol[:,:N]\n E = sol[:,N:2*N]\n C = sol[:,2*N:3*N]\n return t,S,E,C", "def taylor_expansion(self,g_temp,g_step,var):\n A=np.zeros([self.n+1,self.n])\n for i in range(self.n):\n A[self.n][i]=1\n for j in range(self.n):\n if(i==j): A[i][j]=2.*var[i]+2.+g_temp*np.sum([self.XXZ.Z(k,i) for k in range(self.n) if k!=i])\n else: A[i][j]=-g_temp*self.XXZ.Z(j,i)\n #First derivative\n B1=np.zeros(self.n+1)\n for i in range(self.n): \n B1[i]=self.gamma*2.*g_temp*self.N*(self.n-self.N)+np.sum([self.XXZ.Z(k,i)*(var[k]-var[i]) for k in range(self.n) if k!=i])\n Ainv=np.linalg.pinv(A)\n der1=np.dot(Ainv,B1)\n #Second derivative\n B2=np.zeros(self.n+1)\n for k in range(self.n):\n B2[k]=self.gamma*2.*self.N*(self.n-self.N) -2.*der1[k]**2+2.*np.sum([self.XXZ.Z(l,k)*(der1[l]-der1[k]) for l in range(self.n) if k!=l])\n der2=np.dot(Ainv,B2)\n #Third derivative\n B3=np.zeros(self.n+1)\n for k in range(self.n):\n B3[k]=-6*der1[k]*der2[k]+3.*np.sum([self.XXZ.Z(l,k)*(der2[l]-der2[k]) for l in range(self.n) if k!=l])\n der3=np.dot(Ainv,B3)\n #Fourth derivative\n B4=np.zeros(self.n+1)\n for k in range(self.n):\n B4[k]=-8.*der3[k]*der1[k]-6.*der2[k]*der2[k]+4.*np.sum([self.XXZ.Z(l,k)*(der3[l]-der3[k]) for l in range(self.n) if k!=l])\n der4=np.dot(Ainv,B4)\n \n return var+g_step*der1+g_step**2*der2/2.+g_step**3*der3/6.+g_step**4*der4/24.", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position 
kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = (e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot", "def niv_variable_selection(x, y, t, max_vars):\n y1_t = (y == 1) & (t == 1)\n y0_t = (y == 0) & (t == 1)\n y1_c = (y == 1) & (t == 0)\n y0_c = (y == 0) & (t == 0)\n\n sum_y1_t = sum(y1_t)\n sum_y0_t = sum(y0_t)\n sum_y1_c = sum(y1_c)\n sum_y0_c = sum(y0_c)\n\n niv_dict = {}\n for col in x.columns:\n df = pd.concat([x[col].rename(col), y1_t.rename('y1_t'), y0_t.rename('y0_t'),\n y1_c.rename('y1_c'), y0_c.rename('y0_c')], axis=1)\n x_group = df.groupby(x[col])\n x_sum = x_group.sum()\n\n if sum_y0_t == 0 or sum_y1_t == 0:\n woe_t = 0\n else:\n woe_t = x_sum.apply(lambda r: np.log((r['y1_t'] * sum_y0_t) / (r['y0_t'] * sum_y1_t))\n if r['y1_t'] > 0 and r['y0_t'] > 0 else 0, axis=1)\n\n if sum_y0_c == 0 or sum_y1_c == 0:\n woe_c = 0\n else:\n woe_c = x_sum.apply(lambda r: np.log((r['y1_c'] * sum_y0_c) / (r['y0_c'] * sum_y1_c))\n if r['y1_c'] > 0 and r['y0_c'] > 0 else 0, axis=1)\n\n nwoe = woe_t - woe_c\n\n p_x_y1_t = x_sum['y1_t'] / sum_y1_t if sum_y1_t > 0 else 0\n p_x_y0_t = x_sum['y0_t'] / sum_y0_t if sum_y0_t > 0 else 0\n p_x_y1_c = x_sum['y1_c'] / sum_y1_c if sum_y1_c > 0 else 0\n p_x_y0_c = x_sum['y0_c'] / sum_y0_c if sum_y0_c > 0 else 0\n niv_weight = (p_x_y1_t * p_x_y0_c - p_x_y0_t * p_x_y1_c)\n\n niv_row = 100 * nwoe * niv_weight\n niv = niv_row.sum()\n niv_dict[col] = niv\n\n s_niv = pd.Series(niv_dict)\n s_selected_niv = s_niv.sort_values(ascending=False)[: max_vars]\n\n return s_selected_niv.index", "def undetermined_coefficients(gensols: List[Symbol], func_coeffs: List[Symbol], gt: Symbol, t: Symbol = t) -> Tuple[Symbol, Procedure]:\n\n Y = Function('Y', real=True)(t)\n\n coeffs = numbered_symbols('A', cls=Dummy)\n coefflist = []\n\n trialset = _undetermined_coefficients_match(gt, t)['trialset']\n\n notneedset = set()\n\n mult = 0\n for i, sol in enumerate(gensols):\n check = sol\n if check in trialset:\n # If an element of the trial function is already part of the\n # homogeneous solution, we need to multiply by sufficient x to\n # make it linearly independent. 
We also don't need to bother\n # checking for the coefficients on those elements, since we\n # already know it will be 0.\n while True:\n if check*t**mult in trialset:\n mult += 1\n else:\n break\n trialset.add(check*t**mult)\n notneedset.add(check)\n\n newtrialset = trialset - notneedset\n\n # while True:\n # dependent = False\n # for trial in newtrialset:\n # if trial in gensols:\n # dependent = True\n # break\n # if not dependent:\n # break\n # newtrialset = set([t*trial for trial in trialset])\n\n # trialset = trialset.union(newtrialset)\n\n trialfunc = sympy.Number(0)\n for i in newtrialset:\n c = next(coeffs)\n coefflist.append(c)\n trialfunc += c*i\n\n derivatives = []\n\n eqs = 0\n for order, coeff in enumerate(func_coeffs[::-1]):\n deriv = simplify(trialfunc.diff(t, order))\n derivatives.append(\n Eq(Derivative(Y, t, order), deriv, evaluate=False))\n eqs += coeff * deriv\n\n coeffsdict = dict(list(zip(trialset, [0]*(len(trialset) + 1))))\n\n eqs_lhs = eqs\n\n eqs = _mexpand(simplify(eqs - gt).expand())\n\n for i in Add.make_args(eqs):\n s = separatevars(i, dict=True, symbols=[t])\n coeffsdict[s[t]] += s['coeff']\n\n coeffvals = solve(list(coeffsdict.values()), coefflist)\n\n if not coeffvals:\n print(\n \"Could not solve `%s` using the \"\n \"method of undetermined coefficients \"\n \"(unable to solve for coefficients).\" % eqs)\n\n psol = trialfunc.subs(coeffvals)\n\n procedure = Procedure()\n procedure\\\n .text('Find ').latex('Y(t)').text(' that mimics the form of ').latex('g(t)', nl=True)\\\n .eq(Eq(Y, trialfunc, evaluate=False))\\\n .text('Compute successive derivatives of ').latex('Y(t)', nl=True)\\\n .equlist(derivatives)\\\n .text('Plug the derivatives into the LHS and equate coefficients', nl=True)\\\n .equlist([Eq(eqs_lhs, gt, evaluate=False),\n Eq(simplify(eqs_lhs).expand().collect(t), gt, evaluate=False)])\\\n .equarr([Eq(a, 0, evaluate=False) for a in coeffsdict.values()])\\\n .text('Solve for the undetermined coefficients', nl=True)\\\n .equarr([Eq(k, v, evaluate=False)\n for k, v in coeffvals.items() if k != 0] if len(coeffvals) > 0 else [])\\\n .text('Substitute the coefficients to get the particular solution', nl=True)\\\n .eq(Eq(Dummy('y_p'), psol, evaluate=False))\n\n return psol, procedure", "def _redef_via_predef_eqn(self):\r\n time = self.current_T # + self.d_T\r\n\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff) \r\n self.Epsilon = self.d_T * self.thermal_conductivity / \\\r\n (self.density * self.heat_capacity)\r\n\r\n # Source term.\r\n def F_func(elem, eta):\r\n x = elem.local_to_global(eta)\r\n F = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F -= self.Epsilon * self.redef_F_laplacian(x[0], x[1], time)\r\n F += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func,\r\n self.node_map,\r\n gauss_mult=2) # Use double gp_1D\r\n\r\n # Boundary term.\r\n def f_func(elem, eta):\r\n n = elem.guess_normal_vector_global(eta)\r\n f = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n x = elem.local_to_global(eta)\r\n # Evaluate our boundary term.\r\n f += self.Beta * self.redef_f_norm_grad(x[0], x[1], time, n)\r\n f += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func,\r\n self.node_map,\r\n gauss_mult=2)", "def variational_distribution(self):\n activation = tf.nn.relu\n if self.linear:\n activation = 
None\n\n #q(z | x, s)\n if self.log_variational:\n x = tf.log(1 + self.expression)\n else:\n x = self.expression\n\n h = dense(x, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n for layer in range(2, self.n_layers + 1):\n h = dense(h, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n\n \n self.qz_m = dense(h, self.n_latent, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n self.qz_v = dense(h, self.n_latent, activation=tf.exp, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n \n if self.scalings:\n # q(l | x, s)\n h = dense(x, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n self.ql_m = dense(h, 1, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n self.ql_v = dense(h, 1, activation=tf.exp, \\\n bn=False, keep_prob=None, phase=self.training_phase)", "def fluid_deriv(self):\n deriv = np.zeros((self.fluid_constraints['num_eq'],\n 2 * self.num_i + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n for j in range(self.num_nw_fluids):\n deriv[i * self.num_nw_fluids + j, i, j + 3] = 1\n deriv[i * self.num_nw_fluids + j, self.num_i + i, j + 3] = -1\n return deriv", "def variable_costs(dh: DataHandler):\n print(\"PtHydrogen not implemented\")\n\n scen_hor_map = dh.scenarios.horizon\n\n cost_var = dh.get(\"i_cost\").xs(\"varcost\", level=\"par_cost\")\n cost_var = cost_var.groupby([\"alltec\"]).apply(\n extract_horizon_specific_cost, scen_hor_map\n )\n cost_var = add_dimension(cost_var, dh.merge_stored_sets(\"r\"), \"r\")\n cost_var = cost_var.reorder_levels([\"alltec\", \"r\"])\n\n h2_price = dh.get(\"o_h2price_buy\")\n h2_price = add_dimension(h2_price, dh.merge_stored_sets(\"tec_h2g\"), \"alltec\")\n\n elec_price = dh.get(\"o_prices\")\n\n cost_fuel = dh.get(\"cost_fuel\")\n cost_fuel = add_dimension(cost_fuel, dh.merge_stored_sets(\"r\"), \"r\")\n cost_fuel = cost_fuel.reorder_levels([\"alltec\", \"r\"])\n\n cost_fuel.loc[h2_price.index, :] = h2_price\n\n eff = dh.get(\"eff\")\n\n co2_int = dh.get(\"co2_int\").div(1000)\n\n co2_price = dh.get(\"o_co2price\")\n\n co2_costs = co2_int * co2_price\n co2_costs.index.names = [\"alltec\", \"r\"]\n\n var_cost = (\n cost_fuel.add(co2_costs, fill_value=0).div(eff).add(cost_var, fill_value=0)\n )\n\n return var_cost", "def taylor_exp_3(y0, t, f, jac, hess, df_dt=None, d2f_dt2=None, d2f_dtdu=None, verbose=True, krylov_subspace_dim=None,\n **_):\n try:\n n, d = len(t), len(y0)\n y = np.zeros((n, d))\n except TypeError:\n n, d = len(t), 1\n y = np.zeros((n,))\n if verbose is False:\n count = Counter('', 0)\n elif verbose is True:\n count = Counter('Taylor Exp 3', n)\n else:\n count = Counter(verbose, n)\n if df_dt is None:\n def df_dt(*_): return np.zeros((d,))\n if d2f_dt2 is None:\n def d2f_dt2(*_): return np.zeros((d,))\n if d2f_dtdu is None:\n def d2f_dtdu(*_): return np.zeros((d, d))\n y[0] = y0\n j = jac(y[0], t[0])\n w = np.zeros((d, 3))\n expanded_vector = np.zeros((d + 3,))\n expanded_vector[-1] = 1\n expanded_matrix = np.zeros((d + 3, d + 3))\n expanded_matrix[-3:-1, -2:] = np.eye(2)\n expanded_matrix[:d, :d] = j\n for i in range(n - 1):\n h = t[i + 1] - t[i]\n w[:, -1] = f(y[i], t[i]) - np.dot(j, y[i])\n w[:, -2] = np.dot(jac(y[i], t[i]) - j, f(y[i], t[i])) + df_dt(y[i], t[i])\n w[:, -3] = np.dot(np.dot(hess(y[i], t[i]), f(y[i], t[i])), f(y[i], t[i])) \\\n + np.dot(jac(y[i], t[i]) - j, 
np.dot(jac(y[i], t[i]), y[i])) \\\n + np.dot(jac(y[i], t[i]) - j, df_dt(y[i], t[i])) \\\n + 2 * np.dot(d2f_dtdu(y[i], t[i]), f(y[i], t[i])) \\\n + d2f_dt2(y[i], t[i])\n expanded_vector[:d] = y[i]\n expanded_matrix[:d, -3:] = w\n if krylov_subspace_dim is None:\n y[i + 1] = np.dot(expm_sp(h * expanded_matrix), expanded_vector)[:d]\n else:\n y[i + 1] = expm_krylov(h * expanded_matrix, expanded_vector, krylov_subspace_dim)[:d]\n count(i + 1)\n return y", "def f1d(t,y,float_params,sigmaI): #sigmastep is an array\n \n ## y is Ntot0 ##\n\n # unpack parameters\n Nbar, Nstar, sigma0, nu_kin_mlyperus, DoverdeltaX2 = float_params \n\n # Ntot is passed in, Fqll calculated from Ntot\n Ntot0 = np.ascontiguousarray(y)\n Nqll0 = Nbar - Nstar * np.sin(2*np.pi*(Ntot0))\n\n # Calc surface deposition, dNtot_dt before diffusion\n m = (Nqll0 - (Nbar - Nstar))/(2*Nstar)\n sigmaM = (sigmaI - m * sigma0)/(1+m*sigma0)\n depsurf = nu_kin_mlyperus * sigmaM\n dNtot_dt = depsurf\n\n # Diffusion\n dy = diffuse_1d(Nqll0,DoverdeltaX2)\n dNtot_dt += dy \n\n # Package for output, only values of dNtot\n derivs = dNtot_dt\n return derivs", "def variation_of_parameters(y: List[Symbol], gt: Symbol, t: Symbol = t, do_integral=True) -> Tuple[Symbol, Procedure]:\n W, w = Wronskian(y, t)\n goW = simplify(gt / W)\n\n yp = 0\n\n Wdets = []\n integrals = []\n\n col = [0] * len(y)\n col[-1] = 1\n for i in range(len(y)):\n Wi = w.copy()\n Wi[:, i] = col.copy()\n\n # reduce cos^2 t + sin^2 t to 1\n Wi_det = trigsimp(simplify(Wi.det()), deep=True, recursive=True)\n\n integrand = (Wi_det * goW).expand()\n integral = integrate(\n integrand, t) if do_integral else Integral(integrand, t)\n yp += y[i] * integral\n\n if do_integral:\n integrals.append(\n Eq(Dummy('mu_{}'.format(i + 1)),\n Eq(Integral(integrand, t), integral, evaluate=False), evaluate=False)\n )\n else:\n integrals.append(Eq(Dummy('mu_{}'.format(i)),\n Integral(integrand, t), evaluate=False))\n\n Wdets.append(\n Eq(Symbol('W{}'.format(i+1)), Eq(Determinant(Wi), Wi_det, evaluate=False), evaluate=False))\n\n yps = logcombine(simplify(yp))\n\n procedure = Procedure()\n procedure\\\n .text('Compute the Wronskian determinant', nl=True)\\\n .eq(Eq(Dummy('W'), Eq(Determinant(w), W, evaluate=False), evaluate=False))\\\n .text('Compute ').latex('W_i', nl=True)\\\n .equlist(Wdets)\\\n .text('Calculate and simplify ').latex('\\\\frac{g(t)}{W(t)}', nl=True)\\\n .eq(Eq(sympy.Mul(gt, sympy.Pow(W, -1, evaluate=False), evaluate=False), goW, evaluate=False))\\\n .text('Compute ').latex('\\\\mu_i = \\\\int \\\\frac{g(t)W_i(t)}{W(t)} dt', nl=True)\\\n .equlist(integrals)\\\n .text('Compute the sum ').latex('\\\\sum_{i=1}^{k} y_i \\\\int \\\\frac{g(t)W_i(t)}{W(t)} dt', nl=True)\\\n .equlist([\n Eq(Dummy('y_p'), yp, evaluate=False),\n Eq(Dummy('y_p'), yps, evaluate=False)\n ])\\\n .text('Complementray + particular = general', nl=True)\\\n .eq(Eq(Dummy('y'), to_general(y, yps)[0], evaluate=False))\n\n return yps, procedure", "def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to 
hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)", "def fwd_model(Ti_samples,To_samples, dw_samples, kw_samples,hi_samples,ho_samples,TA_samples):\n\t#Determine number of samples (totquat)\n\ttotquat=len(Ti_samples)\n\t# List to store values of Q (assuming no radiative heat transfer) calculated from\n\t# the random samples of the parameters\n\tQ_samples_4PCE=[]\n\t# List to store values of Q assuming radiative heat transfer occurs\n\t#Q_r_samples_4PCE=[]\n\t# Calculate values of heat flux Q (assuming no radiative heat transfer)\n\t# for the different sample values and append to the list\n\tfor i in range(totquat):\n\t\t(Q,T1,T2)=compute_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i],\\\n\t\t\tkw_samples[i], hi_samples[i], ho_samples[i])\n\t\tQ_samples_4PCE.append(Q)\n\t\t# Calculate values of heat flux Q assuming radiative heat transfer to atmosphere and append to list\n\t\t# For the required estimates of Q,T1, and T2 needed to solve the nonlinear system,\n\t\t# we use the values obtained by solving the system assuming no radiative heat transfer\n\t\t\"\"\"Q2=r_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i], kw_samples[i],\\\n\t\t\thi_samples[i], ho_samples[i], TA_samples[i], (Q,T1,T2))\n\t\tQ_r_samples_4PCE.append(Q2)\n\t# Convert Q_r_samples_4PCE to numpy array\n\tQ_evals = np.array(Q_r_samples_4PCE)\n\treturn Q_evals\"\"\"\n\t\tConvert Q_samples_4PCE to numpy array\n\t\tQ_evals = np.array(Q_samples_4PCE)\n\t\treturn Q_evals\"\"\"\n\n\ndef KDE(fcn_evals):\n\t\"\"\"\n\tPerforms kernel density estimation\n\tInput:\n\t\tfcn_evals: numpy array of evaluations of the forward model (values of heat flux Q)\n\tOutput:\n\t\txpts_pce: numpy array of points at which the PDF is estimated.\n\t\tPDF_data_pce: numpy array of estimated PDF values.\n\t\"\"\"\n\t# Perform KDE on fcn_evals\n\tkern_pce=stats.kde.gaussian_kde(fcn_evals)\n\t# Generate points at which to evaluate the PDF\n\txpts_pce=np.linspace(fcn_evals.min(),fcn_evals.max(),200)\n\t# Evaluate the estimated PDF at these points\n\tPDF_data_pce=kern_pce(xpts_pce)\n\treturn xpts_pce, PDF_data_pce", "def run_main(sst, ft_qv, use_NT):\n\n dtout=10. #minutes\n end_time=8*24. #hours\n del_time=dtout*60. #seconds\n end_time=end_time*3600. #seconds\n #sst=297\n D=5.e-6 #s-1\n U=7 #m/s\n psfc=100. 
#kPa\n qsfc=tf.qs_tp(sst,psfc)\n ft_intercept = 292 #K\n ft_gamma = 6.e-3 #K/m\n #ft_qv = 2.e-3\n k=0.2 #entrainment efficiency\n Cd = 1.e-3 #drag coefficient\n tspan = np.arange(0.,end_time,del_time)\n vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start\n the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,\n qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT\n the_tup=make_tuple(the_tup,'coeffs')\n output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))\n result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])\n\n # save time/computation by only doing calculations for the last timestep (equilibrium)\n result['time']=tspan[-1]/3600./24. #days\n result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]\n result['delqv'] = ft_qv - result['qv'].iloc[-1]\n result['LCL'] = calc_lcl(result.iloc[-1], psfc)\n result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)\n result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)\n result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)\n \n # decide how to calculate entrainment\n the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]\n if use_NT:\n result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1], \n result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])\n else:\n result['went']=calc_went(result.iloc[-1],the_tup)\n\n result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)\n\n with open('dumpmodel.csv','w') as f:\n result.to_csv(f,index=False)\n \n return None", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n 
self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n 
self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def function_PLD_Ntransits(coeffs, t_list, Pns_list, lc_list,\n coeffs_dict_list,\n coeffs_tuple, fix_coeffs, fix_coeffs_channels,\n batman_params_list, PLD_params_list):\n\n residuals = []\n\n x1 = len([key for key in coeffs_tuple[0:9] if key not in fix_coeffs\n and key not in fix_coeffs_channels])\n x2 = len([key for key in coeffs_tuple[0:9] if key in fix_coeffs_channels])\n x3 = len([key for key in coeffs_tuple if key not in fix_coeffs\n and key not in fix_coeffs_channels])\n\n for i in range(len(coeffs_dict_list)):\n\n coeffs_fit = np.concatenate((coeffs[i*x3:i*x3 + x1],\n coeffs[-x2:],\n coeffs[i*x3 + x1 :(i+1)*x3 ]))\n\n new_flux = model_PLD(coeffs_fit, t_list[i], Pns_list[i], coeffs_dict_list[i],\n coeffs_tuple, fix_coeffs, batman_params_list[i], PLD_params_list[i])\n\n 
residuals.append(lc_list[i]-new_flux)\n\n return np.array(np.concatenate(residuals))", "def derivatives(self, increment_filter):\n ######################################################################\n # derivatives fluid and mass balance are static\n k = self.num_nw_fluids + 1\n\n ######################################################################\n # derivatives for specified heat transfer\n if self.Q.is_set:\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n self.jacobian[k, 0, 2] = -self.inl[0].m.val_SI\n self.jacobian[k, 1, 2] = self.inl[0].m.val_SI\n # custom variable Q\n if self.Q.is_var:\n self.jacobian[k, 2 + self.Q.var_pos, 0] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio\n if self.pr.is_set:\n self.jacobian[k, 0, 1] = self.pr.val\n self.jacobian[k, 1, 1] = -1\n # custom variable pr\n if self.pr.is_var:\n self.jacobian[k, 2 + self.pr.var_pos, 0] = (\n self.inl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # derivatives for specified zeta\n if self.zeta.is_set:\n f = self.zeta_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(\n f, 'm', 0, zeta='zeta')\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(\n f, 'p', 0, zeta='zeta')\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(\n f, 'h', 0, zeta='zeta')\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(\n f, 'p', 1, zeta='zeta')\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(\n f, 'h', 1, zeta='zeta')\n # custom variable zeta\n if self.zeta.is_var:\n self.jacobian[k, 2 + self.zeta.var_pos, 0] = (\n self.numeric_deriv(f, 'zeta', 2, zeta='zeta'))\n k += 1\n\n ######################################################################\n # derivatives for specified hydro-group parameters\n if self.hydro_group.is_set:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(func, 'm', 0)\n if not increment_filter[0, 1]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(func, 'p', 0)\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(func, 'h', 0)\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(func, 'p', 1)\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(func, 'h', 1)\n # custom variables of hydro group\n for var in self.hydro_group.elements:\n if var.is_var:\n self.jacobian[k, 2 + var.var_pos, 0] = (\n self.numeric_deriv(func, self.vars[var], 2))\n k += 1\n\n ######################################################################\n # derivatives for additional equations\n self.additional_derivatives(increment_filter, k)", "def forward(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=0, eps=0, gamma=0, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # forward euler and forward/backward timestep\n beta = np.float32(beta)\n mu = np.float32(mu)\n \n du1, du0 = du[:2]\n dv1, dv0 = dv[:2]\n dn0 = dn[0]\n \n dndt_x(h, n, u, v, dx, dy, dn0) # calculate dndt and put it into dn0\n \n n1 = n + ( dn0 )*dt\n \n dudt_x(h, n, f, u, v, dx, dy, du0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n, 
f, u, v, dx, dy, dv0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dudt_x(h, n1, f, u, v, dx, dy, du1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n1, f, u, v, dx, dy, dv1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n u1 = u + ( beta*du1 + (one-beta)*du0 )*dt\n v1 = v + ( beta*dv1 + (one-beta)*dv0 )*dt\n \n n, u, v = n1, u1, v1\n \n du = [du1, du0, du0, du0]\n dv = [dv1, dv0, dv0, dv0]\n dn = [dn0, dn0, dn0]\n return n1, u1, v1, du, dv, dn", "def dfegn17(mod96file, wavetype, nmod, freq, pertu = 0.05, tmpdir = None, cleanup = True, fig = None):\n \n #create the main temporary directory\n if tmpdir is None: \n tmpdir = \"/tmp/tmpdir_dfegn17_%10d\" % (np.random.rand() * 1e10)\n while os.path.isdir(tmpdir):\n tmpdir += \"_%10d\" % (np.random.rand() * 1e10)\n os.mkdir(tmpdir)\n\n #read starting model\n Z, H, VP, VS, RHO, QP, QS, ETAP, ETAS, FREFP, FREFS = readmod96(mod96file)\n model0 = np.concatenate((H, VP, VS, RHO))\n nlayer = len(H)\n IH = np.arange(nlayer) #index of thickness parameters\n IVP = np.arange(1 * nlayer, 2 * nlayer) #index of Vp parameters\n IVS = np.arange(2 * nlayer, 3 * nlayer) \n IRH = np.arange(3 * nlayer, 4 * nlayer) \n\n\n\n #compute eigenfunctions for starting model\n out0 = fegn17(mod96file, wavetype, nmod, freq, tmpdir = \"%s/%s\" % (tmpdir, \"startingmodel\"), fig = None, cleanup = False)\n\n #initiate kernels\n if wavetype == \"R\":\n DURDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n DUZDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n DTRDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n DTZDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n elif wavetype == \"L\":\n DUTDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n DTTDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n\n\n #perturbate each parameter subsequently\n for nparam in xrange(len(model0)):\n if nparam == nlayer - 1 :\n #thickness of the half space: meaningless\n continue\n\n #prepare the perturbated model and determine the sub-directory name\n modeln = model0.copy()\n modeln[nparam] *= (1.0 + pertu)\n tmpdir_n = \"%s/param%06d\" % (tmpdir, nparam)\n mod96file_n = \"%s/model_param%06d.mod96\" % (tmpdir, nparam) #write the perturbated file in the main temporary directory\n\n #write perturbated model\n writemod96(mod96file_n, \n H = modeln[IH], \n VP = modeln[IVP], \n VS = modeln[IVS], \n RHO = modeln[IRH], \n QP = QP, QS = QS, ETAP=ETAP, ETAS=ETAS, FREFP=FREFP, FREFS=FREFS) #keep attenuation untouched\n\n #call fegn17 with perturbated model\n out_n = fegn17(mod96file_n, wavetype, nmod, freq, tmpdir = tmpdir_n, fig = None, cleanup = False)\n dm = (modeln[nparam] - model0[nparam])\n\n for modnum in xrange(nmod+1):\n key = \"%s%d\" % (wavetype, modnum)\n\n if wavetype == \"R\":\n DURDN[modnum, :, nparam] = (out_n[key]['UR'] - out0[key]['UR']) / dm\n DUZDN[modnum, :, nparam] = (out_n[key]['UZ'] - out0[key]['UZ']) / dm\n DTRDN[modnum, :, nparam] = (out_n[key]['TR'] - out0[key]['TR']) / dm\n DTZDN[modnum, :, nparam] = (out_n[key]['TZ'] - out0[key]['TZ']) / dm\n elif wavetype == \"L\":\n\n DUTDN[modnum, :, nparam] = (out_n[key]['UT'] - out0[key]['UT']) / dm\n DTTDN[modnum, :, nparam] = (out_n[key]['TT'] - out0[key]['TT']) / dm\n\n\n #-------------------\n dout = {\"Z\" : out0['model'][\"Z\"], \"T\" : 1. 
/ freq}\n if wavetype == \"R\":\n dout['DURDH'] = DURDN[:, :, IH ]\n dout['DURDVP'] = DURDN[:, :, IVP]\n dout['DURDVS'] = DURDN[:, :, IVS]\n dout['DURDRH'] = DURDN[:, :, IRH]\n\n dout['DUZDH'] = DUZDN[:, :, IH ]\n dout['DUZDVP'] = DUZDN[:, :, IVP]\n dout['DUZDVS'] = DUZDN[:, :, IVS]\n dout['DUZDRH'] = DUZDN[:, :, IRH]\n\n dout['DTRDH'] = DTRDN[:, :, IH ]\n dout['DTRDVP'] = DTRDN[:, :, IVP]\n dout['DTRDVS'] = DTRDN[:, :, IVS]\n dout['DTRDRH'] = DTRDN[:, :, IRH]\n\n dout['DTZDH'] = DTZDN[:, :, IH ]\n dout['DTZDVP'] = DTZDN[:, :, IVP]\n dout['DTZDVS'] = DTZDN[:, :, IVS]\n dout['DTZDRH'] = DTZDN[:, :, IRH]\n \n elif wavetype == \"L\": \n dout['DUTDH'] = DUTDN[:, :, IH ]\n dout['DUTDVP'] = DUTDN[:, :, IVP]\n dout['DUTDVS'] = DUTDN[:, :, IVS]\n dout['DUTDRH'] = DUTDN[:, :, IRH]\n \n dout['DTTDH'] = DTTDN[:, :, IH ]\n dout['DTTDVP'] = DTTDN[:, :, IVP]\n dout['DTTDVS'] = DTTDN[:, :, IVS]\n dout['DTTDRH'] = DTTDN[:, :, IRH]\n\n if cleanup:\n #remove temporary directory\n execbash('rm -rf %s' % tmpdir, \".\")\n\n if fig is not None:\n\n ax2 = fig.add_subplot(224)\n ax1 = fig.add_subplot(223, sharey = ax2)\n ax3 = fig.add_subplot(222, sharex = ax2)\n\n #------------------\n ax1.invert_yaxis()\n #------------------\n z = np.concatenate((np.repeat(out0['model'][\"Z\"], 2)[1:], [sum(out0['model'][\"H\"]) * 1.1]))\n vp = np.repeat(out0['model'][\"Vp\"], 2)\n vs = np.repeat(out0['model'][\"Vs\"], 2)\n rh = np.repeat(out0['model'][\"Rh\"], 2)\n ax1.plot(vp, z, label = \"Vp\")\n ax1.plot(vs, z, label = \"Vs\")\n ax1.plot(rh, z, label = \"Rh\")\n ax1.legend()\n ax1.grid(True)\n ax1.set_ylabel('model depth (km)')\n\n #------------------\n if wavetype == \"R\":\n vmax = abs(dout['DUZDVS'][nmod, :, :]).max()\n ax3.plot(dout[\"Z\"], out0[\"%s%d\" % (wavetype, nmod)]['UZ'], label = \"UZ\")\n Y = dout['DUZDVS'][nmod, :, :]\n Y = np.ma.masked_where(np.isnan(Y), Y)\n ax2.pcolormesh(dout[\"Z\"], dout[\"Z\"], Y, vmin = -vmax, vmax = vmax)\n ax2.set_title(\"DUZ/DVS, T = %f, mode %d\" % (1. / freq, nmod))\n ax3.set_xlabel('eigenfunction depth (km)')\n elif wavetype == \"L\":\n vmax = abs(dout['DUTDVS'][nmod, :, :]).max()\n ax3.plot(dout[\"Z\"], out0[\"%s%d\" % (wavetype, nmod)]['UT'], label = \"UT\")\n Y = dout['DUTDVS'][nmod, :, :]\n Y = np.ma.masked_where(np.isnan(Y), Y)\n ax2.pcolormesh(dout[\"Z\"], dout[\"Z\"], Y, vmin = -vmax, vmax = vmax)\n ax2.set_title(\"DUT/DVS, T = %f, mode %d\" % (1. / freq, nmod))\n ax3.set_xlabel('eigenfunction depth (km)')\n\n ax3.xaxis.set_label_position(\"top\") \n ax3.legend()\n ax2.grid(True)\n ax3.grid(True)\n\n return dout", "def dynstall_mhh_dxdt_simple(t, x, U, U_dot, omega, alpha_34, p):\n # States\n x1=x[0] # Downwash memory term 1\n x2=x[1] # Downwash memory term 2\n x3=x[2] # Clp', Lift coefficient with a time lag to the attached lift coeff\n x4=x[3] # f'' , Final separation point function\n # Parameters\n alpha0 = p['alpha0']\n Cla = p['Cla']\n c = p['chord']\n A1 = p['A1']\n A2 = p['A2']\n b1 = p['b1']\n b2 = p['b2']\n F_st = p['F_st']\n # Variables derived from inputs\n U = max(U, 0.01)\n Tu = max(c/(2*U), 1e-4) # Eq. 23\n Tf = p['Tf0']*Tu # OLD was twice: Tf = p['Tf0']*c/U\n Tp = p['Tp0']*Tu # OLD was twice: Tp = p['Tp0']*c/U\n # Variables derived from states\n if p['alpha0_in_x1x2']:\n alphaE = alpha_34*(1-A1-A2)+ x1 + x2 # Eq. 12\n else:\n alphaE = (alpha_34-alpha0)*(1-A1-A2)+ x1 + x2 + alpha0 # Eq. 12\n\n# alphaE = u['alphaE'](t) # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< HACK HACK TODO TODO TODO TODO TODO\n\n Clp = Cla * (alphaE-alpha0) + np.pi * Tu * omega # Eq. 
13\n alphaF = x3/Cla+alpha0 # p. 13\n fs_aF = F_st(alphaF) # p. 13\n if(fs_aF<0):\n print('Problematic fs:',fs_aF)\n x4 = np.clip(x4, 1e-16, 1.0) # Constraining x4 between 0 and 1 increases numerical stability\n # State equation\n xdot = [0]*4\n if p['alpha0_in_x1x2']:\n xdot[0] = -1/Tu * (b1 + c * U_dot/(2*U**2)) * x1 + b1 * A1 / Tu * alpha_34\n xdot[1] = -1/Tu * (b2 + c * U_dot/(2*U**2)) * x2 + b2 * A2 / Tu * alpha_34\n else:\n xdot[0] = -1/Tu * (b1 + c * U_dot/(2*U**2)) * x1 + b1 * A1 / Tu * (alpha_34-alpha0)\n xdot[1] = -1/Tu * (b2 + c * U_dot/(2*U**2)) * x2 + b2 * A2 / Tu * (alpha_34-alpha0)\n xdot[2] = -1/Tp * x3 + 1/Tp * Clp\n xdot[3] = -1/Tf * x4 + 1/Tf * fs_aF\n return xdot", "def DyEvo(x, t, T0, r1, r2, K_co, K_ch, alpha, n):\n y=np.zeros([np.size(x)])\n D=np.zeros([2]) \n #define fitnss\n D[0]=dmax*x[0]**n/(K_co**n+x[0]**n) #cooperator\n D[1]=dmax*x[0]**n/(K_ch**n+x[0]**n) #cheater \n #degradation\n deg=fmax*x[1]/(x[1]+Kd) \n #ODE of eco-evo dynamics\n y[0]=alpha*T0-deg*x[0]-alpha*x[0] #dt/dt\n y[1]=x[1]*(r1*(1-x[1]-x[2])-D[0]-alpha)#d Co/dt\n y[2]=x[2]*(r2*(1-x[1]-x[2])-D[1]-alpha) #d Ch/dt\n \n return y", "def dE_mdn(self, x, y, t, w1 = None, w2 = None):\n if w2 == None:\n w2 = self.w2\n M = int(self.M)\n # avoid underrun\n \n alpha, sigma, mu = self.getMixtureParams(y.T)\n #import pdb; pdb.set_trace()\n \n #T = t.T[None, None, :] # note: np.tile is slower than this notation\n T = t.T[None, :]\n \n phi = self._phi(T, mu, sigma)\n aphi = alpha*phi\n pi = aphi / np.sum(aphi, 0)\n \n # derivatives of E with respect to the output variables (s. Bishop 1995, chp. 6.4)\n dE_dy_alpha = alpha - pi\n dE_dy_sigma = - 0.5 * pi * ((np.sum((T-mu)**2 , 1) / sigma) - self.c)\n dE_dy_mu = pi[:,np.newaxis,:] * (mu - T) / sigma[:,np.newaxis,:]\n\n dk = np.zeros([self.ny, x.shape[0]])\n dk[0:M,:] = dE_dy_alpha\n dk[M:2*M,:] = dE_dy_sigma\n \n dk[2*M:] = np.reshape(dE_dy_mu, [M*self.c, x.shape[0]])\n \n # back-propagate the dks\n #t0=datetime.now()\n dEnw1, dEnw2 = self.backward(x, dk, None, w2)\n #print 'eval of dE_mdn:' + str((datetime.now()-t0))\n #dj = (1 - self.z[1:]**2) * np.dot(w2[:,1:].T, dk)\n # evaluate derivatives with respect to the weights\n #dEnw1 = (dj[:,:,np.newaxis]*x[np.newaxis,:,:]).transpose(1,0,2)\n #dEnw2 = (dk[:,:,np.newaxis]*self.z.T[np.newaxis,:,:]).transpose(1,0,2)\n return dEnw1, dEnw2", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n znew = z\n\n iter = 0\n eps = 1.0e99\n while iter < self.maxiter and abs(eps) > self.atol:\n z = znew\n znew = 4.0 - x*z\n\n eps = x*znew + znew - 4.0\n\n unknowns['z'] = znew\n unknowns['y'] = x + 2.0*znew\n\n resids['z'] = eps\n #print(unknowns['y'], unknowns['z'])", "def _func_pen(self, coeffs_ext):\n l_elastic_net = self.l_elastic_net\n eta = self.eta\n n_features = self.n_features\n coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]\n return l_elastic_net * ((1. 
- eta) * coeffs_ext.sum()\n + 0.5 * eta * np.linalg.norm(coeffs) ** 2)", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n a = self.a\n b = self.b\n c = self.c\n\n unknowns['y'] = a*x**2 + b*x + c", "def test_adding_three_variables():\n a = fwd.Variable()\n b = fwd.Variable()\n c = fwd.Variable()\n f = fwd.exp(a-b+c)\n assert equals(f.evaluation_at({a: 1.0, b: 2.0, c: 3.0}), np.exp(2.0))\n assert equals(f.derivative_at(b, {a: 1.0, b: 2.0, c: 3.0}), -np.exp(2.0))\n assert equals(f.derivative_at(a, {a: 1.0, b: 2.0, c: 3.0}), np.exp(2.0))", "def fluid_deriv(self, increment_filter, k):\n i = 0\n for fluid in self.nw_fluids:\n j = 0\n for o in self.outl:\n self.jacobian[k, j + 1, 0] = -o.fluid.val[fluid]\n self.jacobian[k, j + 1, i + 3] = -o.m.val_SI\n j += 1\n self.jacobian[k, 0, 0] = self.inl[0].fluid.val[fluid]\n self.jacobian[k, 0, i + 3] = self.inl[0].m.val_SI\n k += 1\n i += 1" ]
[ "0.80656624", "0.5865987", "0.5693037", "0.5615564", "0.55575585", "0.55294347", "0.55230105", "0.5521183", "0.5503675", "0.5450836", "0.540424", "0.53514105", "0.5325137", "0.53189623", "0.5317447", "0.5312849", "0.5308044", "0.5304588", "0.530239", "0.5301694", "0.5296928", "0.5287328", "0.52784234", "0.5275083", "0.52576554", "0.5250537", "0.5234691", "0.5217848", "0.5201104", "0.5196678" ]
0.73483014
1
find the lcl (in m) for a row in the dataframe
def calc_lcl(row,psfc):
    Tdew = tf.tmr(row['qv'],psfc)
    LCL = tf.LCL(Tdew,row['theta'],psfc)  #kPa
    #
    # rough approximation: 10 kPa = 1 km
    #
    delp = psfc - LCL
    lcl_h = delp*100.
    return lcl_h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_index_lm(l, m):\n return (l+1)**2 -1 -l + m", "def compute_kl(self, df):\n value_counts = [df[col].value_counts() for col in self.hist_cols]\n next_hists = self.value_counts_to_hists(value_counts)\n\n if self.prev_hists is None:\n self.prev_hists = next_hists\n return None\n\n output = []\n for prev_h, curr_h in zip(self.prev_hists, next_hists):\n for i in range(len(prev_h)):\n prev_h[i] = prev_h[i] if prev_h[i] != 0 else 1\n curr_h[i] = curr_h[i] if curr_h[i] != 0 else 1\n kl = entropy(prev_h, curr_h)\n output.append(kl)\n\n self.prev_hists = next_hists\n return output", "def lfindwithin (data):\r\n\r\n numfact = len(data[0])-1\r\n withinvec = 0\r\n for col in range(1,numfact):\r\n examplelevel = pstats.unique(pstats.colex(data,col))[0]\r\n rows = pstats.linexand(data,col,examplelevel) # get 1 level of this factor\r\n factsubjs = pstats.unique(pstats.colex(rows,0))\r\n allsubjs = pstats.unique(pstats.colex(data,0))\r\n if len(factsubjs) == len(allsubjs): # fewer Ss than scores on this factor?\r\n withinvec = withinvec + (1 << col)\r\n return withinvec", "def cal_L(self):\n # calculate the l matrix\n self.point_matrixs = self.point_matrix.reshape(\n self.point_matrix.shape[0], 1, self.point_matrix.shape[1])\n self.point_matrixs = np.tile(self.point_matrixs,\n (self.attach_points.shape[0], 1))\n self.attach_points_matrix = np.matlib.repmat(\n self.attach_points[:, 0:3], self.point_matrix.shape[0], 1)\n self.attach_points_matrix = self.attach_points_matrix.reshape(\n self.point_matrix.shape[0], self.attach_points.shape[0], 3)\n self.L = np.subtract(self.attach_points_matrix,\n self.point_matrixs)\n # self.L[:,self.attach_points[:,3]==1,:] = \\\n # - self.L[:,self.attach_points[:,3]==1,:]\n # print(self.L)", "def calculate_recovery_clifford(cl_in, desired_cl=0):\n row = list(clifford_lookuptable[cl_in])\n return row.index(desired_cl)", "def calc_Lr(rho,mld,f,g=9.8,po=1027.):\n n2ml=np.ndarray(len(rho[1,:-1]))\n for i in range(len(rho[1,:-1])):\n n2ml[i]=-(g/po)*((rho[15,i]-rho[np.int8(mld[i])+15,i])/mld[i])\n Lr=(np.sqrt(n2ml)*mld[:-1])/f\n\n return Lr", "def find_lcs(l1: str, l2: str, length1: int, length2: int):\n \"\"\" Theorem:{\n Initialize matrix with 0 for first row and colm\n If s1[i] = s2[j], update matrix[i][j] with value\n of matrix[i-1][j-1]+1\n Else update matrix[i][j] with max of value among\n matrix[i][j-1],matrix[i-1][j]\n Matrix[n][m] will be lcs\n }\n \"\"\"\n matrix = [[None]*(length1+1) for i in range(0, length2+1)]\n for i in range(0, length2+1):\n for j in range(0, length1+1):\n if i == 0 or j == 0:\n matrix[i][j] = 0\n elif l1[j-1] == l2[i-1]:\n matrix[i][j] = matrix[i-1][j-1] + 1\n else:\n matrix[i][j] = max(matrix[i][j-1], matrix[i-1][j])\n lcs = [None for i in range(0, matrix[length2][length1])]\n index = matrix[length2][length1]\n m = length2 \n n = length1\n while(m > -1 and n > -1):\n if l2[m-1] == l1[n-1]:\n lcs[index-1] = l2[m-1]\n index -= 1\n m -= 1\n n -= 1\n elif matrix[m-1][n] > matrix[m][n-1]:\n m -= 1\n else:\n n -= 1\n return lcs", "def get_lims(data):\n return data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1", "def _compute_lcs(source, target):\n table = _lcs_table(source, target)\n return _backtrack(table, source, target, len(source), len(target))", "def getCL(self):\r\n return self.cL;", "def calculate_LC(read):\n k_values = []\n for i in range(1,len(read)+1):\n k_values.append(i)\n observed_kmers = []\n for i in k_values:\n observed_kmers.append((count_kmers_observed(read, i)))\n possible_kmers = []\n 
for i in k_values:\n possible_kmers.append(count_kmers_possible(read, i))\n df = pd.DataFrame(list(zip(k_values, observed_kmers, possible_kmers)), columns = ['k','observed kmers','possible kmers'])\n df.at['Total', 'observed kmers'] = df['observed kmers'].sum()\n df.at['Total', 'possible kmers'] = df['possible kmers'].sum()\n x = int(df.at['Total', 'observed kmers'])\n y = int(df.at['Total', 'possible kmers'])\n LC = (x/y)\n return(LC)", "def getLPos(self):\n c = 0\n while c <= ALIENS_IN_ROW-1:\n i = 0\n for a in range(ALIEN_ROWS):\n if self._aliens[a][c] != None:\n return self._aliens[a][c].x - ALIEN_WIDTH/2\n else:\n i +=1\n if i == ALIEN_ROWS:\n c +=1", "def get_L(self, tolerance=None):\r\n index = self.data.index\r\n columns = self.data.columns\r\n\r\n # Obtain the eigenvalues and eigenvectors\r\n E, V = sandy.CategoryCov(self.data).get_eig(tolerance=tolerance)\r\n\r\n # need sparse because much faster for large matrices (2kx2k from J33 Pu9)\r\n # with a lot of zero eigs\r\n # this is exactly equivalent to V.values @ np.diag(np.sqrt(E.values))\r\n A = (sparse.csr_matrix(V.values) @ sparse.csr_matrix(np.diag(np.sqrt(E.values)))).todense()\r\n \r\n# u, s, vh = self.get_svd()\r\n# A = (sparse.csr_matrix(u) @ sparse.csr_matrix(np.diag(np.sqrt(s)))).todense()\r\n\r\n # QR decomposition\r\n Q, R = scipy.linalg.qr(A.T)\r\n L = R.T\r\n\r\n return pd.DataFrame(L, index=index, columns=columns)", "def get_l(m):\n L = m.copy()\n for i in range(L.shape[0]):\n L[i, i] = 1\n L[i, i+1:] = 0\n return np.matrix(L)", "def calcLnL(self, tree):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.calcLnL(self, tree)", "def lll(self, delta=0.75):\n if self.domain != ZZ:\n raise DMDomainError(\"ZZ expected, got %s\" % self.domain)\n elif self.rows > self.cols:\n raise DMShapeError(\"Matrix must not have more rows than columns.\")\n\n rep = self._lll(delta=delta)\n return self._new_rep(rep)", "def generate_LLL_matrix(self, matrix):\n LLL_matrix = matrix.transpose().LLL().transpose()\n return LLL_matrix", "def extract_hillslope_profile(node_matrix):\n ncols = numpy.size(node_matrix, 1)\n z = numpy.zeros(ncols)\n for col in range(ncols):\n dirt = numpy.where(node_matrix[:,col]!=0)[0]\n if len(dirt)>0:\n z[col] = numpy.amax(dirt)\n return z", "def log_likelihoods(self):\n return self.__data_frame.loc[:, \"ll\":\"ll\"].values[:-1]", "def recommend_L(self):\n\n min_eigenvalue = torch.min(torch.linalg.eigvalsh(self.Wc))\n lamBda = 1 / (1 + 4*torch.abs(min_eigenvalue - self.vars['q_init']))\n return lamBda", "def getL(self):\r\n return self.L", "def _getPMI(self, df, targetColname):\n pmi = 0\n search_term = df[targetColname]\n noofterms = len(search_term)\n startindex = 0\n pmiAccumulate = 0\n if(noofterms>1):\n for i in range(0,noofterms-1):\n pmi = self.computePMI(search_term[i],search_term[i+1])\n pmiAccumulate = pmiAccumulate+pmi\n pmiAccumulate = pmiAccumulate/noofterms\n pmi = pmiAccumulate\n return pmi", "def Findlt(l,sp,rhs):\n m = sp.M(l)\n return (m / l**3) - rhs", "def calcLnLFromNode(self, nd):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.calcLnLFromNode(self, nd)", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def _lcs(x, y):\n n, m = len(x), len(y)\n 
table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def extract_ltri( m, context = FloatContext ):\n rows, cols = shape_mat(m)\n return [ row[:i+1] + [context.zero]*(cols - i - 1) \n for i, row in enumerate(m) ]", "def get_number_of_rows_and_columns(m):\n\n r = int(np.sqrt(m))\n c = m // r if np.mod(m, r) == 0 else m // r + 1\n return r, c", "def magma_cgels(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_cgels(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def KL2kL(NL, KL, BL):\n # cycle through BL, finding matching inds in NL and thus KL\n # for row in BL, get KL value in row BL[i,0] and col where(NL[BL[i,0],:]==BL[i,1])[0]\n if (BL < 0).any():\n aBL = np.abs(BL)\n kL = np.array([KL[aBL[i, 0], np.where(NL[aBL[i, 0], :] == aBL[i, 1])[0]][0] for i in range(len(aBL))])\n kL2 = np.array([KL[aBL[i, 1], np.where(NL[aBL[i, 1], :] == aBL[i, 0])[0]][0] for i in range(len(aBL))])\n if np.abs(kL - kL2).any() > 1e-6:\n raise RuntimeError('KL is not properly symmetric! KL[i, j neighbor] != KL[j neighbor, i]')\n else:\n kL = np.array([KL[BL[i, 0], np.where(NL[BL[i, 0], :] == BL[i, 1])[0]][0] for i in range(len(BL))])\n return kL" ]
[ "0.59105355", "0.57944614", "0.5768694", "0.5714691", "0.5706475", "0.5483352", "0.5476161", "0.5426626", "0.53980875", "0.5363862", "0.53620285", "0.5346637", "0.530857", "0.5301486", "0.52655256", "0.52437156", "0.52226084", "0.52145016", "0.5176464", "0.51718956", "0.5150789", "0.51381934", "0.5129281", "0.5109691", "0.5087405", "0.5087405", "0.50779736", "0.50730824", "0.50631976", "0.5058335" ]
0.60421354
0
Adapted from interactive_vaporflux.ipynb. sst: sea surface temperature (K); ft_qv: mixed-layer top qv (kg kg^-1); use_NT: True or False. Outputs csv and json files with equilibrium values.
def run_main(sst, ft_qv, use_NT):

    dtout=10. #minutes
    end_time=8*24. #hours
    del_time=dtout*60. #seconds
    end_time=end_time*3600. #seconds
    #sst=297
    D=5.e-6 #s-1
    U=7 #m/s
    psfc=100. #kPa
    qsfc=tf.qs_tp(sst,psfc)
    ft_intercept = 292 #K
    ft_gamma = 6.e-3 #K/m
    #ft_qv = 2.e-3
    k=0.2 #entrainment efficiency
    Cd = 1.e-3 #drag coefficient
    tspan = np.arange(0.,end_time,del_time)
    vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start
    the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,
                 qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT
    the_tup=make_tuple(the_tup,'coeffs')
    output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))
    result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])

    # save time/computation by only doing calculations for the last timestep (equilibrium)
    result['time']=tspan[-1]/3600./24. #days
    result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]
    result['delqv'] = ft_qv - result['qv'].iloc[-1]
    result['LCL'] = calc_lcl(result.iloc[-1], psfc)
    result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)
    result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)
    result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)

    # decide how to calculate entrainment
    the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]
    if use_NT:
        result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1],
                                    result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])
    else:
        result['went']=calc_went(result.iloc[-1],the_tup)

    result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)

    with open('dumpmodel.csv','w') as f:
        result.to_csv(f,index=False)

    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_equil(sst, ft_qv, use_NT=False):\n \n run_main(sst, ft_qv, use_NT)\n \n # grab csv file\n with open('dumpmodel.csv','r') as f:\n df_result=pd.read_csv(f)\n\n # last time step into named tupple\n out=df_result.iloc[-1]\n steady_state=make_tuple(out.to_dict())\n steady_state\n \n # obtain steady-state values\n dth=steady_state.deltheta\n dqt=steady_state.delqv\n thetal_m=steady_state.theta\n qt_m=steady_state.qv\n h=steady_state.h\n press=tf.find_press(steady_state.h) #kPa\n thetal_ft = steady_state.theta + dth\n qt_ft = steady_state.qv + dqt\n zb = steady_state.LCL\n zi = steady_state.h\n we = steady_state.went\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-h)\n LTS = thetal_3000 - steady_state.theta\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n # calculate LWP\n rho = 1.\n LWP = 0.5*rho*(zi-zb)**2\n \n # put all required variables into output array\n out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])\n \n return out_array", "def extractQuantities(path, run, t0, t1):\n data = pyLTR.Models.MIX(path, run)\n\n # hard-coded input for testing & debugging:\n #data = pyLTR.Models.LFM('/hao/aim2/schmitt/data/LTR-2_0_1b/r1432/March1995/LR/single', 'LRs')\n \n #Make sure variables are defined in the model.\n modelVars = data.getVarNames()\n for v in ['Grid X', 'Grid Y', \n 'Potential North [V]', 'Potential South [V]', \n 'FAC North [A/m^2]', 'FAC South [A/m^2]',\n 'Pedersen conductance North [S]', 'Pedersen conductance South [S]', \n 'Hall conductance North [S]', 'Hall conductance South [S]', \n 'Average energy North [keV]', 'Average energy South [keV]',\n 'Number flux North [1/cm^2 s]', 'Number flux South [1/cm^2 s]']:\n assert( v in modelVars )\n\n timeRange = data.getTimeRange()\n if len(timeRange) == 0:\n raise Exception(('No data files found. Are you pointing to the correct run directory?'))\n\n index0 = 0\n if t0:\n for i,t in enumerate(timeRange):\n if t0 >= t:\n index0 = i\n\n index1 = len(timeRange)-1\n if t1:\n for i,t in enumerate(timeRange):\n if t1 >= t:\n index1 = i \n\n print(( 'Extracting MIX quantities for time series over %d time steps.' 
% (index1-index0) ))\n \n # Output a status bar displaying how far along the computation is.\n progress = pyLTR.StatusBar(0, index1-index0)\n progress.start()\n\n t_doy = []\n cpcpNorth = []\n cpcpSouth = []\n hpNorth = []\n hpSouth = []\n ipfacNorth = []\n ipfacSouth = []\n\n # Pre-compute area of the grid.\n x = data.read('Grid X', timeRange[index0])\n y = data.read('Grid Y', timeRange[index0])\n # Fix singularity at the pole\n x[:,0] = 0.0\n y[:,0] = 0.0\n z = numpy.sqrt(1.0-x**2-y**2)\n ri = 6500.0e3 # Radius of ionosphere\n areaMixGrid = pyLTR.math.integrate.calcFaceAreas(x,y,z)*ri*ri\n\n for i,time in enumerate(timeRange[index0:index1]):\n try:\n # -- Day of Year\n tt = time.timetuple()\n t_doy.append(tt.tm_yday+tt.tm_hour/24.0+tt.tm_min/1440.0+tt.tm_sec/86400.0)\n\n # --- Cross Polar Cap Potential\n psiNorth = data.read('Potential North [V]', time) / 1000.0\n cpcpNorth.append(psiNorth.max() - psiNorth.min())\n\n psiSouth = data.read('Potential South [V]', time) / 1000.0\n cpcpSouth.append(psiSouth.max() - psiSouth.min())\n \n # --- Hemispheric Power\n energy = data.read('Average energy North [keV]', time)\n flux = data.read('Number flux North [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpNorth.append(hp.sum() * 1.6e-21) \n\n energy = data.read('Average energy South [keV]', time)\n flux = data.read('Number flux South [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpSouth.append(hp.sum() * 1.6e-21)\n\n # --- Positive current density\n fac = data.read('FAC North [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacNorth.append(pfac.sum()/1.0e6)\n\n fac = data.read('FAC South [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacSouth.append(pfac.sum()/1.0e6)\n\n progress.increment()\n except KeyboardInterrupt:\n # Exit when the user hits CTRL+C.\n progress.stop()\n progress.join() \n print('Exiting.')\n import sys\n sys.exit(0)\n except:\n # Cleanup progress bar if something bad happened.\n progress.stop()\n progress.join()\n raise\n progress.stop()\n progress.join()\n\n dataNorth = pyLTR.TimeSeries()\n dataSouth = pyLTR.TimeSeries()\n dataNorth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataSouth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataNorth.append('doy', 'Day of Year', '', t_doy)\n dataSouth.append('doy', 'Day of Year', '', t_doy)\n \n # \"N\" and \"S\" label subscripts are redundant here, potentially leading to\n # mis-labeling of plots\n #dataNorth.append('cpcp', r'$\\Phi_N$', 'kV', cpcpNorth)\n #dataSouth.append('cpcp', r'$\\Phi_S$', 'kV', cpcpSouth)\n #\n #dataNorth.append('hp', r'$HP_N$', 'GW', hpNorth)\n #dataSouth.append('hp', r'$HP_S$', 'GW', hpSouth)\n #\n #dataNorth.append('ipfac', r'$FAC_N$', 'MA', ipfacNorth)\n #dataSouth.append('ipfac', r'$FAC_S$', 'MA', ipfacSouth)\n \n dataNorth.append('cpcp', r'$\\Phi$', 'kV', cpcpNorth)\n dataSouth.append('cpcp', r'$\\Phi$', 'kV', cpcpSouth)\n \n dataNorth.append('hp', r'$HP$', 'GW', hpNorth)\n dataSouth.append('hp', r'$HP$', 'GW', hpSouth)\n \n dataNorth.append('ipfac', r'$FAC$', 'MA', ipfacNorth)\n dataSouth.append('ipfac', r'$FAC$', 'MA', ipfacSouth)\n\n return (dataNorth, dataSouth)", "def water(fname):\n\n variables = [\"H2OICE\", \"QV_COLUMN\", \"QI_COLUMN\"]\n data = dict()\n for v in variables:\n data.update(common.zonal_mean_surface(fname, v))\n \n return data", "def env_temperature(v3: \"float\", v4: 
\"float\") -> \"float\":", "def get_iPTF14gqr(colorplt=False):\n z = 0.063\n # ebv = 0.082\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_exp = 56943.74 # \n t_max = 56950.26 # g band max light + 3\n \n tb = Table(fits.open('../data/otherSN/De2018/tables1.fit')[1].data)\n tb.rename_column('MJD' , 'mjd')\n tb['texp_rf'] = (tb['mjd'] - t_exp) / (1+z)\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n # tb = tb[tb[\"Filt\"]==\"g \"]\n tb = tb[~np.isnan(tb['e_mag'])]\n tb.rename_column('Filt' , 'filter')\n tb.rename_column('e_mag' , 'emag')\n tb.rename_column('mag' , 'mag0')\n \n ixg = tb['filter']==\"g \"\n ixB = tb['filter']==\"B \"\n ixV = tb['filter']==\"V \"\n ixr = tb['filter']==\"r \"\n ixi = tb['filter']==\"i \"\n ixUVW1 = tb['filter']==\"UVW1\"\n ixUVW2 = tb['filter']==\"UVW2\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'][ixUVW2] = 2079\n tb['wave'][ixUVW1] = 2614\n tb['wave'][ixB] = 4359\n tb['wave'][ixg] = 4814\n tb['wave'][ixV] = 5430\n tb['wave'][ixr] = 6422\n tb['wave'][ixi] = 7883\n \n tb['mag0_abs'] = tb['mag0'] - dis_mod\n \n tb = tb.to_pandas()\n tb[\"texp_rf\"] = tb[\"Phase\"]\n tb = tb.drop(columns=[\"recno\", \"Phase\", \"l_mag\"])\n \"\"\"\n ix = np.any([tb['Tel'].values==\"P60 \",\n tb[\"filter\"].values=='g '], axis=0)\n tb = tb[ix]\n \"\"\"\n tb = add_datecol(tb)\n tb = add_physcol(tb)\n tt = tb[\"tmax_rf\"].values\n epochs = [\" \" for x in range(len(tt))]\n epochs = np.array(epochs)\n \"\"\"\n ix = (tt>-5.6)&(tt<-5.55)\n epochs[ix] = \"epoch 01\"\n \"\"\"\n ix = (tt>-5.55)&(tt<-5.50)\n epochs[ix] = \"epoch 02\"\n \n ix = (tt>-5.50)&(tt<-5.45)\n epochs[ix] = \"epoch 03\"\n \n ix = (tt>-5.2)&(tt<-5.0)\n epochs[ix] = \"epoch 04\"\n ix = (tt>-5.0)&(tt<-4.7)\n epochs[ix] = \"epoch 05\"\n \n ix = (tt>-4.7)&(tt<-4.5)\n epochs[ix] = \"epoch 06\"\n ix = (tt>-4.5)&(tt<-3.5)\n epochs[ix] = \"epoch 07\"\n ix = (tt>-3.5)&(tt<-2.5)\n epochs[ix] = \"epoch 08\"\n ix = (tt>-1.5)&(tt<-1)\n epochs[ix] = \"epoch 09\"\n ix = (tt>-1)&(tt<-0.82)\n epochs[ix] = \"epoch 10\"\n ix = (tt>-0.82)&(tt<-0.6)\n epochs[ix] = \"epoch 11\"\n ix = (tt>-0.5)&(tt<0.5)\n epochs[ix] = \"epoch 12\"\n ix = (tt>0.5)&(tt<1.5)\n epochs[ix] = \"epoch 13\"\n ix = (tt>1.5)&(tt<2.5)\n epochs[ix] = \"epoch 14\"\n ix = (tt>3.5)&(tt<4.5)\n epochs[ix] = \"epoch 15\"\n ix = (tt>4.5)&(tt<5)\n epochs[ix] = \"epoch 16\"\n ix = (tt>5)&(tt<5.6)\n epochs[ix] = \"epoch 17\"\n ix = (tt>5.6)&(tt<5.8)\n epochs[ix] = \"epoch 18\"\n ix = (tt>6)&(tt<7)\n epochs[ix] = \"epoch 19\"\n ix = (tt>7)&(tt<8)\n epochs[ix] = \"epoch 20\"\n ix = (tt>8)&(tt<9)\n epochs[ix] = \"epoch 21\"\n tb[\"epoch\"] = epochs\n\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g ', 'r ', 'i ']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r \" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g \"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r \"]\n itb = tbsub[tbsub[\"filter\"].values==\"i \"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = 
np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def get_thermochem(file_set, results_dict, save_vibes, out_dir, tog_output_fname, qh_h_opt, write_mode):\n h = []\n qh_h = []\n gt = []\n qh_gt = []\n temps = []\n for index, file in enumerate(file_set):\n base_name = os.path.basename(file)\n if file == REACT_PROD_SEP:\n h.append(np.full([len(temps)], np.nan))\n qh_h.append(np.full([len(temps)], np.nan))\n gt.append(np.full([len(temps)], np.nan))\n qh_gt.append(np.full([len(temps)], np.nan))\n continue\n vibes_out = results_dict[base_name][GOODVIBES_OUT]\n found_structure = False\n skip_line = True\n h.append([])\n qh_h.append([])\n gt.append([])\n qh_gt.append([])\n # we know the last line should be dropped, and at least the first 10\n for line in vibes_out[10:-2]:\n if GOODVIBES_ERROR_PAT.match(line):\n raise InvalidDataError(\"See GoodVibes output: {}\".format(vibes_out))\n if not found_structure:\n if GOODVIBES_DATA_PAT.match(line):\n found_structure = True\n continue\n elif skip_line:\n skip_line = False\n continue\n else:\n vals = line.split()\n if index == 0:\n temps.append(float(vals[1]))\n h[index].append(float(vals[2]))\n if qh_h_opt:\n qh_h[index].append(float(vals[3]))\n gt[index].append(float(vals[-2]))\n qh_gt[index].append(float(vals[-1]))\n if save_vibes:\n vibes_out_fname = os.path.relpath(create_out_fname(file, suffix='_vibes', base_dir=out_dir, ext='.dat'))\n list_to_file(vibes_out, vibes_out_fname, print_message=False)\n print('Saved GoodVibes output as: {}'.format(vibes_out_fname))\n if tog_output_fname:\n list_to_file(vibes_out, tog_output_fname, mode=write_mode, print_message=False)\n if write_mode == 'w':\n print(\"Adding all GoodVibes output to: {}\".format(tog_output_fname))\n write_mode = \"a\"\n\n temps = np.asarray(temps)\n # for each molecule, multiply the array to convert to kcal/mol\n for index in range(len(gt)):\n h[index] = np.asarray(h[index]) * EHPART_TO_KCAL_MOL\n if qh_h_opt:\n qh_h[index] = np.asarray(qh_h[index]) * EHPART_TO_KCAL_MOL\n gt[index] = np.asarray(gt[index]) * EHPART_TO_KCAL_MOL\n qh_gt[index] = np.asarray(qh_gt[index]) * EHPART_TO_KCAL_MOL\n\n return temps, h, qh_h, gt, qh_gt", "def main(folder, quiet=0):\n\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n\n\n color1 = \"I4\" #filter system for first color of CMD\n color2 = \"M1\" #filter system for second color of CMD\n 
zeromagc1 = zero.zero_mag[color1]\n zeromagc2 = zero.zero_mag[color2]\n min_mag = 8. #minimal observation limit\n max_mag = 0. #maximal observation limit\n\n#getting file list\n files = sorted(os.listdir('%s/%s' % (os.getcwdu(), folder))) \n out = []\n\n for fil in files:\n#only using files created by the automated simulation\n if fil.startswith('sim_') and not 'settings' in fil.encode(\"ascii\"):\n print(\"%s/%s\" % (folder,fil.encode(\"ascii\")), file=output_stream)\n \n\n # Read in\n hdulist = fits.open('%s/%s' %(folder,fil))\n data = hdulist[1].data\n\n #calculating magnitudes from fluxes and converting to CMD-data\n x = -2.5*(np.log10(data['c%s' % color1]/zeromagc1) - np.log10(data['c%s' % color2]/zeromagc2))\n y = -2.5*(np.log10(data['c%s' % color2]/zeromagc2))\n\n \n sel = np.logical_and( (y > -10./3. * (x-1.) + 10.), np.logical_and(max_mag < y, y < min_mag))\n sel = np.logical_and(sel, y < -x + 12.)\n n = sum(sel)\n t = Table(hdulist[1].data)\n if 'sel' in t.columns:\n t.remove_column('sel')\n t.add_column(Column(name='sel', data=sel.astype('int')))\n \n hdulist[1].data = np.array(t)\n tmp, av, apera, age = fil.split('_')\n fits.update('%s/%s' %(folder,fil), np.array(t), ext = 1, clobber=True)\n out.append([av, apera, age, n])\n\n #writing obtained data to \"folder/__expected_number\"\n head = ['#', 'AV', 'Aperature_size', 'Age', 'Expected_number']\n f = open('%s/__expected_number' % folder, 'w')\n f.write(','.join(head)+'\\n' )\n np.savetxt(f, np.asarray(out).astype(int))\n f.close()\n \n print (\"Analysed %s files and saved output to %s\" % (len(out),'%s/__expected_number' % folder), file=output_stream)", "def get_taux_variants(name):\n indicateurResult = get_empty_kpi()\n config = get_config(name)\n log.debug('Processing - '+name)\n\n indicateurResult['nom'] = config['nom']\n indicateurResult['unite'] = config['unite']\n indicateurResult['unite_short'] = config['unite_short']\n indicateurResult['trendType'] = config['trendType']\n indicateurResult['color'] = get_color_indicateur(config)\n\n if name == \"prop_variant_A\":\n colname = \"tx_A1\"\n elif name == \"prop_variant_B\":\n colname = \"tx_B1\"\n elif name == \"prop_variant_C\":\n colname = \"tx_C1\"\n elif name == \"prop_variant_D\":\n colname = \"tx_D1\"\n else:\n colname = \"tx_A0C0\"\n\n df = pd.read_csv(\n 'files_new/'+config['res_id_fra'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine'].apply(lambda x: str(x)[11:])\n for country in tqdm(countries, desc=\"Processing National\"):\n res = process_stock(\n df,\n 'nat',\n 'fra',\n config['trendType'],\n colname\n )\n indicateurResult['france'].append(res)\n \n df = pd.read_csv(\n 'files_new/'+config['res_id_reg'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = df[~df['reg'].isnull()]\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine'].apply(lambda x: str(x)[11:])\n for reg in tqdm(df.reg.unique(), desc=\"Processing Régions\"):\n res = process_stock(\n df[df['reg'] == reg].copy(),\n 'reg',\n reg,\n config['trendType'],\n colname\n )\n indicateurResult['regions'].append(res)\n\n # df = pd.read_csv(\n # 'files_new/'+config['res_id_dep'],\n # sep=None,\n # engine='python',\n # dtype={'reg': str, 'dep': str}\n # )\n # df = enrich_dataframe(df, name)\n # df['date'] = df['semaine'].apply(lambda x: str(x)[11:])\n # for dep in tqdm(df.dep.unique(), desc=\"Processing Départements\"):\n # res = process_stock(\n # df[df['dep'] == dep].copy(),\n # 'dep',\n # dep,\n # 
config['trendType'],\n # colname\n # )\n # indicateurResult['departements'].append(res)\n\n save_result(indicateurResult, name)", "def ferry_data_QC(ferry, TH_abs, TH_u, TH_d):\n if type(ferry) is not xr.core.dataset.Dataset:\n raise ValueError('Ferry is not defined')\n # QC1: make nan all Absolute velocities that are greater than 6.5 m/s\n abs_u = ferry.eastward_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n abs_v = ferry.northward_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n abs_w = ferry.vertical_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n # Get bottom track velocity for reference\n # and also clean for TH in abs velocity\n east_btv = ferry.eastward_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n north_btv = ferry.northward_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n vert_btv = ferry.vertical_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n # Estimate u_true = abs_u + east_bt_v\n u_true = abs_u + east_btv\n v_true = abs_v + north_btv\n w_true = abs_w + vert_btv\n U = np.sqrt(u_true**2 + v_true**2)\n # QC2: check that u_true and v_true are less than 4 m/s\n u_true = u_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))\n v_true = v_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))\n w_true = w_true.where((u_true) < TH_u & (v_true < TH_u) & (U < TH_u))\n U = U.where((u_true) < TH_u & (v_true < TH_u) & (U < TH_u))\n # Add true velocity data to the dataset\n ferry['u_true'] = u_true\n ferry['v_true'] = v_true\n ferry['w_true'] = w_true\n ferry['Horizontal_speed'] = U\n # Remove first 5 rows of depth\n ferryQC = ferry.isel(depth=slice(TH_d, None))\n goodQC = True\n return(ferryQC, goodQC)", "def read_elia_activated_energy_volumes(filename,status):\r\n df = pd.read_excel(filename,skiprows=2,parse_dates=False)\r\n df[\"Timestamp\"] = df[\"Date\"]+\" \"+df['Quarter'].map(lambda x: str(x)[:-9])\r\n pd.to_datetime(df[\"Timestamp\"])\r\n df.set_index(\"Timestamp\",inplace=True)\r\n if ((status == \"validated\") | (status == \"valid\")):\r\n df = df.drop(df[df.Status != \"Validated\"].index)\r\n df = df.drop([\"Date\",\"Quarter\",\"Status\"], axis=1)\r\n \r\n if ((len(df.columns)<13) & (len(df.columns)>11)) :\r\n df.columns.values[0:13] = [\"NRV in MW\", \"GUV in MW\", \"IGCC+ in MW\", \"R2+ in MW\", \"Bids+ in MW\", \"R3+ in MW\", \"R3DP+ in MW\", \"GDV in MW\", \"IGCC- in MW\", \"R2- in MW\", \"Bids- in MW\", \"R3- in MW\"]\r\n if len(df.columns)<= 11:\r\n df.columns.values[0:12] = [\"NRV in MW\", \"GUV in MW\", \"IGCC+ in MW\", \"R2+ in MW\", \"Bids+ in MW\", \"R3+ in MW\", \"GDV in MW\", \"IGCC- in MW\", \"R2- in MW\", \"Bids- in MW\", \"R3- in MW\"]\r\n if len(df.columns)>14:\r\n df.columns.values[0:16] = [\"NRV in MW\", \"SR in MW\",\"GUV in MW\", \"IGCC+ in MW\",\"R2+ in MW\",\"Bids+ in MW\",\"R3 std in MW\",\"R3 flex in MW\",\"ICH in MW\",\"inter TSO import in MW\",\"GDV in MW\",\"IGCC- in MW\",\"R2- in MW\",\"Bids- in MW\",\"inter TSO export in 
MW\"]\r\n \r\n return df", "def main(daymet_dir,pickles,start_date='1980-10-01',end_date='2020-09-30',huc_col = 'huc8', **kwargs):\r\n\tprint(f'The huc col being processed is: {huc_col}')\r\n\t################################################################\r\n\t#first do the daymet data \r\n\t#read in all the files in this dir and combine them into one df\r\n\tearly=FormatData(glob.glob(daymet_dir+f'*_12_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tmid=FormatData(glob.glob(daymet_dir+f'*_2_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tlate=FormatData(glob.glob(daymet_dir+f'*_4_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\t################################################################\r\n\t#next do the snotel data \r\n\toutput=[]\r\n\r\n\t#read in some pickled objects, these look like a list of dfs with each being a station for the full time period \r\n\tfor item in ['PREC','TAVG','WTEQ']:\r\n\t\t#get the pickled objects for each parameter \r\n\t\tfiles = glob.glob(pickles+f'*{item}_{start_date}_{end_date}_snotel_data_list') #hardcoded currently\r\n\t\tdf=FormatData(files,drop_cols=['year','month','day']).read_in_pickles()\r\n\t\toutput.append(df) #the df here is 365 days x ~30 yrs x 237 stations so these are pretty big dfs\r\n\t\r\n\t#join the three enviro params \r\n\toutput_df = reduce(lambda left,right: pd.merge(left,right,how='inner',on=['date','id']), output)\r\n\t\r\n\t\r\n\t#convert the temp column from F to C \r\n\toutput_df['TAVG'] = (output_df['TAVG']-32)*(5/9) \r\n\t#there are a couple of erroneous temp values, remove those \r\n\toutput_df = output_df.loc[output_df['TAVG'] <= 50]\r\n\r\n\t#convert prec and swe cols from inches to cm \r\n\toutput_df['PREC'] = output_df['PREC']*2.54\r\n\toutput_df['WTEQ'] = output_df['WTEQ']*2.54\r\n\t\r\n\t#remove rows that have one of the data types missing- this might need to be amended because \r\n\t#it means that there are different numbers of records in some of the periods. \r\n\toutput_df=output_df.dropna()\r\n\t\r\n\t#cast the snotel id col to int to add the hucs \r\n\toutput_df['id'] = output_df['id'].astype('int')\r\n\r\n\t#add the as yet nonexistant hucs data to the outputs \r\n\thucs = kwargs.get('hucs')\r\n\toutput_df[huc_col] = output_df['id'].map(hucs)\r\n\r\n\t#there are multiple snotel stations in some of the basins, \r\n\t#combine those so there is just one number per basin like the \r\n\t#daymet and RS data. 
\r\n\r\n\toutput_df=output_df.groupby([huc_col,'date'])[['PREC','WTEQ','TAVG']].mean().reset_index()\r\n\r\n\tperiod_list = []\r\n\tfor p1,p2 in zip(['early','mid','late'],[early,mid,late]): \r\n\t\t\t#get snotel first\r\n\t\t#make a temporal chunk of data \r\n\t\tsnotel_chunk=FormatData(None,time_period=p1).split_yearly_data(output_df)\r\n\r\n\t\t##########working below here\r\n\t\t############################\r\n\t\t#calculate the snow droughts for that chunk \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\t\t#print('snotel')\r\n\t\t\t#print(snotel_drought)\r\n\t\telse: \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',sort_col=huc_col).prepare_df_cols()\r\n\r\n\t\t#get cols of interest \r\n\t\t#snotel_drought=snotel_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t#rename cols so they don't get confused when data are merged \r\n\t\t#snotel_drought.columns=['huc8','year']+['s_'+column for column in snotel_drought.columns if not (column =='huc8') | (column=='year')]\r\n\t\t\r\n\t\t#then do the same for daymet \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\telse: \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,sort_col=huc_col).prepare_df_cols()\r\n\t\t#print('daymet',daymet_drought)\r\n\t\t#daymet_drought=daymet_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t\r\n\t\t#daymet_drought.columns=['huc8','year']+['d_'+column for column in daymet_drought.columns if not (column =='huc8') | (column=='year')]\r\n\r\n\t##########################################\r\n\t\r\n\t\t#run the kmeans with drought types as intiilization conditions (centroids) for the clusters\r\n\t\t\r\n\t\t#these are all of the huc 4 basins in the study area \r\n\t\thuc4s = ['1708','1801','1710','1711','1709','1701','1702','1705','1703','1601','1707','1706','1712','1704']\r\n\t\ts_output = []\r\n\t\td_output = []\r\n\t\tfor huc4 in huc4s: \r\n\t\t\thuc4_s = sd.prep_clusters(snotel_drought,huc4,huc_col=huc_col) #get the subset of the snow drought data for a given huc4\r\n\t\t\thuc4_d = sd.prep_clusters(daymet_drought,huc4,huc_col=huc_col)\r\n\t\t\t#make the centroids that serve as the intialization for the kmeans clusters- these are like endmembers (ish)\r\n\t\t\ts_centroids = DefineClusterCenters(huc4_s,'WTEQ','PREC','TAVG').combine_centroids() #makes a numpy array with four centroids\r\n\t\t\td_centroids = DefineClusterCenters(huc4_d,'swe','prcp','tavg').combine_centroids() #makes a numpy array with four centroids\r\n\r\n\t\t\t#clusters should be like: {0:dry, 1:warm, 2:warm_dry, 3:no_drought} 6/8/2021 DOUBLE CHECK\r\n\t\t\t#run kmeans for the snotel data\r\n\t\t\ts_clusters = sd.run_kmeans(huc4_s[['WTEQ','PREC','TAVG']].to_numpy(),huc4_s['label'],s_centroids)\r\n\t\t\ts_clusters = sd.add_drought_cols_to_kmeans_output(s_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\t\t\t#run kmeans for the daymet data \r\n\t\t\td_clusters = sd.run_kmeans(huc4_d[['swe','prcp','tavg']].to_numpy(),huc4_d['label'],d_centroids)\r\n\t\t\td_clusters = sd.add_drought_cols_to_kmeans_output(d_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\r\n\t\t\ts_output.append(s_clusters)\r\n\t\t\td_output.append(d_clusters)\r\n\t\ts_plot = pd.concat(s_output)\r\n\r\n\t\t#select the cols of interest and rename so there's no 
confusion when dfs are merged \r\n\t\ts_plot=s_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\ts_plot.columns=[huc_col,'year']+['s_'+column for column in s_plot.columns if not (column == huc_col) | (column=='year')]\r\n\r\n\t\td_plot = pd.concat(d_output)\r\n\t\td_plot=d_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\td_plot.columns=[huc_col,'year']+['d_'+column for column in d_plot.columns if not (column == huc_col) | (column=='year')]\r\n\t\r\n\t\t#merge the two datasets into one df \r\n\t\tdfs = s_plot.merge(d_plot,on=[huc_col,'year'],how='inner')\r\n\t\t\r\n\t\t#deal with the scenario that there are basins with less than 30 years of data, remove those here\r\n\t\tdfs = sd.remove_short_dataset_stations(dfs,huc_col)\r\n\t\tperiod_list.append(dfs)\r\n\r\n\tplot_counts(period_list,kwargs.get('stats_dir'),huc_col=huc_col,**kwargs)", "def show_trap_results():\n df_grid = pd.read_hdf('./temp_results.h5', '/optimize_grid')\n print(df_grid)\n \n print('Minimum fwhm:')\n print(df_grid[df_grid.fwhm_ovr_mean==df_grid.fwhm_ovr_mean.min()])\n \n plt.plot(df_grid.e_fit, df_grid.fwhm_ovr_mean, '.b')\n plt.show()", "def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = 
rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def get_iPTF16asu():\n z = 0.187\n ebv = 0.0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = asci.read('../data/otherSN/Whitesides2017/table1.txt')\n tb = tb.to_pandas()\n tb = tb[tb[\"col4\"].values!=\">\"]\n \n tb = tb.rename(columns={'col1' : 'mjd',\n 'col2': 'tmax_rf',\n 'col3': 'filter',\n \"col4\": 'mag',\n 'col5': 'emag',\n 'col6': 'instrument'})\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb[\"mag\"] = np.array(tb[\"mag\"].values, dtype = np.float)\n #tb[\"emag\"] = np.array(tb[\"emag\"].values, dtype = np.float)\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb[tb.wave!=0]\n return tb", "def generate_experimental_condition(dir_output, file_name_prefix,list_temperature, partial_pressure_H2O=0.02,SinteringTemperature=1600,SinteringTime=24):\r\n\r\n print(\"Enter the host element occupying the A-site\")\r\n set_A1 = input (\"Ex: Ba\\n\")\r\n print(\"Enter the valence of the A-site host element\")\r\n set_A1_valence = input(\"Ex: 2\\n\")\r\n frac_A1 = '1'\r\n print(\"Enter the host element occupying the B-site\")\r\n set_B1 = input (\"Ex: Zr\\n\")\r\n print(\"Enter the valence of the B-site host element\")\r\n set_B1_valence = input(\"Ex:4\\n\")\r\n print(\"Enter the fraction that describes the composition of the B-site host element\")\r\n frac_B1 = str(format(float( input (\"Ex:0.8\\n\")), '.2f'))\r\n print(\"Enter the dopant element occupying the B-site\")\r\n set_B2 = input (\"Ex: Sc\\n\")\r\n print(\"Enter the valence of the B-dopant\")\r\n set_B2_valence = input(\"Ex: 3\\n\")\r\n frac_B2 = str(format((1 - float(frac_B1)), '.2f'))\r\n\r\n # generate dataframe for base\r\n CA = set_A1 + set_B1 + frac_B1 + set_B2 + frac_B2 + \"O3\"\r\n dic = {'Composition':CA,\r\n 'A1':set_A1, 'Valence A1':set_A1_valence, 'fraction A1':frac_A1,\r\n 'B1':set_B1, 'Valence B1':set_B1_valence, 'fraction B1':frac_B1,\r\n 'B2':set_B2, 'Valence B2':set_B2_valence, 'fraction B2':frac_B2}\r\n df = pd.DataFrame(dic,index=['i',])\r\n\r\n # add columns name\r\n columns_all = ['Composition','Temperature / C','pH2O / atm','CH',\r\n 'A1','Valence A1','fraction A1','A2','Valence A2','fraction A2',\r\n 'B1','Valence B1','fraction B1','B2','Valence B2','fraction B2',\r\n 'B3','Valence B3','fraction 
B3','X1','Valence X1','fraction X1','fraction total']\r\n for c in columns_all:\r\n if not(c in df.columns):\r\n df[c] = float(np.NaN)\r\n df = df[columns_all]\r\n\r\n # add another experimental conditions\r\n df['pH2O / atm'] = partial_pressure_H2O\r\n df['Sintering temperature/C'] = SinteringTemperature\r\n df['Sintering time / h'] = SinteringTime\r\n df['fraction A2']='0'\r\n df['fraction B3']='0'\r\n df['X1']='O'\r\n df['Valence X1']='-2'\r\n df['fraction X1']='0.2'\r\n df['fraction total']='1'\r\n\r\n for cnt, tmp in enumerate(list_temperature):\r\n df['Temperature / C'] = tmp\r\n if cnt==0:\r\n df_all = df.copy()\r\n else:\r\n df_all = pd.concat([df_all,df], ignore_index=True)\r\n file_name = os.path.join(dir_output,'{:}_all.csv'.format(file_name_prefix, tmp))\r\n df_all.to_csv(file_name, index=False)", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def main():\n std_template_file = os.path.join(os.environ['HOME'], 'prospect/py/prospect/data/std_templates.fits')\n if os.path.isfile(std_template_file):\n print('Error std template file already exists')\n\n #- Templates produced from 1st component of old (pre-Aug 2022) Redrock templates:\n template_dir = os.path.join(os.environ['DESICONDA'], '../code/redrock-templates/0.7.2')\n #std_templates = {'QSO': ('QSO',''), 'GALAXY': ('GALAXY',''), 'STAR': ('STAR','F') }\n std_templates = {'GALAXY': ('GALAXY',''), 'STAR': ('STAR','F') }\n delta_lambd_templates = 3\n\n rr_templts = load_redrock_templates(template_dir=template_dir)\n for key,rr_key in std_templates.items() :\n wave_array = np.arange(rr_templts[rr_key].wave[0], rr_templts[rr_key].wave[-1], delta_lambd_templates)\n flux_array = resample_flux(wave_array, rr_templts[rr_key].wave, rr_templts[rr_key].flux[0,:])\n table_templates = Table(data=[wave_array, flux_array], names=['wave_'+key, 'flux_'+key], meta={'name':key})\n table_templates.write(std_template_file, append=True)\n\n #- Case of QSO (Summer 2022): use new template provided by A. 
Brodzeller\n qsotemplate_file = os.environ['HOME'] + '/stdtemplate-qso.fits'\n hdul = fits.open(qsotemplate_file)\n qsowave = 10**(hdul[0].header['CRVAL1']+np.arange(hdul[0].header['NAXIS1'])*hdul[0].header['CDELT1'])\n qsoflux = hdul[0].data\n # Resample as previously:\n wave_array = np.arange(qsowave[0], qsowave[-1], delta_lambd_templates)\n flux_array = resample_flux(wave_array, qsowave, qsoflux)\n table_templates = Table(data=[wave_array, flux_array], names=['wave_QSO', 'flux_QSO'], meta={'name':'QSO'})\n table_templates.write(std_template_file, append=True)\n return 0", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def analyzeViSDEMData(dict):\n \n if 'path_in' in dict:\n path_in = dict['path_in']\n else:\n print(\"Caution: No path for input folder containing the data has been defined. 
Please define path to folder by dict['path_in']=path_in\") \n return\n \n path_out_default = '../colordeficiency-data/' \n if 'path_out' in dict:\n path_out = dict['path_out']\n else:\n print(\"Caution: No path for output folder where the data should be stored has been defined. Using default output path instead: \"+str(path_out_default))\n path_out = path_out_default\n \n if 'round' in dict:\n round = dict['round']\n else:\n print(\"Error: You have to chose a round first.\")\n \n path = os.path.join(os.path.dirname(os.path.abspath(os.path.join(__file__,os.pardir))),'colordeficiency-data')\n \n # 0. Step: Get all the relevant information, i.e. motive_ids, obs_col_defs etc.\n if round == 1:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids.csv\")\n elif round == 2:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids_2.csv\")\n \n vs_ids_sheet = pandas.read_csv(visualsearch_ids,sep=\";\")\n \n # Get all the relevant information about the observers, i.e. obs_col_defs etc.\n observer_ids = os.path.join(path,\"observer_ids.csv\")\n obs_ids_sheet = pandas.read_csv(observer_ids,sep=\";\")\n \n # 1. Step: Read all the XLSX data in the path\n ext = 'xlsx'; xlsx_files = getAllXXXinPath(path_in,ext)\n dataArray = pandas.DataFrame()\n i=1\n for xlsx_file in xlsx_files:\n sys.stdout.write(xlsx_file)\n dataArray_tmp, testArray, extraDataDict = extractExperimentData(os.path.join(path_in,xlsx_file))\n \n newDataArray = dataArray_tmp[['dalt_id','coldef_type','resp.corr_raw','resp.rt_raw','stimFile']]\n \n if \"2. Session\" in extraDataDict:\n sessionID = int(extraDataDict['2. Session'])\n newDataArray['session_id'] = sessionID\n \n if 'group' in extraDataDict:\n obsGroup = str(extraDataDict['group'])\n newDataArray['obsGroup'] = obsGroup\n \n if '0. Participant ID' in extraDataDict:\n obsID = int(extraDataDict['0. Participant ID'])\n \n newDataArray['observer_id'] = obsID\n obs_coldef_type = obs_ids_sheet.loc[obs_ids_sheet['observer_id']==obsID,['observer_coldef_type']]\n newDataArray['observer_coldef_type'] = int(obs_coldef_type['observer_coldef_type'])\n \n dataArray = pandas.concat([dataArray, newDataArray])\n sys.stdout.write(' . ')\n if (i%5)==0: sys.stdout.write('\\n')\n i+=1\n sys.stdout.write('\\n')\n \n # 2. 
Step: Adapt values to programstandards\n for item in settings.colDefLong2ID:\n dataArray.loc[dataArray['coldef_type'] == item, ['coldef_type']] = settings.colDefLong2ID[item]\n \n for item in settings.dalt2ID:\n dataArray.loc[dataArray['dalt_id'] == item, ['dalt_id']] = settings.dalt2ID[item]\n \n dataArray.loc[dataArray['dalt_id'] == 'none', ['dalt_id']] = 0\n \n \n dataArray = dataArray.rename(columns={'dalt_id': 'dalt_id',\n 'coldef_type': 'coldef_type',\n 'resp.corr_raw': 'is_correct',\n 'resp.rt_raw': 'resp_time',\n 'stimFile': 'filepath'})\n dataArray = dataArray.reset_index()\n \n # Adding set_id, motive_id and variant_id to each file\n for index, row in dataArray.iterrows():\n path_tmp = row['filepath']\n filename = os.path.basename(path_tmp).split('.')[0]\n dict_tmp = getStatsFromFilename(filename)\n imgID_tmp = int(dict_tmp['img_id'])\n \n tempVSDataArray = vs_ids_sheet.loc[vs_ids_sheet['image_id']==imgID_tmp,['set_id','motive_id','variant_id']]\n \n dataArray.at[index,'image_id'] = imgID_tmp\n dataArray.ix[index,'set_id'] = int(tempVSDataArray['set_id'])\n dataArray.ix[index,'motive_id'] = int(tempVSDataArray['motive_id'])\n dataArray.ix[index,'variant_id'] = int(tempVSDataArray['variant_id'])\n\n dataArray.image_id = dataArray.image_id.astype(int)\n dataArray.set_id = dataArray.set_id.astype(int)\n dataArray.motive_id = dataArray.motive_id.astype(int)\n dataArray.variant_id = dataArray.variant_id.astype(int)\n dataArray.is_correct = dataArray.is_correct.astype(bool)\n \n dataArray = dataArray[['image_id','set_id','motive_id','variant_id','dalt_id','coldef_type','is_correct','resp_time','observer_id','observer_coldef_type','session_id','filepath','obsGroup']]\n \n # 3. Saving data to file\n try:\n dataArray.to_csv(os.path.join(path_out, 'visdem-data.csv'),sep=\";\")\n sys.stdout.write(\"Success: ViSDEM data successfully saved in '\"+str(path_out)+\"'.\\n\")\n except Exception as e:\n print(e)", "def temperature(self):\n value = float(self._parent.query('R{}'.format(self._idx))[1:])\n return pq.Quantity(value, pq.Kelvin)", "def vorticity(tsr,solidity):\n \n # Reading in csv file (vorticity database)\n basepath = path.join(path.dirname(path.realpath(__file__)),'data')\n fdata = basepath + path.sep + 'vortdatabase.csv'\n f = open(fdata)\n csv_f = csv.reader(f)\n \n i = 0\n sol_d = np.array([])\n for row in csv_f:\n if i == 0:\n raw = row\n raw = np.delete(raw,0)\n vortdat = raw\n tsr_d = raw # range of tip-speed ratios included\n if row[0] == 'solidity':\n sol_d = np.append(sol_d,float(row[1])) # range of solidities included\n elif row[0] != 'TSR' and row[0] != 'solidity':\n raw = row\n raw = np.delete(raw,0)\n vortdat = np.vstack([vortdat,raw]) # adding entry to vorticity database array\n i += 1\n f.close()\n \n vortdat = np.delete(vortdat,(0),axis=0) # eliminating first row used as a placeholder\n tsr_d = tsr_d.astype(np.float) # converting tip-speed ratio entries into floats\n vortdat = vortdat.astype(np.float) # converting vorticity database entries into floats\n \n # Creating arrays for each EMG parameter\n for i in range(np.size(sol_d)):\n sol = str(i+1)\n \n exec('s'+sol+'_loc1 = vortdat[i*10]\\ns'+sol+'_loc2 = vortdat[i*10+1]\\ns'+sol+'_loc3 = vortdat[i*10+2]\\ns'+sol+'_spr1 = vortdat[i*10+3]\\ns'+sol+'_spr2 = vortdat[i*10+4]\\ns'+sol+'_skw1 = vortdat[i*10+5]\\ns'+sol+'_skw2 = vortdat[i*10+6]\\ns'+sol+'_scl1 = vortdat[i*10+7]\\ns'+sol+'_scl2 = vortdat[i*10+8]\\ns'+sol+'_scl3 = vortdat[i*10+9]\\n')\n \n # BIVARIATE SPLINE FITTING\n \n iz = np.size(sol_d)\n jz = 
np.size(tsr_d)\n \n # Initializing rectangular matrices\n Z_loc1 = np.zeros((iz,jz))\n Z_loc2 = np.zeros((iz,jz))\n Z_loc3 = np.zeros((iz,jz))\n Z_spr1 = np.zeros((iz,jz))\n Z_spr2 = np.zeros((iz,jz))\n Z_skw1 = np.zeros((iz,jz))\n Z_skw2 = np.zeros((iz,jz))\n Z_scl1 = np.zeros((iz,jz))\n Z_scl2 = np.zeros((iz,jz))\n Z_scl3 = np.zeros((iz,jz))\n \n # Transferring raw data into rectangular matrices\n for i in range(iz):\n for j in range(jz):\n sol = str(i+1)\n exec('Z_loc1[i,j] = s'+sol+'_loc1[j]')\n exec('Z_loc2[i,j] = s'+sol+'_loc2[j]')\n exec('Z_loc3[i,j] = s'+sol+'_loc3[j]')\n exec('Z_spr1[i,j] = s'+sol+'_spr1[j]')\n exec('Z_spr2[i,j] = s'+sol+'_spr2[j]')\n exec('Z_skw1[i,j] = s'+sol+'_skw1[j]')\n exec('Z_skw2[i,j] = s'+sol+'_skw2[j]')\n exec('Z_scl1[i,j] = s'+sol+'_scl1[j]')\n exec('Z_scl2[i,j] = s'+sol+'_scl2[j]')\n exec('Z_scl3[i,j] = s'+sol+'_scl3[j]')\n \n # Creating a rectangular bivariate spline of the parameter data\n s_loc1 = RectBivariateSpline(sol_d,tsr_d,Z_loc1)\n s_loc2 = RectBivariateSpline(sol_d,tsr_d,Z_loc2)\n s_loc3 = RectBivariateSpline(sol_d,tsr_d,Z_loc3)\n s_spr1 = RectBivariateSpline(sol_d,tsr_d,Z_spr1)\n s_spr2 = RectBivariateSpline(sol_d,tsr_d,Z_spr2)\n s_skw1 = RectBivariateSpline(sol_d,tsr_d,Z_skw1)\n s_skw2 = RectBivariateSpline(sol_d,tsr_d,Z_skw2)\n s_scl1 = RectBivariateSpline(sol_d,tsr_d,Z_scl1)\n s_scl2 = RectBivariateSpline(sol_d,tsr_d,Z_scl2)\n s_scl3 = RectBivariateSpline(sol_d,tsr_d,Z_scl3)\n \n # Selecting the specific parameters to use for TSR and solidity\n loc1 = s_loc1(solidity,tsr)\n loc2 = s_loc2(solidity,tsr)\n loc3 = s_loc3(solidity,tsr)\n spr1 = s_spr1(solidity,tsr)\n spr2 = s_spr2(solidity,tsr)\n skw1 = s_skw1(solidity,tsr)\n skw2 = s_skw2(solidity,tsr)\n scl1 = s_scl1(solidity,tsr)\n scl2 = s_scl2(solidity,tsr)\n scl3 = s_scl3(solidity,tsr)\n \n # Creating arrays of the parameters\n loc = np.array([loc1[0,0],loc2[0,0],loc3[0,0]])\n spr = np.array([spr1[0,0],spr2[0,0]])\n skw = np.array([skw1[0,0],skw2[0,0]])\n scl = np.array([scl1[0,0],scl2[0,0],scl3[0,0]])\n \n return loc,spr,skw,scl", "def process_weather(forecast_file):\n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n\n min_temp_store = {}\n max_temp_store = {}\n weather_results = str()\n header_results = str()\n\n for day_in_forecast in json_data['DailyForecasts']:\n day_date = day_in_forecast['Date']\n min_temp = day_in_forecast['Temperature']['Minimum'][\"Value\"]\n min_temp_c = convert_f_to_c(min_temp)\n min_temp_store[day_date] = min_temp_c\n max_temp = day_in_forecast['Temperature']['Maximum'][\"Value\"]\n max_temp_c = convert_f_to_c(max_temp)\n max_temp_store[day_date] = max_temp_c\n\n day_time_phrase = day_in_forecast['Day']['LongPhrase']\n rain_chance_day = day_in_forecast['Day']['RainProbability']\n night_time_phrase = day_in_forecast['Night']['LongPhrase']\n rain_chance_night = day_in_forecast['Night']['RainProbability']\n weather_results = weather_results + (f\"-------- {convert_date(day_date)} --------\\nMinimum Temperature: {format_temperature(round(min_temp_c,1))}\\nMaximum Temperature: {format_temperature(round(max_temp_c,1))}\\nDaytime: {day_time_phrase}\\n Chance of rain: {rain_chance_day}%\\nNighttime: {night_time_phrase}\\n Chance of rain: {rain_chance_night}%\\n\")+ \"\\n\"\n\n\n max_day = max(max_temp_store, key=max_temp_store.get)\n max_value = max_temp_store[max_day]\n min_day = min(min_temp_store, key=min_temp_store.get)\n min_value = min_temp_store[min_day]\n max_totals = (sum(max_temp_store.values()))\n min_totals = 
(sum(min_temp_store.values()))\n num_items = len(min_temp_store)\n mean_min = round(calculate_mean(min_totals,num_items),1)\n mean_max = round(calculate_mean(max_totals,num_items),1)\n\n save_header = (f\"{len(json_data['DailyForecasts'])} Day Overview\\n The lowest temperature will be {format_temperature(round((min_value),1))}, and will occur on {convert_date(min_day)}.\\n The highest temperature will be {format_temperature(round((max_value),1))}, and will occur on {convert_date(max_day)}.\\n The average low this week is {format_temperature(mean_min)}.\\n The average high this week is {format_temperature(mean_max)}.\\n\")\n\n header_results = save_header + \"\\n\"+ weather_results\n \n return(header_results)", "def get_temp():\n epts = [\"cage_coldPlate_temp\", \"cage_pressure\"]\n # t_earlier_aug = '2019-10-02T00:00'\n # t_later_aug = datetime.utcnow().isoformat()\n t_earlier_aug = '2019-09-27T13:00'\n t_later_aug = '2019-09-28T19:49'\n dfs = pandas_db_query(epts, t_earlier_aug, t_later_aug)\n print(dfs[epts[0]].tail())\n\n exit()\n\n xv = dfs[epts[0]][\"timestamp\"]\n yv = dfs[epts[0]][epts[0]]\n plt.plot(xv, yv, '-b')\n plt.ylabel(epts[0], ha='right', y=1)\n\n p1a = plt.gca().twinx()\n xv = dfs[epts[1]][\"timestamp\"]\n yv = dfs[epts[1]][epts[1]]\n p1a.set_ylabel(epts[1], color='r', ha='right', y=1)\n p1a.tick_params('y', colors='r')\n p1a.semilogy(xv, yv, '-r')\n\n plt.gcf().autofmt_xdate()\n plt.tight_layout()\n plt.show()", "def temperatures():\n hi_act= session.query(measurements.tobs,measurements.date,measurements.station).\\\n filter(measurements.station == 'USC00519281').\\\n filter(measurements.date >last_12).\\\n order_by(measurements.date).all()\n hi_act_df=pd.DataFrame(hi_act).set_index('date')\n hi_act_dict=hi_act_df.to_dict()\n return jsonify(hi_act_dict)", "def run_get_tones_for_15k_subset():\n add_tone_columns_to_csv('politics_30_months_comments_cleaned_standardized_vader_flair_15k.csv',\n 'politics_30_months_comments_cleaned_standardized_vader_flair_15k_tones.csv')", "def run_purity_plotting(input_tsvs, output_dir):\n # type: (list[str], str) -> None\n result_cols = [\"sensitivity\", \"sens_lo\", \"sens_hi\", \"sens_N\", \"precision\", \"prec_lo\", \"prec_hi\", \"prec_N\", \"purity\", \"pass\"]\n amp_results_df = DataFrame(columns=result_cols)\n del_results_df = DataFrame(columns=result_cols)\n\n min_sensitivity = 0.85\n min_precision = 0.8\n min_supported_purity = 0.39\n\n for i, input_tsv in enumerate(input_tsvs):\n purity = find_purity_from_filename(input_tsv)\n print(input_tsv + \" purity: \" + str(purity))\n\n if purity is None:\n print(\"The file \" + input_tsv + \" is unrecognized as being a HCC1143T purity file, so it is being skipped. 
Please see the src code here if you believe this is an error.\")\n continue\n\n sample = find_sample_from_filename(input_tsv)\n\n segs_df_tmp = pandas.read_csv(input_tsv, sep=\"\\t\", comment=\"@\")\n\n # Clean up by removing all locations where there was more than one ground truth value for copy number/ratio\n segs_df = segs_df_tmp[segs_df_tmp[GT_CN_COLUMN_NAME].apply(more_than_one_value)]\n tmp = segs_df[GT_CN_COLUMN_NAME]\n tmp = pandas.to_numeric(tmp, errors='coerce', downcast='integer')\n segs_df[GT_CN_COLUMN_NAME] = tmp\n\n cr_gt = 1 + (purity * ((segs_df[GT_CN_COLUMN_NAME] / ploidy) - 1))\n cr_gt.rename(GT_CR_COLUMN_NAME, inplace=True)\n\n if IS_LOG2_GUESS_CR:\n cr_guess = 2 ** segs_df[INPUT_GUESS_CR_COLUMN_NAME]\n else:\n cr_guess = segs_df[INPUT_GUESS_CR_COLUMN_NAME]\n\n cr_guess.rename(GUESS_CR_COLUMN_NAME, inplace=True)\n\n segs_df[GT_CR_COLUMN_NAME] = cr_gt\n segs_df[GUESS_CR_COLUMN_NAME] = cr_guess\n segs_gt_to_consider = segs_df[~segs_df[\"CALL\"].isnull() & (segs_df[\"CONTIG\"] != \"2\")]\n\n ## Amps\n tp = segs_gt_to_consider[(segs_gt_to_consider[\"CALL\"] == \"+\") & (segs_gt_to_consider[GT_CN_COLUMN_NAME] >= 5)]\n all_gt_amp = segs_gt_to_consider[segs_gt_to_consider[GT_CN_COLUMN_NAME] >= 5]\n sens_amps = float(len(tp)) / float(len(all_gt_amp))\n sens_amps_ci = clopper_pearson(len(tp), len(all_gt_amp))\n sens_amps_N = len(all_gt_amp)\n\n fp = segs_gt_to_consider[(segs_gt_to_consider[\"CALL\"] == \"+\") & (segs_gt_to_consider[GT_CN_COLUMN_NAME] <= 4)]\n prec_amps = float(len(tp)) / float(len(tp) + len(fp))\n prec_amps_ci = clopper_pearson(len(tp), (len(tp) + len(fp)))\n prec_amps_N = len(tp) + len(fp)\n\n amp_result = Series(name=sample, data={result_cols[0]: sens_amps, result_cols[1]: sens_amps_ci[0],\n result_cols[2]: sens_amps_ci[1], result_cols[3]: sens_amps_N,\n result_cols[4]: prec_amps, result_cols[5]: prec_amps_ci[0],\n result_cols[6]: prec_amps_ci[1], result_cols[7]: prec_amps_N,\n result_cols[8]: purity,\n result_cols[9]: is_passing(sens_amps_ci[1], prec_amps_ci[1], purity, min_sensitivity, min_precision,\n min_supported_purity)})\n\n amp_results_df = amp_results_df.append(amp_result)\n amp_results_df.sort_values(result_cols[8], inplace=True)\n\n print(\"Amp sensitivity: \" + str(sens_amps) + \" \" + str(sens_amps_ci))\n print(\"Amp precision: \" + str(prec_amps) + \" \" + str(prec_amps_ci))\n\n ## Dels\n tp_del = segs_gt_to_consider[\n (segs_gt_to_consider[\"CALL\"] == \"-\") & (segs_gt_to_consider[GT_CN_COLUMN_NAME] <= 2)]\n all_gt_del = segs_gt_to_consider[segs_gt_to_consider[GT_CN_COLUMN_NAME] <= 2]\n sens_dels = float(len(tp_del)) / float(len(all_gt_del))\n sens_dels_ci = clopper_pearson(len(tp_del), len(all_gt_del))\n sens_dels_N = len(all_gt_del)\n\n fp_del = segs_gt_to_consider[\n (segs_gt_to_consider[\"CALL\"] == \"-\") & (segs_gt_to_consider[GT_CN_COLUMN_NAME] > 2)]\n prec_dels = float(len(tp_del)) / float(len(tp_del) + len(fp_del))\n prec_dels_ci = clopper_pearson(len(tp_del), (len(tp_del) + len(fp_del)))\n prec_dels_N = len(tp_del) + len(fp_del)\n\n del_result = Series(name=sample, data={result_cols[0]: sens_dels, result_cols[1]: sens_dels_ci[0],\n result_cols[2]: sens_dels_ci[1], result_cols[3]: sens_dels_N,\n result_cols[4]: prec_dels, result_cols[5]: prec_dels_ci[0],\n result_cols[6]: prec_dels_ci[1], result_cols[7]: prec_dels_N,\n result_cols[8]: purity,\n result_cols[9]: is_passing(sens_dels_ci[1], prec_dels_ci[1], purity,\n min_sensitivity, min_precision,\n min_supported_purity)})\n del_results_df = del_results_df.append(del_result)\n 
del_results_df.sort_values(result_cols[8], inplace=True)\n\n print(\"Del sensitivity: \" + str(sens_dels) + \" \" + str(sens_dels_ci))\n print(\"Del precision: \" + str(prec_dels) + \" \" + str(prec_dels_ci))\n\n if len(amp_results_df) > 0 and len(del_results_df) > 0:\n plot_purity_series(output_dir, amp_results_df, \"Amplifications\", min_sensitivity, min_precision, min_supported_purity)\n plot_purity_series(output_dir, del_results_df, \"Deletions\", min_sensitivity, min_precision, min_supported_purity)\n amp_results_df.to_csv(output_dir + \"Amplifications_table.tsv\", sep=\"\\t\")\n del_results_df.to_csv(output_dir + \"Deletions_table.tsv\", sep=\"\\t\")", "def get_iPTF16hgs(colorplt = False):\n z = 0.017\n ebv = 0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = pd.read_csv('../data/otherSN/iPTF16hgs/table1.txt', sep=\"\\t\")\n tb = tb.drop(columns=[\"Unnamed: 5\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Magnitude'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Magnitude'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Magnitude'].values])\n tb = tb.drop(columns=[\"Magnitude\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n t_max = 57691.59 # from the paper\n tb['tmax_of'] = tb['mjd'] - t_max\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n \"\"\"\n plt.errorbar(tb[\"tmax_rf\"].values[ixg], tb[\"mag\"].values[ixg], tb[\"emag\"].values[ixg], fmt=\".g\")\n plt.errorbar(tb[\"tmax_rf\"].values[ixr], tb[\"mag\"].values[ixr], tb[\"emag\"].values[ixr], fmt=\".r\")\n plt.errorbar(tb[\"tmax_rf\"].values[ixi], tb[\"mag\"].values[ixi], tb[\"emag\"].values[ixi], fmt=\".y\")\n \"\"\"\n tb = add_datecol(tb)\n tb = add_physcol(tb)\n #tb = tb.drop(columns=[\"datetime64\"])\n if colorplt==False:\n return tb\n else:\n #tb = tb[tb.mjd > 55352.5]\n #tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = 
np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def main(temp, humid):\n user = 'root'\n password = 'root'\n dbname = 'iot'\n dbuser = 'raspberry'\n dbuser_password = 'password'\n query = 'select temp_value,humid_value from temp_humid;'\n json_body = [\n {\n \"measurement\": \"temp_humid\",\n \"fields\": {\n \"temp_value\": temp,\n \"humid_value\":humid \n\t}\n }\n ]\n\n client = InfluxDBClient('localhost', 8086, user, password, dbname)\n\n #client.create_database(dbname)\n\n print(\"Write points: {0}\".format(json_body))\n client.write_points(json_body)\n\n #print(\"Querying data: \" + query)\n #result = client.query(query)\n\n #print(\"Result: {0}\".format(result))\n\n #client.drop_database(dbname)", "def get_taux(name):\n indicateurResult = get_empty_kpi()\n config = get_config(name)\n log.debug('Processing - '+name)\n\n indicateurResult['nom'] = config['nom']\n indicateurResult['unite'] = config['unite']\n indicateurResult['unite_short'] = config['unite_short']\n indicateurResult['trendType'] = config['trendType']\n indicateurResult['color'] = get_color_indicateur(config)\n\n df = pd.read_csv(\n 'files_new/'+config['res_id_fra'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine_glissante'].apply(lambda x: str(x)[11:])\n for country in tqdm(countries, desc=\"Processing National\"):\n res = process_stock(\n df,\n 'nat',\n 'fra',\n config['trendType'],\n name\n )\n indicateurResult['france'].append(res)\n\n df = pd.read_csv(\n 'files_new/'+config['res_id_reg'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine_glissante'].apply(lambda x: str(x)[11:])\n for reg in tqdm(df.reg.unique(), desc=\"Processing Régions\"):\n res = process_stock(\n df[df['reg'] == reg].copy(),\n 'reg',\n reg,\n config['trendType'],\n name\n )\n indicateurResult['regions'].append(res)\n\n df = pd.read_csv(\n 'files_new/'+config['res_id_dep'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine_glissante'].apply(lambda x: str(x)[11:])\n for dep in tqdm(df.dep.unique(), desc=\"Processing Départements\"):\n res = process_stock(\n df[df['dep'] == dep].copy(),\n 'dep',\n dep,\n config['trendType'],\n name\n )\n indicateurResult['departements'].append(res)\n\n save_result(indicateurResult, name)", "def Q_flux(self):\n fields = self.read_vars(['x','y','z'])\n Z, Y, X = np.meshgrid(fields['z']/self.params['Lz'],\n fields['y']/self.params['Ly'] - 0.5,\n fields['x']/self.params['Lx'] - 0.5, indexing='ij')\n\n r = np.sqrt(X**2 + Y**2)\n r0 = 0.01\n msk = 0.5*(1.-np.tanh(r/r0))\n delta = 1/(self.params[\"global_nz\"])\n Q =1e-5*np.exp(-Z/delta)/delta*msk\n\n return Q" ]
[ "0.6243293", "0.5701022", "0.5552244", "0.5491569", "0.5487227", "0.5479191", "0.54358137", "0.54272795", "0.5394585", "0.5392419", "0.53615534", "0.533306", "0.53179353", "0.5315923", "0.5295476", "0.5265205", "0.52420974", "0.5239961", "0.52259064", "0.52226466", "0.52215695", "0.52029717", "0.51637083", "0.5157517", "0.5152349", "0.51520777", "0.5150459", "0.51497936", "0.5144993", "0.51448935" ]
0.64122236
0
Adapted from nicholls_turton.ipynb
sst, sea surface temperature (K)
ft_qv, mixedlayer top qv (kg kg^-1)
use_NT, True or False
def calc_equil(sst, ft_qv, use_NT=False):
    run_main(sst, ft_qv, use_NT)

    # grab csv file
    with open('dumpmodel.csv','r') as f:
        df_result=pd.read_csv(f)

    # last time step into named tupple
    out=df_result.iloc[-1]
    steady_state=make_tuple(out.to_dict())
    steady_state

    # obtain steady-state values
    dth=steady_state.deltheta
    dqt=steady_state.delqv
    thetal_m=steady_state.theta
    qt_m=steady_state.qv
    h=steady_state.h
    press=tf.find_press(steady_state.h)  #kPa
    thetal_ft = steady_state.theta + dth
    qt_ft = steady_state.qv + dqt
    zb = steady_state.LCL
    zi = steady_state.h
    we = steady_state.went

    # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)
    gamma = 6e-3
    thetal_3000 = thetal_ft + gamma*(3000-h)
    LTS = thetal_3000 - steady_state.theta

    # calculate delta_Fr
    delta_Frstar = 82.0  # Wm^-2
    Frlambda = 7.9  # Wm^-2, using with CTL from Gesso
    delta_Fr = delta_Frstar - Frlambda*qt_ft*1000  # convert qt_ft to g kg^-1

    # calculate LWP
    rho = 1.
    LWP = 0.5*rho*(zi-zb)**2

    # put all required variables into output array
    out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])

    return out_array
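A minimal usage sketch for the function above, assuming calc_equil and the helpers it relies on (run_main, the thermodynamics module tf, make_tuple, pandas as pd, numpy as np) are importable from the notebook's support code; the sweep values below are illustrative only.

import numpy as np

# Hypothetical driver: sweep SST and free-tropospheric qv with the
# Nicholls-Turton entrainment closure and collect the equilibrium states.
# Assumes calc_equil is already defined/imported as in the notebook.
sst_values = np.array([290., 295., 300.])        # sea surface temperature (K)
ft_qv_values = np.array([2.e-3, 4.e-3, 6.e-3])   # mixed-layer-top qv (kg/kg)

results = []
for sst in sst_values:
    for ft_qv in ft_qv_values:
        # each row: thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt
        results.append(calc_equil(sst, ft_qv, use_NT=True))

results = np.array(results)  # shape (9, 9): one row per (sst, ft_qv) pair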
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_main(sst, ft_qv, use_NT):\n\n dtout=10. #minutes\n end_time=8*24. #hours\n del_time=dtout*60. #seconds\n end_time=end_time*3600. #seconds\n #sst=297\n D=5.e-6 #s-1\n U=7 #m/s\n psfc=100. #kPa\n qsfc=tf.qs_tp(sst,psfc)\n ft_intercept = 292 #K\n ft_gamma = 6.e-3 #K/m\n #ft_qv = 2.e-3\n k=0.2 #entrainment efficiency\n Cd = 1.e-3 #drag coefficient\n tspan = np.arange(0.,end_time,del_time)\n vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start\n the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,\n qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT\n the_tup=make_tuple(the_tup,'coeffs')\n output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))\n result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])\n\n # save time/computation by only doing calculations for the last timestep (equilibrium)\n result['time']=tspan[-1]/3600./24. #days\n result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]\n result['delqv'] = ft_qv - result['qv'].iloc[-1]\n result['LCL'] = calc_lcl(result.iloc[-1], psfc)\n result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)\n result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)\n result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)\n \n # decide how to calculate entrainment\n the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]\n if use_NT:\n result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1], \n result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])\n else:\n result['went']=calc_went(result.iloc[-1],the_tup)\n\n result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)\n\n with open('dumpmodel.csv','w') as f:\n result.to_csv(f,index=False)\n \n return None", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):\n thetal_m = the_vars[0]\n qt_m = the_vars[2]\n zi = the_vars[1]\n dth = deltheta\n \n thetal_ft = thetal_m + dth\n qt_ft = coeffs.ft_qv\n \n dqt = qt_ft - qt_m\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-zi)\n LTS = thetal_3000 - coeffs.sst # lower tropospheric stability\n\n # calculate coefficients\n press=tf.find_press(zi)\n Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)\n Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)\n \n invert= tf.t_uos_thetal(thetal_m,qt_m,press)\n T_0 = invert.temp\n lv=tf.L_t(invert.temp)\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n del_thv_dry = Ad * dth + Bd * dqt\n del_thv_sat = Aw * dth + Bw * dqt\n \n # account for evaporative cooling (increases we)\n ql_max = invert.ql\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n Del_thv = del_thv_dry - Cl * ql_max\n \n # calculate buoyancy integral terms\n rho = 1.\n lcl_press=tf.LCL_thetal(thetal_m,qt_m)\n zb=tf.find_height(lcl_press)\n\n T1 = zb/zi\n T2 = 0.5 * zb**2 / zi**2\n T3 = (zi-zb)/zi\n T4 = 0.5 * (zi**2 - zb**2) / zi**2\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n wtl_0=F0\n wqt_0=Fqv0\n Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 
3\n term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))\n term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))\n term3 = Del_F * (Ad * T2 + Aw * T4)\n\n Theta_NE = term1 + term2 + term3\n \n # calculate w*\n wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)\n \n # calculate chi*\n chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)\n \n # calculate del_m\n Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)\n \n # calculate we\n a2=15.\n Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv))\n \n A_NT = 0.2\n fac_NT = 2.5\n\n term4 = Del_thv_NT\n term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)\n denominator = term4 + term5\n\n we = A_NT * fac_NT * Theta_NE / denominator\n \n return we", "def test_set_vT(self):\n s = State(substance=\"water\")\n s.vT = Q_(1.801983936953226, \"m**3/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.vT[0], Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def test_set_sT(self):\n s = State(substance=\"water\")\n s.sT = Q_(7496.2021523754065, \"J/(kg*K)\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.sT[0], Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def test_set_uT(self):\n s = State(substance=\"water\")\n s.uT = Q_(2547715.3635084038, \"J/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.uT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.uT[0], Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def prescribed_surface_temperature(x, t, K_medium, 
rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial", "def test_set_xT(self):\n s = State(substance=\"water\")\n s.xT = Q_(0.5, \"dimensionless\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(245769.34557103913, \"Pa\")) # type: ignore\n assert np.isclose(s.xT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.xT[0], Q_(0.5, \"dimensionless\")) # type: ignore\n assert np.isclose(s.u, Q_(1534461.5163075812, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(4329.703956664546, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(4056.471547685226, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(2913.7307270395363, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.3656547423394701, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1624328.2430353598, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.5, \"dimensionless\")) # type: ignore\n s.xT = Q_(50, \"percent\"), Q_(400.0, \"K\")\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(245769.34557103913, \"Pa\")) # type: ignore\n assert np.isclose(s.xT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.xT[0], Q_(0.5, \"dimensionless\")) # type: ignore\n assert np.isclose(s.u, Q_(1534461.5163075812, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(4329.703956664546, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(4056.471547685226, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(2913.7307270395363, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.3656547423394701, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1624328.2430353598, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.5, \"dimensionless\")) # type: ignore", "def TM_fluid(layer, kx, om):\n\n h = layer.d\n rho = layer.medium.rho\n K = layer.medium.K\n k = om*np.sqrt(rho/K)\n ky = np.sqrt(k**2-kx**2)\n T = np.zeros((2, 2), dtype=complex)\n T[0, 0] = np.cos(ky*h)\n T[1, 0] = (om**2*rho/ky)*np.sin(ky*h)\n T[0, 1] = -(ky/(om**2*rho))*np.sin(ky*h)\n T[1, 1] = np.cos(ky*h)\n return T", "def test_set_hT(self):\n s = State(substance=\"water\")\n s.hT = Q_(2730301.3859201893, \"J/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.hT[0], Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def sky_observed(seed=425, th=150, old=False):\n \n # impact parameters\n M = 3e7*u.Msun\n #M = 6e7*u.Msun\n B = 19.95*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n old_label = ''\n \n if old:\n old_label = '_old'\n \n # 
impact parameters\n M = 5e7*u.Msun\n B = 19.8*u.kpc\n V = 210*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 150\n theta = coord.Angle(th*u.deg)\n Tenc = 0.05*u.Gyr\n T = 2*u.Gyr\n dt = 0.1*u.Myr\n #dt = 1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n alt_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': -45*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n xr = 20*u.kpc + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n # closest to impact\n ienc = np.argmin(np.abs(x))\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # alternative sky coordinates\n xgal_alt = coord.Galactocentric(stream['x'], **alt_observer)\n xeq_alt = xgal_alt.transform_to(coord.ICRS)\n veq_alt_ = gc.vgal_to_hel(xeq_alt, stream['v'], **vobs)\n veq_alt = [None] * 3\n veq_alt[0] = veq_alt_[0].to(u.mas/u.yr)\n veq_alt[1] = veq_alt_[1].to(u.mas/u.yr)\n veq_alt[2] = veq_alt_[2].to(u.km/u.s)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # alternative sky coordinates\n xgal0_alt = coord.Galactocentric(stream0['x'], **alt_observer)\n xeq0_alt = xgal0_alt.transform_to(coord.ICRS)\n veq0_alt_ = gc.vgal_to_hel(xeq0_alt, stream0['v'], **vobs)\n veq0_alt = [None] * 3\n veq0_alt[0] = 
veq0_alt_[0].to(u.mas/u.yr)\n veq0_alt[1] = veq0_alt_[1].to(u.mas/u.yr)\n veq0_alt[2] = veq0_alt_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n # alternative observer\n R_alt = find_greatcircle(xeq0_alt.ra.deg[::10], xeq0_alt.dec.deg[::10])\n xi0_alt, eta0_alt = myutils.rotate_angles(xeq0_alt.ra, xeq0_alt.dec, R_alt)\n xi0_alt = coord.Angle(xi0_alt*u.deg)\n \n # place gap at xi~0\n xioff_alt = xi0_alt[ienc]\n xi0_alt -= xioff_alt\n \n xi_alt, eta_alt = myutils.rotate_angles(xeq_alt.ra, xeq_alt.dec, R_alt)\n xi_alt = coord.Angle(xi_alt*u.deg)\n xi_alt -= xioff_alt\n \n\n # observed gd1\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n \n vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-1.5, 1.5], [-1.5, 1.5], [-30,30]]\n color = '0.35'\n ms = 4\n alpha = 0.7\n \n # plotting\n plt.close()\n fig, ax = plt.subplots(2,4,figsize=(17,8), sharex=True, sharey='col')\n \n plt.sca(ax[0][0])\n plt.plot(xi.wrap_at(wangle), eta, '.', mec='none', color=color, ms=ms, label='Simulated GD-1')\n \n #plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.xlim(-20,20)\n plt.ylim(-5,5)\n \n plt.sca(ax[1][0])\n plt.plot(xi_alt.wrap_at(wangle), eta_alt, '.', mec='none', color=color, ms=ms, label='Simulated GD-1')\n \n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.xlim(-20,20)\n plt.ylim(-5,5)\n \n xeqs = [xeq.ra, xeq.dec, xeq.distance.to(u.kpc)]\n dv = []\n dv_alt = []\n for i in range(3):\n plt.sca(ax[0][i+1])\n \n # interpolate expected kinematics from an unperturbed stream\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n dv += [veq[i]-vexp]\n plt.plot(xi.wrap_at(wangle), dv[i], '.', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n \n plt.sca(ax[1][i+1])\n # interpolate expected kinematics from an unperturbed stream\n vexp_alt = np.interp(xi_alt.wrap_at(wangle), xi0_alt.wrap_at(wangle), veq0_alt[i].value) * veq0_alt[i].unit\n dv_alt += [veq_alt[i]-vexp_alt]\n plt.plot(xi_alt.wrap_at(wangle), dv_alt[i], '.', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n plt.xlabel('$\\phi_1$ [deg]')\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin = np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n np.random.seed(seed+1)\n fgaia = np.sqrt(2/5)\n print(2/5, fgaia)\n phi1 = xi[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin]\n pmra = dv[0][idmin] + g['pmra_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n pmdec = dv[1][idmin] + 
g['pmdec_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n \n colors = ['tab:red', 'tab:blue', '0.4']\n labels = ['Stream', 'Spur']\n labels = ['Gaia DR4', '']\n \n for e, mask in enumerate([onstream_mask, spur_mask]):\n plt.sca(ax[0][0])\n plt.plot(phi1[mask], phi2[mask], 'o', color=colors[e], mec='none', alpha=alpha, label=labels[e])\n \n plt.sca(ax[0][1])\n plt.errorbar(phi1[mask], pmra[mask].value, yerr=g['pmra_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n plt.sca(ax[0][2])\n plt.errorbar(phi1[mask], pmdec[mask].value, yerr=g['pmdec_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n print(np.sqrt(np.sum(g['pmra_error'][mask]**2))/np.sum(mask))\n print(np.sqrt(np.sum(g['pmdec_error'][mask]**2))/np.sum(mask))\n\n Nfield = 2\n p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n pmerr = np.array([0.0848, 0.0685])\n \n np.random.seed(seed+2)\n phi1 = xi[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin2]\n pmra = dv[0][idmin2].value + pmerr*np.random.randn(Nfield)\n pmdec = dv[1][idmin2].value + pmerr*np.random.randn(Nfield)\n \n plt.sca(ax[0][0])\n plt.errorbar(phi1, phi2, color='k', fmt='o', label='HST')\n \n plt.sca(ax[0][1])\n plt.errorbar(phi1, pmra, yerr=pmerr, color='k', fmt='o')\n \n plt.sca(ax[0][2])\n plt.errorbar(phi1, pmdec, yerr=pmerr, color='k', fmt='o')\n \n \n ##############\n # alt observer\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi_alt.wrap_at(wangle).to(u.deg).value, eta_alt])\n idmin = np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n #np.random.seed(seed+3)\n phi1 = xi_alt[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta_alt[idmin]\n pmra = dv_alt[0][idmin] + g['pmra_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n pmdec = dv_alt[1][idmin] + g['pmdec_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n \n colors = ['tab:red', 'tab:blue', '0.4']\n labels = ['Gaia DR4', '']\n \n for e, mask in enumerate([onstream_mask, spur_mask]):\n plt.sca(ax[1][0])\n plt.plot(phi1[mask], phi2[mask], 'o', color=colors[e], mec='none', alpha=alpha, label=labels[e])\n \n plt.sca(ax[1][1])\n plt.errorbar(phi1[mask], pmra[mask].value, yerr=g['pmra_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n plt.sca(ax[1][2])\n plt.errorbar(phi1[mask], pmdec[mask].value, yerr=g['pmdec_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n Nfield = 2\n p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n q = np.array([xi_alt.wrap_at(wangle).to(u.deg).value, eta_alt])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n pmerr = np.array([0.11, 0.08])\n \n np.random.seed(seed+6)\n phi1 = xi_alt[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = 
eta_alt[idmin2]\n pmra = dv_alt[0][idmin2].value + pmerr*np.random.randn(Nfield)\n pmdec = dv_alt[1][idmin2].value + pmerr*np.random.randn(Nfield)\n \n plt.sca(ax[1][0])\n plt.errorbar(phi1, phi2, color='k', fmt='o', label='HST')\n \n plt.sca(ax[1][1])\n plt.errorbar(phi1, pmra, yerr=pmerr, color='k', fmt='o')\n \n plt.sca(ax[1][2])\n plt.errorbar(phi1, pmdec, yerr=pmerr, color='k', fmt='o')\n \n \n plt.sca(ax[0][0])\n plt.text(0.1,0.85, '$\\\\theta_{roll}$ = 60$^\\circ$', fontsize='small', transform=plt.gca().transAxes)\n\n plt.sca(ax[1][0])\n plt.text(0.1,0.85, '$\\\\theta_{roll}$ = -45$^\\circ$', fontsize='small', transform=plt.gca().transAxes)\n plt.legend(fontsize='small', loc=3, handlelength=0.2)\n \n plt.suptitle('Expected astrometric performance', fontsize='medium')\n plt.tight_layout(rect=[0,0,1,0.94])\n plt.savefig('../plots/astrometric_performance.png')", "def test_set_Tu(self):\n s = State(substance=\"water\")\n s.Tu = Q_(400.0, \"K\"), Q_(2547715.3635084038, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.Tu[0], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.Tu[1], Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def test_transport_sanity(self):\n T = 400\n cv_mole, W = 21005.045895231186, 28.014\n species_name = \"N2\"\n\n data = ct_properties.ctThermoTransport(\"gri30.cti\", verbose=False)\n data.evaluate_properties()\n i = data.gas.species_index(species_name)\n\n As, Ts, _, poly_mu, poly_kappa, log_poly_mu, log_poly_kappa= ct2foam_utils.fit_ct_transport(data)\n\n mu_s = tr_fitter.sutherland(T, As[i], Ts[i])\n kappa_s=tr_fitter.euken(mu_s, cv_mole, W, R)\n mu_logp, kappa_logp = tr_fitter.eval_log_polynomial(log_poly_mu[i,:], log_poly_kappa[i,:], T)\n mu_p, kappa_p = tr_fitter.eval_polynomial(poly_mu[i,:], poly_kappa[i,:], T)\n\n\n # rough test whether they are in the same scale...\n mu_ref = 2.2217e-5\n kappa_ref = 0.032205\n\n self.assertTrue(np.abs(mu_s-mu_ref)/np.abs(mu_ref) < 0.07)\n self.assertTrue(np.abs(mu_p-mu_ref)/np.abs(mu_ref) < 0.01)\n self.assertTrue(np.abs(mu_logp-mu_ref)/np.abs(mu_ref) < 0.01)\n self.assertTrue(np.abs(kappa_s-kappa_ref)/np.abs(kappa_ref) < 0.05)\n self.assertTrue(np.abs(kappa_p-kappa_ref)/np.abs(kappa_ref) < 0.05)\n self.assertTrue(np.abs(kappa_logp-kappa_ref)/np.abs(kappa_ref) < 0.05)", "def test_virtual_temperature():\n t = 288. 
* units.kelvin\n qv = .0016 * units.dimensionless # kg/kg\n tv = virtual_temperature(t, qv)\n assert_almost_equal(tv, 288.2796 * units.kelvin, 3)", "async def test_thermostat_heatit_z_trm3_no_value(\n hass: HomeAssistant, client, climate_heatit_z_trm3_no_value, integration\n) -> None:\n # When the config parameter that specifies what sensor to use has no value, we fall\n # back to the first temperature sensor found on the device\n state = hass.states.get(CLIMATE_FLOOR_THERMOSTAT_ENTITY)\n assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 22.5", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux", "def qtf(self, vw, 
th, gp, psi_l, lai, dt):\n\t\t#if the amount of water in tank is less than amount that will be absorbed by plant in timestep dt, then what's left will be absorbed \n\t qtt = th - self.qwf(vw, th, gp, psi_l, lai, dt)\n\t if self.tx*self.ZT*10**6 <= 0:\n\t return 0.\n\t elif self.tx*self.ZT*10**6 <= qtt*dt:\n\t return (self.tx*self.ZT*10**6/dt)\n\t else:\n\t return qtt", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def compSETrueSim(self,taup=None):\r\n \r\n if self.layer_type == 'input':\r\n # Create unit Gaussians for the sampling\r\n zshape = SELayer.sample_shape(self.shape,self.nsamp)\r\n self.wq = np.random.normal(0,1,zshape) \r\n\r\n # Sample the output\r\n self.ztrue = self.sample(nsamp=self.nsamp)\r\n \r\n # Compute the sample mean\r\n tauz = np.mean(np.abs(self.ztrue)**2)\r\n return tauz\r\n \r\n \r\n else:\r\n # Create unit Gaussians for the sampling\r\n pshape = SELayer.sample_shape(self.shape[0],self.nsamp)\r\n zshape = SELayer.sample_shape(self.shape[1],self.nsamp)\r\n self.wp = np.random.normal(0,1,pshape) \r\n self.wq = 
np.random.normal(0,1,zshape) \r\n\r\n # Generate random Gaussian input\r\n pshape = SELayer.sample_shape(self.shape[0], self.nsamp)\r\n self.ptrue = np.random.normal(0,np.sqrt(taup),pshape)\r\n \r\n # Sample from the ouptut\r\n self.ztrue = self.sample(self.ptrue,nsamp=self.nsamp)\r\n \r\n # Compute the sample mean\r\n tauz = np.mean(np.abs(self.ztrue)**2)\r\n return tauz", "def test_mixed_layer():\n pressure = np.array([959., 779.2, 751.3, 724.3, 700., 269.]) * units.hPa\n temperature = np.array([22.2, 14.6, 12., 9.4, 7., -38.]) * units.degC\n mixed_layer_temperature = mixed_layer(pressure, temperature, depth=250 * units.hPa)[0]\n assert_almost_equal(mixed_layer_temperature, 16.4024930 * units.degC, 6)", "def Tsky(self, source):\n\n if not _usePyGSM:\n raise ImportError('PyGSM is not available: cannot access sky temperatures')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n source=source.galactic\n T=healpy.pixelfunc.get_interp_val(self.map,\n source.l.value,\n source.b.value,\n lonlat=True)\n return T*u.K", "def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)", "def test_set_Ts(self):\n s = State(substance=\"water\")\n s.Ts = Q_(400.0, \"K\"), Q_(7496.2021523754065, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.Ts[0], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.Ts[1], Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 
6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def plotTsneE_3D(datadf,level,descname,v,path_output,Efilter,own_cmap,clustermethod=\"kmeans\",onlyShow=False,selected=False):\n\n if not selected:\n datadf = 
datadf[datadf[\"{}Energy\".format(level)]<=Efilter].sort_values(by=\"{}_{}_klabel\".format(level,descname))\n\n klabels = datadf[\"{}_{}_klabel\".format(level,descname)].astype(str)\n fig = px.scatter_3d(data_frame=datadf,\n z=\"{}Energy\".format(level),\n x=\"{}_{}_tsne1\".format(level,descname),\n y=\"{}_{}_tsne2\".format(level,descname),\n color=klabels,\n color_discrete_sequence=own_cmap,\n size=\"GyRadius\",\n opacity=0.9,\n #symbol=\"symbol\", # Use if needed in Jupyter\n hover_name=datadf.index,\n title=\"{}'s' t-SNE + {}Energy\".format(descname,level),\n #range_z=[-36,-20],\n width= 1200,\n height= 900)\n\n\n if onlyShow:\n fig.show()\n elif selected:\n fig.write_html(\"{}/{}_{}_EtSNE_selected.html\".format(path_output,level,descname))\n else:\n fig.write_html(\"{}/{}_{}_EtSNE.html\".format(path_output,level,descname))", "def test_set_pT(self):\n s = State(substance=\"water\")\n s.pT = Q_(101325.0, \"Pa\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.pT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.pT[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None\n assert s.phase == \"gas\"", "def compute_tsky_hot( xv, yv, hv, thot, tcold):\n\n nData = len(yv) \n epsilons = np.full( nData, EPSILON)\n tsys = np.zeros(nData) # initialize arrays\n\n Z = np.zeros(nData)\n oneMZ = np.zeros(nData)\n # For full Temp calibration, a spectrum taken at high elevation away from \n # The galactic plan is used. For this program the cold spectrum must be\n # the spectrum being calibrated. See the M command for comparision\n epsilons = np.full( nData, EPSILON)\n yv = np.maximum( yv, epsilons)\n hv = np.maximum( hv, epsilons)\n # comput the cold/hot ratio\n Z = yv/hv\n oneMZ = np.full( nData, 1.) - Z\n oneMZ = np.maximum( oneMZ, epsilons)\n\n # the cold, receiver, temperature is this function\n tsys = ((Z*thot) - tcold)/oneMZ\n \n n6 = int(nData/6)\n n56 = 5*n6\n\n tsysmedian = np.median( tsys[n6:n56])\n\n tsky = np.zeros(nData) # initialize arrays\n S = np.zeros(nData) # initialize arrays\n\n # The system gain S is computed assuming a tsys is the cold load\n S = np.full( nData, tsysmedian+thot)/hv\n # scale the observed instensity in counts to Kelvins.\n tsky = S*yv\n\n return tsky", "def temperature() -> float:" ]
[ "0.60827553", "0.5971266", "0.57040036", "0.5694853", "0.56882703", "0.5666581", "0.5637644", "0.5630418", "0.56049395", "0.55450636", "0.54949796", "0.5493195", "0.54648805", "0.54205227", "0.5363305", "0.53159505", "0.5269913", "0.52530104", "0.524438", "0.52441275", "0.5217888", "0.5202955", "0.5199603", "0.51980907", "0.5187931", "0.51861656", "0.51783586", "0.51737976", "0.51603997", "0.51466423" ]
0.65542036
0
Checks a service's replication levels based on how the service's replication should be monitored. (smartstack or mesos)
def check_service_replication( instance_config, all_tasks, smartstack_replication_checker, ): expected_count = instance_config.get_instances() log.info("Expecting %d total tasks for %s" % (expected_count, instance_config.job_id)) proxy_port = marathon_tools.get_proxy_port_for_instance( name=instance_config.service, instance=instance_config.instance, cluster=instance_config.cluster, soa_dir=instance_config.soa_dir, ) registrations = instance_config.get_registrations() # if the primary registration does not match the service_instance name then # the best we can do is check marathon for replication (for now). if proxy_port is not None and registrations[0] == instance_config.job_id: check_smartstack_replication_for_instance( instance_config=instance_config, expected_count=expected_count, smartstack_replication_checker=smartstack_replication_checker, ) else: check_healthy_marathon_tasks_for_service_instance( instance_config=instance_config, expected_count=expected_count, all_tasks=all_tasks, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mmo_configsrv_replication_status(self, mmo_connection):\n replication_state = []\n if self.mmo_is_mongos(mmo_connection):\n configsrv = self.mmo_config_servers(mmo_connection)[0]\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n c = self.mmo_connect_mongod(hostname=configsrv[\"hostname\"],\n \t port=configsrv[\"port\"],\n \t username=auth_dic[\"username\"],\n \t password=auth_dic[\"password\"],\n \tauthentication_db=auth_dic[\"authentication_database\"]\n \t)\n if self.mmo_is_cfg_rs(c):\n command_output = c[\"admin\"].command(\"replSetGetStatus\")\n shard = command_output[\"set\"]\n replication_state.append({\"hostname\": configsrv[\"hostname\"], \"port\": configsrv[\"port\"], \"shard\": shard, \"command_output\": command_output})\n else:\n raise Exception(\"Not a mongos process\")\n return replication_state", "def mmo_replication_status(self, mmo_connection):\n replication_state = []\n if self.mmo_is_mongos(mmo_connection):\n #o = self.mmo_execute_on_primaries(mmo_connection, \"replSetGetStatus\")\n o = self.mmo_execute_on_secondary_or_primary(mmo_connection, \"replSetGetStatus\", \"all\", True)\n #print o2;\n return o\n else:\n raise Exception(\"Not a mongos process\")", "def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. 
Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. 
Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def replication_status(self):\n psql = postgresql_svc.PSQL()\n try:\n query_out = psql.execute(self.replication_status_query)\n except PopenError, e:\n if 'function pg_last_xact_replay_timestamp() does not exist' in str(e):\n raise BaseException('This version of PostgreSQL server does not support replication status')\n else:\n raise e\n query_result = self._parse_query_out(query_out)\n\n is_master = int(__postgresql__[OPT_REPLICATION_MASTER])\n\n if query_result['xlog_delay'] is None:\n if is_master:\n return {'master': {'status': 'up'}}\n return {'slave': {'status': 'down',\n 'error': query_result['error']}}\n return {'slave': {'status': 'up',\n 'xlog_delay': query_result['xlog_delay']}}", "def service_level(self, level=None):\n if level is None:\n flags = self._flags()\n if flags['auto_service_level']:\n return 0\n return flags['service_level']\n else:\n if self._request('SL', _service_levels[level])[0]:\n return level\n\n raise EvseError", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days 
* 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def mmo_is_cfg_rs(self, mmo_connection):\n s = None\n if self.mmo_is_configsrv(mmo_connection):\n try:\n r = mmo_connection[\"admin\"].command(\"replSetGetStatus\")\n s = True\n except Exception as exception:\n if \"not running with --replSet\" in str(exception):\n s = False\n else:\n raise exception\n else:\n raise Exception(\"Not a config server\")\n return s", "def check_consul_services(con):\n whitelist = get_whitelist(con)\n\n if whitelist:\n LOG.warning(\"Checks from the following hosts will be ignored, \" +\n \"because service/rebootmgr/ignore_failed_checks is set: {}\".format(\", \".join(whitelist)))\n\n local_checks = get_local_checks(con, tags=[\"rebootmgr\"])\n LOG.debug(\"relevant_checks: %s\" % local_checks)\n\n for name, check in get_failed_cluster_checks(con, local_checks).items():\n if check[\"Node\"] in whitelist:\n continue\n\n LOG.error(\"There were failed consul checks. Exit\")\n sys.exit(EXIT_CONSUL_CHECKS_FAILED)\n\n LOG.info(\"All checks passed\")", "def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")", "def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. 
Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")", "def _check_rac_srv(cfg, warning=None, critical=None):\n regex = re.compile(\"Instance .* is running on node .*\")\n bin_name = \"srvctl\"\n _check_attrs(cfg, [\"sid\", \"oh\"])\n bin_name = os.path.join(cfg.oh, \"bin\", bin_name)\n try:\n args = bin_name + \" status database -d {sid}\".format(sid=cfg.sid)\n cp = subprocess.run(args, shell=True, check=True, stdout=subprocess.PIPE)\n if cp.stdout is None:\n print(\"None result from crsctl\")\n return UNKNOWN\n out = str(cp.stdout, \"utf-8\")\n running, not_running = 0, 0\n for l in out.split(os.linesep):\n if l.lstrip().rstrip() == \"\":\n continue\n if regex.search(l.lstrip().rstrip()):\n running += 1\n else:\n not_running += 1\n\n if not_running >= running:\n print(\"you got {0} nodes was not running\".format(not_running))\n return CRITICAL\n if not_running > 0:\n print(\"you got {0} nodes was not running\".format(not_running))\n return WARNING\n\n print(\"all {0} nodes is running\".format(running))\n return OK\n except subprocess.CalledProcessError as err:\n print(err.output)\n return UNKNOWN", "def test_contrail_services_status_after_restart_master_node(os_faults_steps):\n services_statuses = contrail_status.get_services_statuses(os_faults_steps)\n master_node_fqdn = None\n for fqdn, services in services_statuses.items():\n for service in services:\n if (service['service'] == 'contrail-schema' and\n service['status'] == contrail_status.STATUS_ACTIVE):\n master_node_fqdn = fqdn\n break\n assert master_node_fqdn is not None, \"Can't find master node\"\n master_node = os_faults_steps.get_node(fqdns=[master_node_fqdn])\n os_faults_steps.reset_nodes(master_node)\n\n waiter.wait(\n contrail_status.check_services_statuses,\n args=(os_faults_steps),\n expected_exceptions=AssertionError,\n timeout=settings.CONTRAIL_NODE_RESET_TIMEOUT)", "def mcstatus(self, irc, msg, args):\n prefix = self.registryValue('prefix')\n suffix = self.registryValue('suffix')\n\n separator = self.registryValue('separator')\n\n svprefix = self.registryValue('service.prefix')\n svsuffix = self.registryValue('service.suffix')\n\n stonline = self.registryValue('status.online')\n stoffline = self.registryValue('status.offline')\n\n\n json_data = urllib2.urlopen(self.registryValue('statusURL')).read()\n data = json.loads(json_data)\n services = []\n\n for pair in data:\n service, status = pair.keys()[0], pair.values()[0]\n services.append('%s%s%s%s' % (svprefix, service, svsuffix,\n stonline if status == 'green' else \\\n stoffline))\n\n irc.reply('%s%s%s' % (prefix, separator.join(services), suffix))", "def _check_ops(self):\n required_ops = ['san_ip', 'san_login', 'san_password']\n for attr in required_ops:\n if not getattr(self.configuration, attr, None):\n raise exception.InvalidInput(reason=_('%s is not set.') % attr)\n\n replica = self.configuration.safe_get('replication_device')\n if replica and isinstance(replica, list):\n replica_ops = ['backend_id', 'login', 'password', 'rpo']\n for attr in replica_ops:\n if attr not in replica[0]:\n msg = _('replication_device %s is not set.') % attr\n raise exception.InvalidInput(reason=msg)\n self.replica = Replication(replica[0])", "def test_redis_increase_replica_count_usual_case():", "async def healthcheck(self):\n for service in self.services:\n await 
service.healthcheck()", "def test_check_replication_crit_lag_notworking(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (None, 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag not working \"\n \"(perms issue? check syslog)\".format(repl))\n for repl in ('account', 'object', 'container')])", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def can_change_srv_sets(self):\n\t\treturn bool(call_sdk_function('PrlUsrCfg_CanChangeSrvSets', self.handle))", "def handleCondorStatusService(self):\n if isCMSSWSupported(self.getCmsswVersion(), \"CMSSW_7_6_0\"):\n self.logger.info(\"Tag chirp updates from CMSSW with step %s\", self.step.data._internal_name)\n self.process.add_(cms.Service(\"CondorStatusService\",\n tag=cms.untracked.string(\"_%s_\" % self.step.data._internal_name)))\n\n return", "def ensure_cloudwatch_rule_for_replication(context, installed_region='us-east-1'):\n client = boto3.client('events', region_name=installed_region)\n cw_rule_name = utils.find_replication_cw_event_rule(context)\n current_state = client.describe_rule(Name=cw_rule_name)\n configurations = dynamo.list_configurations(context, installed_region)\n replication = False\n for cfg in configurations:\n if 'replication' in cfg and cfg['replication'] == 'yes':\n replication = True\n\n if replication and current_state['State'] == 'DISABLED':\n LOG.warn('Enabling snapshot replication due to configuration.')\n client.enable_rule(Name=cw_rule_name)\n elif not replication and current_state['State'] == 'ENABLED':\n LOG.warn('Disabling snapshot replication due to configuration.')\n client.disable_rule(Name=cw_rule_name)", "def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n 
additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if 
int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = 
ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. 
Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)", "def mmo_replica_state(self, mmo_connection):\n\n # https://docs.mongodb.org/manual/reference/replica-states/\n replica_states = [\n { \"id\": 0, \"name\": \"STARTUP\", \"description\": \"Not yet an active member of any set. All members start up in this state. The mongod parses the replica set configuration document while in STARTUP.\" },\n { \"id\": 1, \"name\": \"PRIMARY\", \"description\": \"The member in state primary is the only member that can accept write operations.\" },\n { \"id\": 2, \"name\": \"SECONDARY\", \"description\": \"A member in state secondary is replicating the data store. Data is available for reads, although they may be stale.\" },\n { \"id\": 3, \"name\": \"RECOVERING\", \"description\": \"Can vote. Members either perform startup self-checks, or transition from completing a rollback or resync.\" },\n { \"id\": 5, \"name\": \"STARTUP2\", \"description\": \"The member has joined the set and is running an initial sync.\" },\n { \"id\": 6, \"name\": \"UNKNOWN\", \"description\": \"The member's state, as seen from another member of the set, is not yet known.\" },\n { \"id\": 7, \"name\": \"ARBITER\", \"description\": \"Arbiters do not replicate data and exist solely to participate in elections.\" },\n { \"id\": 8, \"name\": \"DOWN\", \"description\": \"The member, as seen from another member of the set, is unreachable.\" },\n { \"id\": 9, \"name\": \"ROLLBACK\", \"description\": \"This member is actively performing a rollback. 
Data is not available for reads.\" },\n { \"id\": 10, \"name\": \"REMOVED\", \"description\": \"This member was once in a replica set but was subsequently removed.\" }\n ]\n\n if self.mmo_is_mongod(mmo_connection):\n return replica_states[mmo_connection[\"admin\"].command(\"replSetGetStatus\")[\"myState\"]]\n else:\n raise Exception(\"Not a mongod process\")", "def replication_check():\n\n try:\n entries = [os.path.join(current_app.config['REPLICATION_PACKETS_DIR'], e)\n for e in os.listdir(current_app.config['REPLICATION_PACKETS_DIR'])]\n except OSError as e:\n logging.warning(e)\n return Response(\"UNKNOWN \" + str(e), mimetype='text/plain')\n\n pattern = re.compile(\"replication-[0-9]+.tar.bz2$\")\n entries = filter(lambda x: pattern.search(x), entries)\n entries = filter(os.path.isfile, entries)\n entries = _sort_natural(entries)\n\n if len(entries) == 0:\n return Response(\"UNKNOWN no replication packets available\", mimetype='text/plain')\n\n resp = \"OK\"\n last = -1\n pattern = re.compile(\"replication-([0-9]+).tar.bz2$\")\n for entry in entries:\n m = pattern.search(entry)\n if not m:\n resp = \"UNKNOWN Unkown files in the replication directory\"\n break\n num = int(m.groups()[0])\n if last < 0:\n last = num - 1\n if last != num - 1:\n resp = \"CRITICAL Replication packet %d is missing\" % (num - 1)\n last = num\n\n if resp != \"OK\":\n return Response(resp, mimetype='text/plain')\n \n last_packet_age = time.time() - os.path.getmtime(entries[-1]) \n if last_packet_age > MAX_PACKET_AGE_CRITICAL:\n resp = \"CRITICAL Latest replication packet is %.1f hours old\" % (last_packet_age / 3600)\n elif last_packet_age > MAX_PACKET_AGE_WARNING:\n resp = \"WARNING Latest replication packet is %.1f hours old\" % (last_packet_age / 3600)\n\n return Response(resp, mimetype='text/plain')", "def check_all_server_power_state(self, state):\n api_data = request(\"get\", \"/serviceProfile\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n total_elements = 0\n for server in api_data[\"json\"][\"ServiceProfile\"][\"members\"]:\n if server[\"assoc_state\"] == \"associated\":\n api_data_c = request(\"get\", \"/power\",\n query={\"identifier\": str(server[\"path\"])})\n self.assertEqual(api_data_c['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' +\n str(api_data_c['status']))\n self.assertEqual(api_data_c[\"json\"][\"serverState\"], state,\n 'Server ' + str(server[\"path\"]) + ' reported power state ' +\n str(api_data_c[\"json\"][\"serverState\"]) + ' expected: ' + state)\n total_elements += 1\n self.assertGreater(total_elements, 0, \"Found zero elements\")", "def check_journaled(con, warning, critical,perf_data):\n\n warning = warning or 20\n critical = critical or 40\n try:\n data=get_server_status(con)\n journaled = data['dur']['journaledMB'] \n message=\"Journaled : %.2f MB\" % journaled\n message+=performance_data(perf_data,[(\"%.2f\"%journaled,\"journaled\",warning, critical)])\n return check_levels(journaled,warning, critical, message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def check_wms(self, service: Service, capabilities_only: bool = False):\n wms_helper = WmsHelper(service)\n\n if wms_helper.get_capabilities_url is not None:\n self.check_get_capabilities(wms_helper.get_capabilities_url)\n if not capabilities_only:\n wms_helper.set_operation_urls()\n if wms_helper.get_map_url is not None:\n self.check_service(wms_helper.get_map_url)\n\n if wms_helper.get_feature_info_url is 
not None:\n self.check_service(wms_helper.get_feature_info_url)\n\n if wms_helper.describe_layer_url is not None:\n self.check_service(wms_helper.describe_layer_url)\n\n if wms_helper.get_legend_graphic_url is not None:\n self.check_service(wms_helper.get_legend_graphic_url)\n\n if wms_helper.get_styles_url is not None:\n self.check_service(wms_helper.get_styles_url)", "def Check_SYSLOG(name):\n\n if name == None: return 0 # Syslog server is optional\n\n if nslookup(name)[0] != 0:\n add_info (name, SYSLOG_SERVER, \"cannot resolve SYSLOG server\")\n return 1\n\n if ping_machine(name) != 0:\n add_info(name, SYSLOG_SERVER, \"cannot ping SYSLOG server\")\n return 2\n add_info(name, SYSLOG_SERVER, \"OK\")\n return 0", "def verify_services(self):\n services = [\"metric_collector\", \"log_collector\"]\n service_version_9 = [\"lma_collector\"]\n pids = {}\n processes_count = {\n \"collectd \": 1,\n \"collectdmon \": 1\n }\n\n if self.settings.version.startswith(\"0.9\"):\n processes_count[\n \"hekad -config[= ]/etc/{}\".format(service_version_9)] = 1\n else:\n # Starting with 0.10, there are one collector for logs and one for\n # metrics\n for service in services:\n processes_count[\"hekad -config[= ]/etc/{}\".format(service)] = 1\n online_nodes = [node for node in self.helpers.get_all_ready_nodes()\n if node[\"online\"]]\n for node in online_nodes:\n pids[node[\"name\"]] = {}\n with self.env.d_env.get_ssh_to_remote(node[\"ip\"]) as remote:\n for process, count in processes_count.items():\n logger.info(\"Checking process {0} on node {1}\".format(\n process, node[\"name\"]\n ))\n pids[node[\"name\"]][process] = (\n self.checkers.check_process_count(\n remote, process, count))\n return pids" ]
[ "0.59834796", "0.5918712", "0.537204", "0.5249215", "0.52178967", "0.5167108", "0.5149234", "0.5141773", "0.5053718", "0.50021493", "0.49990278", "0.49700785", "0.48497504", "0.4836432", "0.48359025", "0.47858948", "0.4753289", "0.47522944", "0.4747612", "0.47456878", "0.4742173", "0.47218564", "0.4710541", "0.46857047", "0.46578512", "0.46370143", "0.46314391", "0.4629667", "0.46250433", "0.4613766" ]
0.59363264
1
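The MongoDB check in the row above resolves metric names of the form `x.y.z` by walking the nested `serverStatus` payload one key at a time. A minimal, self-contained sketch of that lookup pattern follows; the helper name and the sample payload are hypothetical and not part of the check itself:

def resolve_dotted(status, metric_name):
    # Walk a nested dict following a dotted path; give up if any key is missing.
    value = status
    for part in metric_name.split("."):
        try:
            value = value[part]
        except (KeyError, TypeError):
            return None
    return value

# Hypothetical serverStatus-like payload used only for illustration.
sample_status = {"opcounters": {"insert": 42, "query": 7}, "mem": {"resident": 128}}
assert resolve_dotted(sample_status, "opcounters.insert") == 42
assert resolve_dotted(sample_status, "mem.mapped") is None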
Traverses backwards through predecessors from end
def _deconstruct_path(predecessors, end): if end not in predecessors: return None current = end path = [] while current: path.append(current) current = predecessors.get(current) return list(reversed(path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __backward(self):\n if self.is_empty():\n raise StopIteration\n\n current = self._tail\n yield current._data\n while current._previ:\n current = current._previ\n yield current._data", "def reverse_iterative(self):\n \"\"\"O(n) / O(1) solution.\"\"\"\n pre = None\n current = self.head\n while current is not None:\n next = current.next\n current.next = pre\n pre = current\n current = next\n self.head = pre", "def __reversed__(self): \n yield from self._traverse_backward(self.root)", "def digraph_walker_backwards(graph, element, call_back):\r\n call_back(graph, element)\r\n for predecessor in graph.predecessors(element):\r\n call_back(graph, predecessor)\r\n for predecessor in graph.predecessors(element):\r\n digraph_walker_backwards(graph, predecessor, call_back)", "def __reversed__(self):\n if len(self) == 0:\n return\n\n # Create a list containing pointers to each\n # prev_node in the list.\n cur_node = self.head\n prev_nodes = [None]\n while cur_node != self.tail:\n prev_nodes.append(cur_node)\n cur_node = cur_node.next_node\n\n # Using the prev_nodes list, iterate backwards\n while cur_node is not None:\n for x in reversed(cur_node.data_list):\n yield x\n cur_node = prev_nodes[-1]\n del prev_nodes[-1]", "def backtrack(start, end, prev):\n backtracked = False\n curr_node = end\n # instantiate path as list with destination only\n path = [curr_node]\n while not backtracked:\n # retrieve previous node\n prev_node = prev[curr_node]\n # insert it at beginning of path\n path.insert(0, prev_node)\n # move onto previous node as current node for next iteration\n curr_node = prev_node\n # break loop if we reached start\n if curr_node == start:\n backtracked = True\n return path", "def reverse(self):\n current = self.head\n previous = None \n while current is not None:\n next_node = current.next_node \n current.next_node = previous\n current, previous = next_node, current \n self.head = previous", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def connect_backwards(self):\n\n for n in self.nodes:\n n.receives_from = []\n\n for n1 in self.nodes:\n for n2 in n1.sends_to:\n n2.receives_from.append(n1)", "def backward(self, top, propagate_down, bottom):\r\n pass", "def reverse(self):\n curr = self.head\n prev_node = None\n while curr:\n prev_node = curr.prev\n curr.prev = curr.next\n curr.next = prev_node\n curr = curr.prev\n self.head = prev_node.prev", "def _traverse_backward(self, node):\n if node is not None:\n yield from self._traverse_backward(node.right)\n yield node.data\n yield from self._traverse_backward(node.left)", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def reconstruct_path(source, target, predecessors):\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))", "def reverse(self):\n\n h = self.head\n previous = None\n while h is not None:\n next = h.next\n h.next = previous\n previous = h\n h = next\n\n self.head = previous\n\n # self.head.next = h\n # pass", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def test_remove_predecessors():\n assert remove_predecessors({\"A\": [\"B\", \"C\"]}, \"B\") == {\"A\": [\"C\"]}\n assert remove_predecessors({\"A\": [\"B\", \"C\"]}, \"D\") == {\"A\": [\"B\", 
\"C\"]}", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass" ]
[ "0.69886434", "0.69002724", "0.6687282", "0.659495", "0.6564393", "0.655781", "0.6412653", "0.63977957", "0.63977957", "0.63977957", "0.6395437", "0.63730013", "0.6367989", "0.6310967", "0.6306441", "0.6306441", "0.6306441", "0.63061464", "0.62905055", "0.6285904", "0.6272839", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229", "0.6269229" ]
0.7261165
0
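The `_deconstruct_path` document in the row above expects a `predecessors` mapping produced by some prior graph search. A minimal sketch of how such a mapping might be built with a breadth-first search and then unwound into a path; the graph and node names are made up for illustration:

from collections import deque

def bfs_predecessors(graph, start):
    # Map every node reachable from start to the node it was discovered from.
    predecessors = {start: None}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for neighbour in graph.get(node, []):
            if neighbour not in predecessors:
                predecessors[neighbour] = node
                queue.append(neighbour)
    return predecessors

# Hypothetical graph; reconstructing "D" from "A" walks D -> B -> A, then reverses.
graph = {"A": ["B", "C"], "B": ["D"], "C": ["D"]}
preds = bfs_predecessors(graph, "A")
path, current = [], "D"
while current is not None:
    path.append(current)
    current = preds[current]
print(list(reversed(path)))  # ['A', 'B', 'D']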
Recursively creates Level_Pair nodes from start up to end. Assumes that end's attack and strength are greater than start's. Neighbours for a node are stored in graph[node]. Distances between neighbours are stored in graph[nodeA][nodeB].
def populate_graph( graph, start, end, attack_bonus, strength_bonus): # Check if already created if start in graph: return graph[start] = dict() # Recursively create neighbouring level pairs if start.attack < end.attack: inc_attack = Level_Pair(start.attack + 1, start.strength) # Store level-up time graph[start][inc_attack] = level_time_average( start, Attack_Style.ATTACK, attack_bonus, strength_bonus) # Continue at next node populate_graph(graph, inc_attack, end, attack_bonus, strength_bonus) if start.strength < end.strength: inc_strength = Level_Pair(start.attack, start.strength + 1) # Store level-up time graph[start][inc_strength] = level_time_average( start, Attack_Style.STRENGTH, attack_bonus, strength_bonus) # Continue at next node populate_graph(graph, inc_strength, end, attack_bonus, strength_bonus)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def astar_map(map, start, end):\n\n # Create start and end node\n start_node = Node3(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node3(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n x = 0\n while len(open_list) > 0:\n x+=1\n # Get the current node\n current_node = open_list[0]\n \n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node.position == end_node.position:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for key in map:\n if key == tuple(current_node.position):\n for elem in map[key]:\n new_node = Node3(current_node, elem[0],elem[1])\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values \n child.g = current_node.g + child.cost\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n # Add the child to the open list\n open_list.append(child)", "def generate_adjacents(node):\n # Makes a dictionary where keys are current upper token positions and\n # values are the list of positions attainable from one slide move\n slide_moves = {}\n for key, value in node.boardstate.items():\n if value.isupper() and value != \"B\":\n slide_moves[key] = get_slide_moves(key, node.boardstate)\n\n # Append list of swing moves to get all moves\n moves_dict = {}\n #relevant_pieces = [name ]\n for key in slide_moves:\n all_moves = set(slide_moves[key] + get_swing_moves(key, slide_moves))\n moves_dict[key] = list(all_moves)\n\n # Convert from dictionary to list of list of tuples of the form:\n #[[(curr_move, next_move)...]...] 
where each tokens moves occupy a list\n moves_list = []\n for curr, news in moves_dict.items():\n moves_list.append([(curr, new) for new in news])\n\n # Get all combinations of moves and for each combo construct a new board state\n adjacent_states = []\n turns = list(product(*moves_list))\n\n for turn in turns:\n new_board = apply_turn(node, turn)\n if new_board:\n adjacent_states.append((turn, new_board))\n return adjacent_states", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n children.append(new_node)\n for child in children:\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def a_star_alg(self, p1: int, p2: int, max_level: int = 1000):\r\n \r\n # Create start and end node\r\n start_node = Node(None, p1, self.node_dict[p1])\r\n start_node.g = start_node.h = start_node.f = 0\r\n end_node = Node(None, p2, self.node_dict[p2])\r\n end_node.g = 
end_node.h = end_node.f = 0\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n closed_list = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n\r\n # Loop until you find the end\r\n level = 0\r\n while len(open_list) > 0 and level < max_level:\r\n level += 1\r\n\r\n # Get the current node (the node in open_list with the lowest cost)\r\n current_node = open_list[0]\r\n current_index = 0\r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n closed_list.append(current_node)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n distance = current_node.g\r\n current = current_node\r\n while current is not None:\r\n path.append(current.number)\r\n current = current.parent\r\n\r\n return path[::-1], distance # Return reversed path\r\n\r\n # Generate children\r\n children = []\r\n for new_number in self.road_tree[current_node.number]: # Adjacent nodes\r\n new_node = Node(current_node, new_number, self.node_dict[new_number])\r\n children.append(new_node)\r\n\r\n # Loop through children\r\n for child in children:\r\n append_to_open_list = False\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + self.road_dict[(current_node.number, child.number)]\r\n child.h = sqrt((child.x - end_node.x) ** 2 + (child.y - end_node.y) ** 2) / 200\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the closed list\r\n closed_list, append_to_open_list = self.check_in_list(child, closed_list, append_to_open_list)\r\n\r\n # Child is already in the open list\r\n open_list, append_to_open_list = self.check_in_list(child, open_list, append_to_open_list)\r\n\r\n # Add the child to the open list\r\n if append_to_open_list:\r\n open_list.append(child)\r\n\r\n return [], 1e10", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n\n # Loop 
through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n \n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = 
current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n open_list.append(child)", "def traverse_graph_start_end_extra_node_heuristic(graph):\n\n tree = traverse_graph_start_end_extra_node(graph)\n\n # delete the indexes\n\n for st in tree.subtrees():\n if \"start\" in st.label() or \"end\" in st.label():\n new_label = re.sub(r'[0-9]+', '', st.label())\n st.set_label(new_label)\n\n return tree", "def astar(maze, start, end):\n\n # Create start and end \n start_node = node.Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = node.Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n \n for new_position in [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (1, 0, 0), (-1, 0, 0)]: # Adjacent squares\n \n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1], current_node.position[2] + new_position[2])\n\n # Make sure within range\n if node_position[1] > (len(maze[0]) - 1) or node_position[1] < 0 or node_position[2] > (len(maze[0][len(maze)-1]) -1) or node_position[2] < 0 or node_position[0] < 0 or node_position[0] > len(maze) - 1:\n continue\n \n # Make sure walkable terrain\n if node_position == end_node.position:\n new_node = node.Node(current_node, node_position)\n \n # Append\n children.append(new_node)\n continue\n \n if maze[node_position[0]][node_position[1]][node_position[2]] != 0:\n continue\n\n # Create new node\n new_node = node.Node(current_node, 
node_position)\n \n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n break\n else:\n # Create the f, g, and h values\n child.g = current_node.g + 1\n # H: Manhattan distance to end point\n child.h = abs(child.position[0] - end_node.position[0]) + abs(child.position[1] - end_node.position[1])\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n # check if the new path to children is worst or equal \n # than one already in the open_list (by measuring g)\n if child == open_node and child.g >= open_node.g:\n break\n else:\n # Add the child to the open list\n open_list.append(child)", "def astar(maze, start, end):\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n for i in open_list:\n jazda1(kratka,i.position)\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0)]: # Adjacent squares\n\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (len(maze) - 1) or node_position[0] < 0 or node_position[1] > (len(maze[len(maze)-1]) -1) or node_position[1] < 0:\n continue\n\n # Make sure walkable terrain\n if maze[node_position[0]][node_position[1]] != 0:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n child.h = ((abs(child.position[0] - end_node.position[0]))) + ((abs(child.position[1] - end_node.position[1])) )\n child.f = child.g + child.h\n \n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n continue\n\n # Add the child to the open list\n if(child not in open_list):\n open_list.append(child)", "def astar(maze, start, end, agents):\r\n\r\n # Create start and end node\r\n start_node = Node(None, start)\r\n end_node = Node(None, end)\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n open_pos = []\r\n closed_pos = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n open_pos.append(start)\r\n\r\n # Loop until you find the end\r\n while len(open_list) > 0:\r\n\r\n # Get the current node\r\n current_node = open_list[0]\r\n current_index = 0\r\n \r\n for index, item in enumerate(open_list):\r\n if item.f 
< current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n open_pos.pop(current_index)\r\n closed_pos.append(current_node.position)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n\r\n current = current_node\r\n while current is not None:\r\n path.append(current.position) \r\n current = current.parent\r\n\r\n return path[::-1] # Return reversed path\r\n\r\n # # Generate children\r\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0)]: # Adjacent squares\r\n \r\n # Get node position\r\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\r\n\r\n # Make sure within range\r\n if node_position[0] > maze.shape[0]-1 or node_position[0] < 0 or node_position[1] > maze.shape[1]-1 or node_position[1] < 0:\r\n continue\r\n\r\n # Make sure walkable terrain\r\n if maze[node_position[0]][node_position[1]] == 0:\r\n continue\r\n\r\n if not validataPath(current_node, node_position, agents):\r\n continue\r\n\r\n # Create new node\r\n child = Node(current_node, node_position)\r\n\r\n if node_position not in closed_pos:\r\n child = Node(current_node, node_position)\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + 1\r\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the open list\r\n if node_position in open_pos:\r\n index = open_pos.index(node_position)\r\n if open_list[index].g > child.g:\r\n open_list[index] = child\r\n\r\n # Add the child to the open list\r\n else:\r\n open_list.append(child)\r\n open_pos.append(node_position)\r\n\r\n return None", "def dfs(g: nx.Graph, start_node: Any) -> str:\n\n way = []\n stack = [start_node]\n y = {node: [] for node in g.nodes}\n while stack:\n elem = stack.pop()\n way.append(elem)\n for node in list(g.neighbors(elem)):\n if node not in way:\n stack.append(node)\n y[node].extend((*y[elem], elem))\n print(y)\n return \"\".join(way)", "def build_auxiliary_structures(nodes_filename, ways_filename):\n nodes = {}\n for way in read_osm_data(ways_filename):\n highway_type = way['tags'].get('highway', '( ͡° ͜ʖ ͡°)')\n if highway_type in ALLOWED_HIGHWAY_TYPES:\n nodes_along_way = way['nodes'] # List of nodes along this way\n for i in range(len(nodes_along_way) - 1):\n # A pair of adjacent nodes along this way\n left = nodes_along_way[i]\n right = nodes_along_way[i + 1]\n default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type]\n # If this way doesn't have a speed limit tag, we use the default value based on highway type\n speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit)\n\n def build_data(root, adjacent):\n \"\"\"\n root: ID of some node along way\n adjacent: ID of some node adjacent to root node along way\n \"\"\"\n new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # Init dict for node data structure\n root_data = nodes.get(root, new_node_data_struct)\n # There might be another way where root and adjacent are directly adjacent, so our\n # speed limit is the max of the speed limits of those two ways:\n root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit)\n nodes[root] = root_data # Add the data on root to our dictionary of node data\n\n build_data(left, right)\n if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes':\n # If this isn't a oneway way, we can build the data 
structure for the next node as well\n build_data(right, left)\n elif right == nodes_along_way[-1]:\n # In non-oneway ways, the above build_data(right, left) call creates the data structure\n # for the final node at the same time as the penultimate one. However, in the case of a\n # oneway path, we have to do it manually:\n nodes[right] = nodes.get(right, {'adjacent': {}})\n\n for node in read_osm_data(nodes_filename):\n id = node['id']\n if id in nodes:\n # If the id of this node in the generator was on a valid way, we add the data about that node\n # to its dictionary in nodes.\n # Add lat/lon data\n nodes[id]['lat'] = node['lat']\n nodes[id]['lon'] = node['lon']\n\n return nodes", "def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def create_basic_adjacency_map_2():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\"],\n \"E\": [\"X\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def shortest_path(start, end):\n\n\tmoves = rubik.quarter_twists\n\n\t# Parent nodes: (Parent_State, move)\n\tstartParents = {}\n\tstartParents[start] = None # Start state has no parent\n\n\t# Parent nodes: (Parent_State, move)\n\tendParents = {}\n\tendParents[end] = None # End state has no parent\n\n\tstartFrontier = [] # Current frontier in start BFS\n\tendFrontier = [] # Current frontier in end BFS\n\n\tstartFrontier.append(start) # Add start state as first and only node to generate next frontier\n\tendFrontier.append(end) # Add end state as first and only node to generate next frontier\n\n\tif end in startParents:\n\t\treturn [] # Start == End : No moves required\n\n\t# We only have to search at 
most 14 levels in BFS\n\t# Two-way BFS therefore requires 7 concurrent levels from both states\n\tfor i in range(7):\n\n\t\tstartNextFrontier = [] # New empty set for new frontier to be discovered\n\t\tfor state in startFrontier: # Iterate through each rubiks state in this frontier\n\t\t\tfor move in moves: # Apply each move to this state\n\t\t\t\tnextState = rubik.perm_apply(move, state)\n\n\t\t\t\t# Makes sure this new state is not already in the Graph\n\t\t\t\t# This skips nodes that were already permuted in another path,\n\t\t\t\t# essentially trimming the Graph's leaves\n\t\t\t\tif nextState not in startParents:\n\t\t\t\t\tstartParents[nextState] = (state, move) # Store this state's parent + move\n\t\t\t\t\tstartNextFrontier.append(nextState) # Create a node in the next frontier\n\t\t\t\t\n\t\t\t\t# Intersect of both Graphs, Intermediate state of path found\n\t\t\t\tif nextState in endParents:\n\t\t\t\t\treturn solution(startParents, endParents, nextState)\n\n\t\tstartFrontier = startNextFrontier # Make the next frontier the current one\n\n\t\tendNextFrontier = [] # New empty set for new frontier to be discovered\n\t\tfor state in endFrontier: # Iterate through each rubiks state in this frontier\n\t\t\tfor move in moves: # Apply each move to this state\n\t\t\t\tnextState = rubik.perm_apply(move, state)\n\n\t\t\t\t# Makes sure this new state is not already in the Graph\n\t\t\t\t# This skips nodes that were already permuted in another path,\n\t\t\t\t# essentially trimming the Graph's leaves\n\t\t\t\tif nextState not in endParents:\n\t\t\t\t\tendParents[nextState] = (state, move) # Store this state's parent + move\n\t\t\t\t\tendNextFrontier.append(nextState) # Create a node in the next frontier\n\t\t\t\t\n\t\t\t\t# Intersect of both Graphs, Intermediate state of path found\n\t\t\t\tif nextState in startParents:\n\t\t\t\t\treturn solution(startParents, endParents, nextState)\n\n\t\tendFrontier = endNextFrontier # Make the next frontier the current one\n\n\treturn None", "def connectNodes(imgR,nodes,start,goal):\n alphabet = string.ascii_lowercase\n nodeConnections = [[] for i in range(len(nodes)+2)]\n for index, node in enumerate(nodes):\n paths = adjPaths(imgR,node)\n for path in paths:\n result = checkPath(imgR,nodes,node,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[index+1].append(alphabet[nIndex+1])\n paths = adjPaths(imgR,start) # add start to nodes\n for path in paths:\n result = checkPath(imgR,nodes,start,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[0].append(alphabet[nIndex+1])\n for node in nodeConnections[0]:\n nodeConnections[alphabet.index(node)].append(alphabet[0])\n paths = adjPaths(imgR,goal) # add goal to nodes\n for path in paths:\n result = checkPath(imgR,nodes,goal,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[len(nodeConnections)-1].append(alphabet[nIndex+1])\n for node in nodeConnections[len(nodeConnections)-1]:\n nodeConnections[alphabet.index(node)].append(alphabet[len(nodeConnections)-1])\n return [alphabet[i] for i in range(len(nodes)+2)], nodeConnections", "def get_subgraph_between_nodes(self, start, end):\n nodes = set()\n nodes.add(start)\n\n to_visit = set()\n to_visit.add(start)\n\n while len(to_visit) > 0:\n current_visit = copy.copy(to_visit)\n for tv in current_visit:\n to_visit.remove(tv)\n if tv is not end:\n for s in self.successors(tv):\n to_visit.add(s)\n nodes.add(s)\n\n nodes.add(end)\n\n return self.subgraph(nodes)", "def 
create_complete_graph(pair_weights, flip_weights=True):\n g = nx.Graph()\n for k, v in pair_weights.items():\n wt_i = -v if flip_weights else v\n g.add_edge(k[0], k[1], attr_dict={\"distance\": v, \"weight\": wt_i})\n return g", "def get_2_step_neighbours(node):\n for i in range(len(node)):\n yield node[0:i] + (flip(node[i]),) + node[i+1:]\n\n for i, j in itertools.permutations(range(len(node)), 2):\n if i < j:\n yield node[0:i] + (flip(node[i]),) + node[i+1:j] + (flip(node[j]),) + node[j+1:]", "def shortest_path(graph, start, end):\n # Tree will contain both the final path and some other paths in reverse.\n # Each value is a 2-tuple; the first item is the depth of the key, and the\n # second is the parent of the key.\n tree = {start: (0, None)}\n\n # next_edges is a min_heap used as a priority queue; the next item in it\n # will always be the node adjacent to the growing tree that adds the least\n # to the total depth of the branch.\n next_edges = []\n\n # Add all of the edges adjacent to the start to next_edges.\n for edge in graph[start]:\n heapq.heappush(next_edges, edge + (start, ))\n\n # Loop until we run out of edges or we find the end (see condition below).\n while len(next_edges) > 0:\n depth, child, parent = heapq.heappop(next_edges)\n\n # Ignore edges connecting to children nodes already in the tree.\n if child in tree:\n continue\n\n tree[child] = (depth, parent)\n\n # Add the edges from the new node to the list of possible edges.\n for length, new_child in graph[child]:\n if new_child not in tree:\n heapq.heappush(next_edges, (depth + length, new_child, child))\n\n # If we found the end, flush the next_edges queue and end the search.\n if child is end:\n next_edges = []\n break\n\n # Construct the forward path from start -> end from the tree.\n path = []\n node = end\n while node is not None:\n path.append(node)\n node = tree[node][1]\n\n path.reverse()\n\n total_depth = tree[end][0]\n return (total_depth, path)", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def offset_graph():\n pylon_graph = graph.graph()\n base = square(ORIGIN, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"base\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n return pylon_graph", "def find_reachable_nodes_from(self, start_node, **kwargs):\r\n\t\treturn BreadthFirstTraverser(start_node, **kwargs)", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, 
traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n 
match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def constrained_offset_graph(length=10000, levels=10):\n LEVELS = levels\n LENGTH = length\n pylon_graph = graph.pylon_graph()\n base = square(ORIGIN, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"line\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n if i == 10:\n level_ids = pylon_graph.add_nodes(level, \"line\")\n else:\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n return pylon_graph", "def Dijkstra2(node_init, node_end, graph):\n\n ### Parameter initialisation\n node_list = list(graph.vertices.keys())\n dist = np.full(len(node_list), -np.inf)\n # At the beginning we have not reached the end_node\n node_end_reached = False\n # At the beginning, we assume there is a shortest path:\n no_path = False\n \n # 
Initialising the distances of the nodes\n dist[node_init] = 0\n # Setting the father_node which contains the provenance of the nodes\n father_node = np.full(len(node_list), -np.inf)\n # Initialising the current node\n current_node = node_init \n # Initialising the dictionnary of fixed node which has the following shape:\n #{fixed_node: (previous_node, iteration, cost)}\n # Fixing the number of iterations\n k = 0\n dict_fixed_node = {node_init:(None,k, 0)}\n \n # In the trivial case where the two nodes are identical\n if node_init == node_end:\n cost = 0\n shortest_path = [node_init]\n no_path = False\n return cost, shortest_path, no_path\n \n # While the end node has not been reached\n while not node_end_reached:\n current_node_adj = graph.node_get_adj(current_node).copy()\n # We get rid off the nodes that have been fixed, except at the first iteration\n if k != 0:\n current_node_adj.remove(dict_fixed_node[current_node][0])\n ## Updating the distances : either the node are neighbors and \n # something might change, either they are not, and their distance \n # does not change.\n # For the neighbors node\n for e in current_node_adj:\n dist_temp = dist[current_node] + graph.weights[(current_node, e)]\n # We change the distance only if it is lower than it used to be\n # otherwise, we keep it\n if dist_temp < dist[e] or dist[e] == -np.inf:\n dist[e] = dist_temp\n # Setting the father node\n father_node[e] = current_node\n father_node[current_node] = None\n # We set the distance of the current node to 0\n dist[current_node] = 0 \n # Index and distances which are not 0 and not minus infty\n sub_dist_index = [i for i, e in enumerate(dist) if e > 0]\n sub_dist_value = np.array([e for i, e in enumerate(dist) if e > 0])\n # If these two lists are empty, we stop the algorithm and that means\n # that we cannot reach our point\n if not sub_dist_index or sub_dist_value.size == 0:\n no_path = True\n cost = 'impossible path'\n shortest_path = 'impossible path'\n break\n # Now we need to set our choice for the next node\n if np.array_equal(sub_dist_value, np.ones(len(sub_dist_value))):\n ## If there are only ones : we pick them up randomly\n current_node = int(random.choice(list(sub_dist_index)))\n min_dist = sub_dist_value.min()\n else:\n ## If not we just pick up the one with the minimum distance.\n current_node = sub_dist_index[sub_dist_value.argmin()]\n min_dist = sub_dist_value.min()\n # Adding this node to the dictionnary\n dict_fixed_node[current_node] = (int(father_node[current_node]), k, min_dist)\n # If the end_node has been reached, we stop the search algorithm\n if node_end in dict_fixed_node.keys():\n node_end_reached = True\n # Incrementing the counter\n k += 1\n #print('current_node : {}'.format(current_node))\n #print(dict_fixed_node)\n # Now we need to get the shortest path from our iterations whose information \n # are in dict_fixed_node. 
To do this, we need to circle back from the end_node\n # to the init_node in this dictionnary.\n # This is done only if some path between node_init and end_node exists.\n if no_path == False:\n list_father_node = list(dict_fixed_node.values())\n previous_node = list_father_node[-1][0]\n shortest_path = [node_end, previous_node]\n # While the initial node has not been reached, we add the relevant\n # nodes to our shortest path\n while previous_node != node_init:\n previous_node = dict_fixed_node[previous_node][0]\n shortest_path.append(previous_node)\n \n # Computing the cost of this shortest path in terms of weights\n cost = int(dict_fixed_node[node_end][2])\n \n return cost, shortest_path, no_path", "def astar(image, start, end, avg_dist):\n\n # Create start and end node\n start_node = Node(None, start)\n end_node = Node(None, end)\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f <= current_node.f:\n current_node = item\n current_index = index\n if debug:\n print(\"Chosen:\"+str(current_node.position)+\"Cost:\" + str(current_node.f))\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n child_num = 0\n for new_position in [(0, -1), (0, 1), (1, 0), (-1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]: # Adjacent squares\n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\n\n # Make sure within range\n if node_position[0] > (image.shape[1] - 1) or node_position[0] < 0 or node_position[1] < 0 or node_position[\n 1] > (image.shape[0] - 1):\n if debug:\n print(\"beyond range\")\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n child_num += 1\n\n # adding all nodes and assigning extra cost for an ink cut later\n if child_num == 0:\n new_node = Node(current_node, (current_node.position[0], current_node.position[1] + 1))\n children.append(new_node)\n if debug:\n print(\"must cut through line\")\n\n # Loop through children\n for child in children:\n move_on = 0\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n if debug:\n print(str(node_position)+\"in closed list\")\n move_on = 1\n break\n if move_on == 1:\n continue\n # Create the f, g, and h values\n if child.position[1] - current_node.position[1] != 0 and child.position[0] - current_node.position[0] != 0:\n child.n = 14\n else:\n child.n = 10\n child.h = (np.abs(child.position[0] - end_node.position[0])**2) + (np.abs(child.position[1] - end_node.position[1])**2)\n child.v = np.abs(child.position[0] - start_node.position[0]) # cost for horizontal movement\n child.d, child.d2 = blocker_dist(child, image, debug=debug)\n if image[child.position[1], child.position[0]] > 0.9:\n child.m = 1000\n child.g = current_node.g + child.n + child.v + child.m + child.d + child.d2\n child.f = child.g + child.h # heuristic still needed to speed up computations\n if debug:\n 
print(child.position, child.f)\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node:\n move_on = 1\n if child.f < open_node.f:\n # open_node.position = child.position\n open_node.parent = child.parent\n open_node.g = child.g\n open_node.f = child.f\n break\n\n if move_on == 1:\n continue\n\n # Add the child to the open list\n open_list.append(child)" ]
[ "0.58267266", "0.5793475", "0.56011593", "0.55371314", "0.55277735", "0.55161214", "0.55161214", "0.55161214", "0.54637635", "0.5456182", "0.5430679", "0.5403021", "0.5386815", "0.53318155", "0.5256505", "0.5214026", "0.5162665", "0.51556855", "0.5142307", "0.5126955", "0.5087093", "0.5069114", "0.50669694", "0.5053542", "0.5027528", "0.50077564", "0.49944353", "0.4976715", "0.49651432", "0.49625945" ]
0.7375074
0
Runs simulations to determine time (ticks) to level up attack or strength
Enemy is set as sand crab (60hp, 1 def, 0 def bonus)
Weapon is best available scimitar
def level_time_simulate(start_levels, attack_style, attack_bonus, strength_bonus):
    ticks_per_attack = 4  # Scimitar attack speed
    enemy_health = 60     # Sand crab health
    max_hit, accuracy = get_max_hit_and_accuracy(
        start_levels, attack_style, attack_bonus, strength_bonus)

    if attack_style == Attack_Style.ATTACK:
        start_exp = osrs.experience[start_levels.attack]
        end_exp = osrs.experience[start_levels.attack+1]
    elif attack_style == Attack_Style.STRENGTH:
        start_exp = osrs.experience[start_levels.strength]
        end_exp = osrs.experience[start_levels.strength+1]

    experience = end_exp - start_exp
    avg_ticks = combat_simulator.ticks_until_exp(max_hit, accuracy,
                                                 ticks_per_attack, enemy_health,
                                                 experience, osrs.BASE_EXP_PER_DAMAGE,
                                                 ITERATIONS)
    return avg_ticks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testrandom(self):\n for i in range(100):\n WeaponAbility()", "def scenario1(height, speed):\n time = math.sqrt((2 * height) / 9.81)\n result = speed * time\n return result", "def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * ['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! \", end='')\n show_health(health_meter)", "def attack(health_meter):\n hit_list = 4 * ['player'] + 6 * ['enemy']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"ATTACK! \", end='')\n show_health(health_meter)", "def runSim(self):\n self.simKillResults = {}\n self.simHitResults = {}\n if self.fromArmy == False:\n self.attackingSquad = copy.deepcopy(squad.Squads[self.attackingSpin.get()])\n for num in range(eval(self.simulationSpin.get())):\n defSquad = copy.deepcopy(squad.DefSquads[self.defendingSpin.get()])\n result = self.attackingSquad.squadFire(defSquad)\n if result[0] not in self.simHitResults:\n self.simHitResults[result[0]] = 0\n self.simHitResults[result[0]] += 1\n if result[1] not in self.simKillResults:\n self.simKillResults[result[1]] = 0\n self.simKillResults[result[1]] += 1\n self.simResultsFrame = Frame(self.__mainWindow, padx=15, pady=15)\n self.simResultsFrame.grid(row=2,column=0,sticky=\"nsew\")\n self.hitResultsFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.hitResultsFrame.grid(row=0, column=0,sticky=\"nsew\")\n self.killResultsFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.killResultsFrame.grid(row=0, column=1,sticky=\"nsew\")\n self.maxPosFrame = Frame(self.simResultsFrame, padx=10, pady=15)\n self.maxPosFrame.grid(row=1, sticky=\"nsew\")\n numHitPoss = 0\n numWoundsPoss = 0\n if isinstance(self.attackingSquad, squad.Squad):\n for unit in self.attackingSquad.units:\n numHitPoss += eval(unit.ranged_weapon.attacks)\n else:\n for i in range(self.attackingSquad.current_size):\n for weapon in self.attackingSquad.ranged_weapons:\n numHitPoss += eval(weapon.attacks)\n for unit in squad.DefSquads[self.defendingSpin.get()].units:\n numWoundsPoss += unit.wounds\n rf = 1\n Label(self.hitResultsFrame, text=\"{} hits possible\".format(min(numWoundsPoss,numHitPoss)), font=__item_format__).grid(row=0)\n for hit in self.simHitResults:\n percent = self.simHitResults[hit]/eval(self.simulationSpin.get())*100\n t = \"{} hits: {:6.2f}%\".format(hit, percent)\n Label(self.hitResultsFrame, text=t, font=__item_format__).grid(row=rf)\n rf+=1\n Label(self.killResultsFrame, text=\"{} kills possible\".format(defSquad.current_size), font=__item_format__).grid(row=0)\n for kill in self.simKillResults:\n percent = self.simKillResults[kill]/eval(self.simulationSpin.get())*100\n t = \"{} kills: {:6.2f}%\".format(kill, percent)\n Label(self.killResultsFrame, text=t, font=__item_format__).grid(row=rf)\n rf+=1", "def test_reward_tiers_after_battle(self):\n self.alice.loyalists = 350\n self.conf[\"game\"][\"rewardtiers\"] = {\n \"newcomer\": {\n \"begin\": 0,\n \"end\": 300,\n \"reward\": 25,\n }\n }\n self.assertEqual(self.alice.loyalists, 350)\n self.assertEqual(self.bob.loyalists, 100)\n\n s1 = self.battle.create_skirmish(self.alice, 50)\n s1.react(self.bob, 50, troop_type=\"cavalry\")\n\n self.end_battle(self.battle, self.conf)\n\n # Bob wins the fight and the war\n 
self.assertEqual(self.battle.victor, self.bob.team)\n\n # Alice doesn't match a tier, gets the default 10%/15% settings\n # which mean 10% for her -> 5 troops\n self.assertEqual(self.alice.loyalists, 355)\n # Bob matches the 0-300 tier. Because he used 50% of his troops, he\n # gets 50% of the flat gain -> 12 troops\n self.assertEqual(self.bob.loyalists, 112)", "def run_simulation(self, state):\n \"*** YOUR CODE HERE ***\"\n player = 0\n visited_states = [(player, state)]\n depth_limited = self.depth != -1\n depth = self.depth\n expand = True\n while not visited_states[-1][1].isWin() and not visited_states[-1][1].isLose():\n if depth_limited and depth == 0: break\n state = self.UCB1(state, player) # Selection & Simulation\n if expand and state not in self.plays: # Expansion\n expand = False\n self.plays[state] = 0\n self.wins[state] = 0\n visited_states.append((player, state))\n player = (player + 1) % state.getNumAgents()\n if not expand and depth_limited and player == 0: depth -= 1\n \n for player, state in visited_states:\n if state in self.plays: # Not simulated nodes\n self.plays[state] += 1\n eval = self.evaluationFunction(visited_states[-1][1])\n if depth_limited:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] -= eval\n else:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] += (1 - eval)", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def main():\r\n y1, y2 = 0, -s.screen_height\r\n clock = pg.time.Clock()\r\n time1 = time() # records time that game starts\r\n FPS = 50\r\n level = 1 # game starts on first level\r\n\r\n # first run the start menu\r\n score = start_menu()\r\n\r\n # creates the players' sprites\r\n ship1 = player.spaceship(s.p1_image, False)\r\n\r\n if s.two_player:\r\n ship2 = player.spaceship(s.p2_image, False)\r\n else:\r\n ship2 = 0\r\n\r\n # while player is still alive\r\n while ship1.lives > 0:\r\n clock.tick(FPS) # set the games FPS\r\n helpers.refresh_hearts(ship1.lives)\r\n\r\n # calculates total time played, excluding time spent paused\r\n timer = time() - time1 - s.time_paused\r\n # meteor falling speed gradually increases as time goes on\r\n speed_increase_factor = 100\r\n speed_increase = timer / speed_increase_factor\r\n # checks for collisions between meteors and players\r\n sprites.sprite_collision(ship1, ship2, s.meteor_group)\r\n # checks for colliisions between power ups and players\r\n powerup_collision = pg.sprite.spritecollide(ship1, s.powerup_group, True)\r\n # if collision has occured, player earns money\r\n powerup_reward = 500\r\n for sprite in powerup_collision:\r\n ship1.money += powerup_reward\r\n\r\n if ship2 != 0:\r\n powerup_collision2 = 
pg.sprite.spritecollide(ship2, s.powerup_group, True)\r\n for sprite in powerup_collision2:\r\n ship1.money += powerup_reward\r\n\r\n # updates all the sprites\r\n helpers.update_screen(ship1, ship2, y1, y2, score, level)\r\n _, score = sprites.update_sprites(s.meteor_group, speed_increase, score)\r\n _, score = sprites.update_sprites(s.powerup_group, speed_increase, score)\r\n\r\n # moves every bullet up the screen, disappearing if it goes over the top\r\n for bullet in s.bullet_group:\r\n bullet.update()\r\n\r\n # list of meteors hit by a bullet\r\n rocks_shot = pg.sprite.groupcollide(s.meteor_group, s.bullet_group, False, True)\r\n\r\n for rock in rocks_shot:\r\n # destroys the meteor and randomly spawns a coin in its place\r\n rock.get_shot()\r\n # score gain for hitting a meteor\r\n score_increase = 200 if s.double_p else 100\r\n score += score_increase\r\n\r\n # despawns coins if they haven't been collected after a given time\r\n if time() >= s.coin_despawn_time:\r\n s.coin_group.empty()\r\n\r\n s.coin_group.draw(screen)\r\n coin_bonus = 50 # money gained from picking up a coin\r\n coins_collected = pg.sprite.spritecollide(ship1, s.coin_group, True)\r\n if ship2 != 0:\r\n coins_collected += pg.sprite.spritecollide(ship2, s.coin_group, True)\r\n\r\n # player gains money for every coin picked up\r\n for sprite in coins_collected:\r\n ship1.money += coin_bonus\r\n\r\n # if all meteors are destroyed, initiate the boss fight\r\n if len(s.meteor_group) == 0:\r\n boss.boss_fight(ship1, ship2, level)\r\n # player advances to the next level\r\n level += 1\r\n s.num_meteors += 10\r\n s.double_p = False\r\n # shows level on the screen\r\n helpers.display_text(\r\n (\"Level \" + str(level)),\r\n \"centerx\",\r\n \"centery\",\r\n screen.get_rect().centerx,\r\n screen.get_rect().centery,\r\n )\r\n pg.display.update()\r\n sleep(1)\r\n # once boss is defeated, enter the shop to buy upgrades\r\n shop(ship1, ship2)\r\n\r\n # draw all sprites\r\n sprites.add_sprites(s.meteor_group, s.powerup_group)\r\n s.meteor_group.draw(screen)\r\n s.bullet_group.draw(screen)\r\n s.powerup_group.draw(screen)\r\n\r\n # makes the screen constantly scroll, giving the illusion of movement\r\n scroll_speed = 0.5 + timer * 0.001\r\n y1 += scroll_speed\r\n y2 += scroll_speed\r\n if y1 > s.screen_height:\r\n y1 = -s.screen_height\r\n if y2 > s.screen_height:\r\n y2 = -s.screen_height\r\n\r\n # show score and number of lives in the corner of the screen\r\n helpers.display_text((str(int(timer ** 2) + int(score))), \"left\", \"top\", 10, 10)\r\n s.heart_group.draw(screen)\r\n pg.display.update()\r\n\r\n # if player runs out of lives, game ends\r\n clock.tick(30) # Sets FPS to 30\r\n game_over(score, ship1, ship2, timer)", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def ninja_turn():\r\n\tglobal men\r\n\tl = [chop, fly, firebreath]\r\n\tx = randint(0,3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn ninja.hit(*l[x])\r\n\telse:\r\n\t\tmen += ninja.sleep(*nsleep)\r\n\t\treturn 0", "def run_func():\n global gain\n if random.random() < 0.8:\n if random.random() < .9:\n gain = random.randint(0, 5) #For example, you have an 80% chance of gaining between 1 and 9 yards on a run play, and you have about a 72% chance to gain between 1 and 6 yards\n else:\n gain = random.randint(6, 9)\n elif random.random() > 0.8 and random.random() < 0.97:\n if random.random() < 0.3:\n gain = random.randint(10, 20)\n else:\n gain = random.randint(-5, -1)\n elif random.random() > 0.97 
and random.random() < 0.99:\n gain = random.randint(21, 30)\n else:\n if random.random() < .8:\n gain = random.randint(31, 45)\n else:\n gain = random.randint(46, 99)", "def play_game(self):\n TF = self.TF\n # keep updating\n actions = collections.defaultdict(dict)\n for i in range(10):\n for j in range(self.N):\n actions[i][j] = 0\n\n sums = []\n for time in range(self.MAX):\n print(\"begin time epoch: \" + str(time))\n train_state_pool = collections.defaultdict(dict)\n flow_num = 0\n sum_all = 0\n for i in TF.keys():\n for j in TF[i].keys():\n for agent in self.Ns:\n actions[flow_num][agent.id] = random.randint(0, agent.n_actions - 1)\n\n # update states to ss_\n sum_all = self.update_state(flow_num, actions)\n\n flow_num += 1\n\n sums.append(sum_all)\n print('cut-random: ' + str(sum_all))\n if time % 10000 == 0 and time != 0:\n str1 = 'cut-mini-random' + str(time) + '.txt'\n file = open(str1, 'w')\n file.write(str(sums))\n file.close()", "def RunTurn( lobound=1, hibound=20 ):\n\tpass", "def test_do_boss_science(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['boss_science'])\n nExp = 2\n self._do_boss_science(nExp, 35, 0, 0, nExp=nExp)", "def testrandom(self):\n for i in range(100):\n AmuletAbility()", "def simulate(self):\n while self.character_1.is_alive and self.character_2.is_alive:\n # flip a coin (0,1), if 1 player 1 attacks\n if random.randint(0, 1):\n self.turn(self.character_1, self.character_2)\n else:\n self.turn(self.character_2, self.character_1)\n\n print('_____-----<< -*- >>-----_____')\n time.sleep(.5)\n # if a character dies print final stats of winner\n if self.character_1.is_alive:\n print(f'{self.character_1.name} has won!! o.o7\\nfinal stats:')\n print(self.character_1)\n else:\n print(f'{self.character_2.name} has won!! 
o.o7\\nfinal stats:')\n print(self.character_2)", "def takeHit(self, amount, type, enemyShip):\n if type == 'energy':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount\n amount = 0\n else:\n amount -= self.currentSP\n self.currentSP = 0\n # go through armor next\n if self.currentAP > 0 and amount > 0:\n # set experience only if shot goes through shields\n if self.typeAP == 'energy':\n if self.currentAP >= (amount * globals.reflectiveArmorModifier):\n self.currentAP -= (amount * globals.reflectiveArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.reflectiveArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n elif type == 'impact':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount/2\n amount = amount/2\n else:\n amount -= self.currentSP\n self.currentSP = 0\n \n # now goto armor\n if self.currentAP > 0 and amount > 0:\n if self.typeAP == 'impact':\n if self.currentAP >= (amount * globals.impactArmorModifier):\n self.currentAP -= (amount * globals.impactArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.impactArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n \n # now that shields and armor are taken care of transfer remaining damage to internal components\n self.myParent.setExperience(amount, enemyShip)\n componentDamage = 0\n if amount > 0 and self.components != {}:\n while amount > 0:\n keyList = funcs.sortStringList(self.components.keys())\n componentDamage = 1\n for componentID in keyList:\n component = self.components[componentID]\n if component.currentHP > amount:\n component.currentHP -= amount\n amount = 0\n break\n elif component.currentHP > 0:\n # remove component\n amount -= component.currentHP\n del self.components[componentID]\n \n # check if all components destroyed, or damage absorbed\n if self.components == {} or amount == 0:\n break\n \n if componentDamage == 1:\n self.setMyStatus()\n self.myParent.setMyStatus()\n \n if amount > 0:\n if self.myParent.currentISP > amount:\n self.myParent.currentISP -= amount\n self.myParent.setMyStatus()\n amount = 0\n else:\n self.myParent.destroyMe()\n amount = 0\n \n self.myParent.updateAllGUIValues()", "def test_smoke(self):\n\t\tinit_state = torch.tensor(0.0)\n\t\ttotal_time = torch.tensor(4.0)\n\t\tprint('Agent state trajectory and actions:')\n\t\tAgent().play(init_state, total_time)\n\t\tpyro.clear_param_store()", "def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)", "def startBattle(self):\n defender = self.map.getUnitAt(self.pos)\n attacker = self.selectedUnit\n defender.takeDamage(int(attacker.firepower * attacker.hp))\n attacker.takeDamage(int(defender.firepower * defender.hp))\n self.endBattle()", "def test_attack_types(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 10) # Attack 10 infantry\n s1.react(self.bob, 8, troop_type='cavalry') # --Attack 8 cavalry\n\n # Cavalry should get a 50% bonus here, for a total of 8+4=12\n # So Bob should win by 2 despite lesser 
numbers\n result = s1.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)\n\n s2 = battle.create_skirmish(self.bob, 10,\n troop_type='cavalry') # attack 10 cavalry\n s2.react(self.alice, 8, troop_type='ranged') # -- oppose 8 ranged\n result = s2.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.alice.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)\n\n s3 = battle.create_skirmish(self.carol, 10, # Attack 10 ranged\n troop_type='ranged')\n s3.react(self.bob, 8) # -- oppose 8 infantry\n result = s3.resolve()\n self.assert_(result)\n self.assertEqual(result.victor, self.bob.team)\n self.assertEqual(result.margin, 2)\n self.assertEqual(result.vp, 10)", "def __init__(\n self,\n height=20,\n width=20,\n initial_sheep=100,\n initial_wolves=50,\n sheep_reproduce=0.04,\n wolf_reproduce=0.05,\n wolf_gain_from_food=20,\n grass=False,\n grass_regrowth_time=30,\n sheep_gain_from_food=4,\n trees_carrots_ratio=0.5,\n YEAR=20,\n nb_of_hunters=0,\n ):\n super().__init__()\n # Set parameters\n self.height = height\n self.width = width\n self.initial_sheep = initial_sheep\n self.initial_wolves = initial_wolves\n self.sheep_reproduce = sheep_reproduce\n self.wolf_reproduce = wolf_reproduce\n self.wolf_gain_from_food = wolf_gain_from_food\n self.grass = grass\n self.grass_regrowth_time = grass_regrowth_time\n self.sheep_gain_from_food = sheep_gain_from_food\n self.trees_carrots_ratio = trees_carrots_ratio\n self.YEAR = YEAR\n self.nb_of_hunters = nb_of_hunters\n\n self.schedule = RandomActivationByBreed(self) # classe contenant un dictionnaire des types d'agents et agents existants par type, avec une ordre d'activation possible\n self.grid = MultiGrid(self.height, self.width, torus=True)\n self.datacollector = DataCollector(\n {\n \"Fox\": lambda m: m.schedule.get_breed_count(Predator),\n \"Rabbit\": lambda m: m.schedule.get_breed_count(Prey),\n }\n )\n\n # Create sheep:\n for i in range(self.initial_sheep):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n age = self.random.randrange(3*self.YEAR)\n energy = self.random.randrange( int(self.sheep_gain_from_food/2), 2 * self.sheep_gain_from_food)\n sheep = Prey(self.next_id(), (x, y), self, True, energy, age)\n self.grid.place_agent(sheep, (x, y))\n self.schedule.add(sheep)\n\n # Create wolves\n for i in range(self.initial_wolves):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n age = self.random.randrange(4*self.YEAR)\n energy = self.random.randrange(int(self.wolf_gain_from_food/2), 2 * self.wolf_gain_from_food)\n wolf = Predator(self.next_id(), (x, y), self, True, energy, age)\n self.grid.place_agent(wolf, (x, y))\n self.schedule.add(wolf)\n\n # Create grass patches\n if self.grass:\n for agent, x, y in self.grid.coord_iter():\n if self.trees_carrots_ratio < self.random.random(): # aléatoire du nombre d'arbres et de carottes\n fully_grown = self.random.choice([True, False])\n if fully_grown: # carottes ou pousses de carotes\n countdown = self.grass_regrowth_time\n else:\n countdown = self.random.randrange(self.grass_regrowth_time)\n plant = Plant(self.next_id(), (x, y), self, fully_grown, countdown)\n else:\n plant = Tree(self.next_id(), (x, y), self)\n self.grid.place_agent(plant, (x, y))\n self.schedule.add(plant)\n\n # create hunters\n for i in range(self.nb_of_hunters):\n x = self.random.randrange(self.width-13, self.width-7) # HUNTERMODIF\n 
y = self.random.randrange(self.height-13, self.height-7) # HUNTERMODIF\n hunter = Hunter(self.next_id(), (x, y), self)\n self.grid.place_agent(hunter, (x, y))\n self.schedule.add(hunter)\n\n self.running = True\n self.datacollector.collect(self)", "def main(player):\n saved_score = 0\n rat_array = [\"reset\"]\n current_fight = \"\"\n while player.hp >= 1:\n\n system.clear_screen()\n if player.location == route_list[0]:\n pass\n else:\n rat_array = []\n rat_chance = randint(1, 100)\n if rat_chance >= 50:\n rat_array = system.npc_swarm_spawn()\n else:\n # must reset here, or a sub 50 roll crashes with no rat_array found\n rat_array = [\"reset\"]\n pass\n if player.location == current_fight:\n rat_array = [\"reset\"]\n else:\n pass\n\n # encounter spawn gotta go somewhere how bout here\n system.encounter_chance(player)\n\n status_array = system.status_message(route_list, player, rat_array)\n print(f\"{status_array[0]}\\n{status_array[1]}\")\n\n movement_options = system.movement_options(route_list, player)\n print(\"\\nAdjacent systems to your current location are:\")\n for movement_option in movement_options:\n print(movement_option)\n if len(movement_options) == 1:\n print(\n f\"\\nWhat is your decision? \\n\\nAvailable commands are {movement_options[0]}, \"\n + \"or type 'rat' to shoot rats.\"\n )\n else:\n print(\n f\"\\nWhat is your decision? \\n\\nAvailable commands are {movement_options[0]}, \"\n + f\"{movement_options[1]} or type 'rat' to shoot rats.\"\n )\n try:\n player_action = str(input())\n except ValueError:\n print(\"You spin your ship.\")\n\n action = system.parse_input(player_action, movement_options, player)\n # print(rat_array)\n if action.lower() == \"rat\":\n if rat_array[0] != \"reset\":\n # print('fightin')\n system.rat_fight(rat_array, player)\n # system.clear_screen()\n try:\n for rat_item in rat_array:\n rat_array[rat_item].remove()\n rat_array = [\"reset\"]\n current_fight = player.location\n except:\n rat_array = [\"reset\"]\n current_fight = player.location\n\n if player.location == destination_system:\n print(\n f\"\\n\\nCongratulations, you have arrived at {player.location}. \"\n + \"\\nYou may now set a new destination, or dock up and use your points you've gained to reship. \"\n + \"\\nOr you may choose to either hold onto your points, in which case they might be lost on death \"\n + \"or save them to buy bigger and better ships\"\n + \"\\no7 capsuleer the system is clear. \"\n + f\"\\n\\nYour final score from this trip was {player.score}\")\n saved_score += player.score\n\n if(player.hp < 1):\n print(\n f\"\\n\\nYour ship explodes in to tiny pieces at the stargate in {player.location}. \"\n + \"\\nYour capsule containing your body shatters from the force of the explosion. \"\n + \"\\nYou are dead. You wake up in your hangar where your death clone is set to and \"\n + \"prepare to voyage out once again. \"\n + \"\\no7 capsuleer the cyno is now lit. 
\"\n + f\"\\n\\nYour final score was {player.score}\"\n )", "def __init__(self, name, loot, strength):\n self.name = name\n self.x = 0\n self.y = 0\n self.health = 10\n self.strength = strength\n self.loot = loot\n self.is_alive = True\n self.MAX_HEALTH = 15\n self.magic_key = False\n logging.debug(\"{0} created with health of {1} and strength of {2}\"\n .format(self.name, self.health, self.strength))\n \"\"\" Test Results Part A:\n When increasing MAX_HEATH to 100, rounds tended to go on.\n When decreasing MAX_HEATH to 0.05, rounds end very quickly.\n This is expected because the Sprites will be easier or harder \n to defeat depending on how high their health can get. It will \n take more attacks to defeat a Sprite with more health and less\n attacks to defeat a Sprite with less health. \n \n Test Results Part B:\n Test: change strength of Enemy to 20 (higher than Avatar)\n Prediction: the Enemy should win most/all of the time because the player \n with more strength has a harder attack.\n Results: The Enemy won during all trials. If the roles were switched, the \n same could be said about Avatar.\n \n Test: set health of Avatar to 5\n Prediction: the Avatar will die more often than the Enemy because it can \n receive less attacks\n Results: The Avatar died during most trials. \n \n Test: set MAX_HEALTH for Enemy to 5\n Prediction: Enemy will be able to have less health, so it will be defeated\n more often than the Avatar\n Results: The enemy died in almost all trials\n \"\"\"", "def run(self):\n #Level Musik starten\n pygame.mixer.music.stop()\n pygame.mouse.set_cursor(*pygame.cursors.broken_x)\n pygame.key.set_repeat(10)\n pygame.mixer.music.load(os.path.join(self.sourceFileDir,\"Musik\",\"Decktonic - Night Drive (Strong Suit Remix).wav\"))\n \n pygame.mixer.music.play(-1)\n \n #Level Daten aus Datei einlesen\n for i in range(25):\n \n self.level += 1 \n spiel = Spiel(self,self.data[\"levels\"][self.level-1],self.win,godmode= self.godmode)\n #Wenn der Spieler das Level nicht geschafft haben sollte\n if not spiel.schleife_haupt():\n return self.level, self.kills\n \n geschwindigkeit = 10\n\n #Grundlevel erstellen\n lvl_data = {\n \n \"level\": self.level,\n \"hindernisse\": 2,\n \n \"GBall_Shoot\": {\n \"Anzahl\": 2,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Normal\": {\n \"Anzahl\": 2,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Verdoppler\": {\n \"Anzahl\": 3,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_RNG\": {\n \"Anzahl\": 4,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Two\": {\n \"Anzahl\": 3,\n \"geschwindigkeit\": geschwindigkeit\n }\n \n }\n #Level immer schwerer machen\n for k in range(self.maxlvl):\n\n\n self.level += 1\n geschwindigkeit += 1\n spiel = Spiel(self,lvl_data,self.win,godmode= self.godmode)\n if not spiel.schleife_haupt():\n return self.level, self.kills", "def time(self):\n\n self.timing = True\n self.scramble()\n\n self.disp = False", "async def play_shotgun(game_state) -> None:\n big_inside, lesser_counter = count_zombies(game_state)\n if big_inside and lesser_counter == 0:\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter <= 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN)\n elif lesser_counter > 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n message = 'What survivors should do [0/1]?\\n[0]: kill big zombie\\n' \\\n f'[1]: kill up to two lesser zombies ({lesser_counter} inside)\\n>'\n action = 
await get_action(game_state, message, ['0', '1'])\n if action == '0':\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter == 1:\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)", "def run(\n asgn,\n std_per_petal=10,\n sky_per_petal=40,\n sky_per_slitblock=0,\n start_tile=-1,\n stop_tile=-1,\n redistribute=True,\n use_zero_obsremain=True\n):\n gt = GlobalTimers.get()\n\n log = Logger.get()\n\n def print_counts(when=None):\n counts = asgn.get_counts(start_tile, stop_tile)\n tiles = list(counts.keys())\n tiles.sort()\n for tile in tiles:\n msg = 'Tile %i: ' % tile\n if when is not None:\n msg += when\n tilecounts = counts[tile]\n keys = [('SCIENCE',True), ('SCIENCE not STANDARD',False), ('STANDARD',True),\n ('SKY',True), ('SUPPSKY',False), ('SAFE',False)]\n ss = []\n for k,always in keys:\n n = tilecounts.get(k, None)\n if n is None:\n log.warning('Key', k, 'missing from Assignment.get_counts return value')\n else:\n if n>0 or always:\n ss.append('%s: %i' % (k,n))\n log.info(msg + ', '.join(ss))\n\n print_counts('Start: ')\n\n # First-pass assignment of science targets\n gt.start(\"Assign unused fibers to science targets\")\n asgn.assign_unused(TARGET_TYPE_SCIENCE, -1, -1, \"POS\", start_tile, stop_tile)\n gt.stop(\"Assign unused fibers to science targets\")\n print_counts('After assigning unused fibers to science targets: ')\n\n # Redistribute science targets across available petals\n if redistribute:\n gt.start(\"Redistribute science targets\")\n asgn.redistribute_science(start_tile, stop_tile)\n gt.stop(\"Redistribute science targets\")\n print_counts('After redistributing science targets: ')\n\n # Assign standards, up to some limit\n gt.start(\"Assign unused fibers to standards\")\n asgn.assign_unused(\n TARGET_TYPE_STANDARD, std_per_petal, -1, \"POS\", start_tile, stop_tile\n )\n gt.stop(\"Assign unused fibers to standards\")\n print_counts('After assigning standards: ')\n\n def do_assign_unused_sky(ttype, supp=False):\n tag = 'supp' if supp else ''\n if sky_per_petal > 0 and sky_per_slitblock > 0:\n # Assign using the slitblock requirement first, because it is\n # more specific\n asgn.assign_unused(\n ttype, -1, sky_per_slitblock, \"POS\",\n start_tile, stop_tile\n )\n print_counts('After assigning %ssky per-slitblock: ' % tag)\n\n # Then assign using the petal requirement, because it may(should) require\n # more fibers overall.\n asgn.assign_unused(\n ttype, sky_per_petal, -1, \"POS\",\n start_tile, stop_tile\n )\n print_counts('After assigning %ssky per-petal: ' % tag)\n else:\n asgn.assign_unused(\n ttype, sky_per_petal, sky_per_slitblock, \"POS\",\n start_tile, stop_tile\n )\n print_counts('After assigning %ssky: ' % tag)\n\n # Assign sky to unused fibers, up to some limit\n gt.start(\"Assign unused fibers to sky\")\n do_assign_unused_sky(TARGET_TYPE_SKY)\n gt.stop(\"Assign unused fibers to sky\")\n\n # Assign suppsky to unused fibers, up to some limit\n gt.start(\"Assign unused fibers to supp_sky\")\n do_assign_unused_sky(TARGET_TYPE_SUPPSKY, supp=True)\n gt.stop(\"Assign unused fibers to supp_sky\")\n\n # Force assignment if needed\n gt.start(\"Force assignment of sufficient standards\")\n asgn.assign_force(\n TARGET_TYPE_STANDARD, std_per_petal, -1, start_tile, stop_tile\n )\n gt.stop(\"Force assignment of sufficient standards\")\n print_counts('After force-assigning standards: ')\n\n def do_assign_forced_sky(ttype, supp=False):\n tag = 'supp' if supp 
else ''\n # This function really feels redundant with do_assign_unused_sky, but\n # when I tried to make a single function to do both calls, I had to call\n # f(*(preargs + pos_arg + postargs)) and it looked too mysterious.\n if sky_per_petal > 0 and sky_per_slitblock > 0:\n # Slitblock first\n asgn.assign_force(\n ttype, -1, sky_per_slitblock, start_tile, stop_tile)\n print_counts('After force-assigning %ssky per-slitblock: ' % tag)\n # Then petal\n asgn.assign_force(\n ttype, sky_per_petal, -1, start_tile, stop_tile)\n print_counts('After force-assigning %ssky per-petal: ' % tag)\n else:\n asgn.assign_force(\n ttype, sky_per_petal, sky_per_slitblock, start_tile, stop_tile)\n print_counts('After force-assigning %ssky: ' % tag)\n\n gt.start(\"Force assignment of sufficient sky\")\n do_assign_forced_sky(TARGET_TYPE_SKY)\n gt.stop(\"Force assignment of sufficient sky\")\n\n gt.start(\"Force assignment of sufficient supp_sky\")\n do_assign_forced_sky(TARGET_TYPE_SUPPSKY, supp=True)\n gt.stop(\"Force assignment of sufficient supp_sky\")\n\n # If there are any unassigned fibers, try to place them somewhere.\n # When assigning science targets to these unused fibers, also consider targets\n # with no remaining observations. Getting extra observations of science\n # targets is preferred over additional standards and sky. See desi-survey email\n # list archive message 1865 and preceding discussion thread.\n gt.start(\"Assign remaining unassigned fibers\")\n asgn.assign_unused(\n TARGET_TYPE_SCIENCE,\n -1,\n -1,\n \"POS\",\n start_tile,\n stop_tile,\n use_zero_obsremain=use_zero_obsremain\n )\n print_counts('After assigning reobservations of science targets: ')\n\n asgn.assign_unused(TARGET_TYPE_STANDARD, -1, -1, \"POS\", start_tile, stop_tile)\n asgn.assign_unused(TARGET_TYPE_SKY, -1, -1, \"POS\", start_tile, stop_tile)\n asgn.assign_unused(TARGET_TYPE_SUPPSKY, -1, -1, \"POS\", start_tile, stop_tile)\n\n # Assign safe location to unused fibers (no maximum). There should\n # always be at least one safe location (i.e. 
\"BAD_SKY\") for each fiber.\n # So after this is run every fiber should be assigned to something.\n asgn.assign_unused(TARGET_TYPE_SAFE, -1, -1, \"POS\", start_tile, stop_tile)\n gt.stop(\"Assign remaining unassigned fibers\")\n print_counts('Final assignments: ')\n\n # Assign sky monitor fibers\n gt.start(\"Assign sky monitor fibers\")\n asgn.assign_unused(TARGET_TYPE_SKY, -1, -1, \"ETC\", start_tile, stop_tile)\n asgn.assign_unused(TARGET_TYPE_SUPPSKY, -1, -1, \"ETC\", start_tile, stop_tile)\n asgn.assign_unused(TARGET_TYPE_SAFE, -1, -1, \"ETC\", start_tile, stop_tile)\n gt.stop(\"Assign sky monitor fibers\")\n\n return asgn", "def retrieve_handcrafted_inputs(self, obs):\n self.detect_self_unit_types(obs)\n\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n selected_allies = [unit for unit in allies if unit.unit_type == self.current_group_id]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n hitpoints = 0\n for unit in selected_allies:\n hitpoints += unit.health\n\n if self.current_group_id in unit_health.keys():\n init_hp = 0\n init_hp = unit_health[self.current_group_id] * self.init_unit_counts[self.current_group_id]\n else:\n init_hp = self.initial_self_hit_points\n current_hp = hitpoints / init_hp\n\n weapon_cooldown = 0\n for ally in selected_allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(selected_allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(selected_allies) > 0:\n self_weapon_range = weapon_ranges[self.current_group_id]\n self_radius = unit_sizes[self.current_group_id] / float(2)\n self_unit_type = unit_type[self.current_group_id]\n self_speed = unit_speed[self.current_group_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n # TODO can be inaccurate if using melee units\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in selected_allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n if in_enemy_range:\n break\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs, for_subgroup=True)\n\n if self.previous_commands[self.current_group_id] == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_commands[self.current_group_id] == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs,\n for_subgroup=True)\n\n distance_to_enemy = self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs))\n distance_to_enemy = distance_to_enemy / float((32 ** 2 + 20 ** 2) ** 0.5)\n\n return [current_hp, weapon_cooldown, 
enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, distance_to_enemy]" ]
[ "0.6155685", "0.6128568", "0.61060804", "0.6067592", "0.6018623", "0.5902906", "0.5877961", "0.5872772", "0.5862628", "0.58594483", "0.57941294", "0.5765011", "0.57348096", "0.5728712", "0.57282984", "0.5714876", "0.5714854", "0.5682435", "0.56775427", "0.5648126", "0.5637001", "0.5620304", "0.56162286", "0.5608905", "0.56052065", "0.56012285", "0.5587965", "0.5584335", "0.5582074", "0.55809677" ]
0.6836062
0