query | document | metadata | negatives | negative_scores | document_score | document_rank |
---|---|---|---|---|---|---|
Returns the preferred runway for the given airport. Right now we're only selecting runways based on whether or not they have ILS, but we could also choose based on wind conditions, or which direction flight plans should follow. | def get_preferred_runway(self, airport: Airport) -> RunwayData:
runways = list(RunwayData.for_pydcs_airport(airport))
for runway in runways:
# Prefer any runway with ILS.
if runway.ils is not None:
return runway
# Otherwise we lack the mission information to pick more usefully,
# so just use the first runway.
return runways[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_runway(self, airport: Union[str, \"Airport\"]) -> Optional[\"Flight\"]:\n msg = \"Use .aligned_on_runway(airport).max() instead.\"\n warnings.warn(msg, DeprecationWarning)\n\n return max(\n self.aligned_on_runway(airport),\n key=attrgetter(\"duration\"),\n default=None,\n )",
"def aligned_on_runway(\n self, airport: Union[str, \"Airport\"]\n ) -> Iterator[\"Flight\"]:\n\n from ..data import airports\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n _airport = airports[airport] if isinstance(airport, str) else airport\n if (\n _airport is None\n or _airport.runways is None\n or _airport.runways.shape.is_empty\n ):\n return None\n\n if isinstance(_airport.runways.shape, LineString):\n candidate_shapes = [\n LineString(list(self.xy_time)).intersection(\n _airport.runways.shape.buffer(5e-4)\n )\n ]\n else:\n candidate_shapes = [\n LineString(list(self.xy_time)).intersection(\n on_runway.buffer(5e-4)\n )\n for on_runway in _airport.runways.shape.geoms\n ]\n\n for intersection in candidate_shapes:\n if intersection.is_empty:\n continue\n if isinstance(intersection, LineString):\n (*_, start), *_, (*_, stop) = intersection.coords\n segment = self.between(start, stop, strict=False)\n if segment is not None:\n yield segment\n if isinstance(intersection, MultiLineString):\n (*_, start), *_, (*_, stop) = intersection.geoms[0].coords\n for chunk in intersection.geoms:\n (*_, start_bak), *_, (*_, stop) = chunk.coords\n if stop - start > 40: # crossing runways and back\n start = start_bak\n segment = self.between(start, stop, strict=False)\n if segment is not None:\n yield segment",
"def findAirport(state):\n if state == \"NSW\":\n airport = \"Sydney Airport\"\n elif state == \"VIC\":\n airport = \"Melbourne Airport\"\n elif state == \"QLD\":\n airport = \"Brisbane Airport\"\n elif state == \"TAS\":\n airport = \"Hobart Airport\"\n elif state == \"WA\":\n airport = \"Perth Airport\"\n elif state == \"SA\":\n airport = \"Adelaide Airport\"\n elif state == \"NT\":\n airport = \"Darwin Airport\"\n return airport",
"def _choose_best_trip(self):\n times = [(key, self._trips_dict[key].get_duration()) for key in self._trips_dict.keys()\n if self._trips_dict[key] is not None]\n self._primary_mode = min(times, key=lambda tup: tup[1])[0]",
"def takeoff_airport(self, **kwargs: Any) -> \"Airport\":\n\n from ..core.distance import guess_airport\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n data = self.data.sort_values(\"timestamp\")\n return guess_airport(data.iloc[0], **kwargs)",
"def takeoff_from_runway(\n self,\n airport: Union[str, \"Airport\"],\n threshold_alt: int = 2000,\n zone_length: int = 6000,\n little_base: int = 50,\n opening: float = 5,\n ) -> Iterator[\"Flight\"]:\n\n from ..data import airports\n\n # Access completion on Flight objects\n self = cast(\"Flight\", self).phases()\n\n _airport = airports[airport] if isinstance(airport, str) else airport\n if (\n _airport is None\n or _airport.runways is None\n or _airport.runways.shape.is_empty\n ):\n return None\n\n nb_run = len(_airport.runways.data)\n alt = _airport.altitude + threshold_alt\n base = zone_length * np.tan(opening * np.pi / 180) + little_base\n\n # Create shapes around each runway\n list_p0 = geo.destination(\n list(_airport.runways.data.latitude),\n list(_airport.runways.data.longitude),\n list(_airport.runways.data.bearing),\n [zone_length for i in range(nb_run)],\n )\n list_p1 = geo.destination(\n list(_airport.runways.data.latitude),\n list(_airport.runways.data.longitude),\n [x + 90 for x in list(_airport.runways.data.bearing)],\n [little_base for i in range(nb_run)],\n )\n list_p2 = geo.destination(\n list(_airport.runways.data.latitude),\n list(_airport.runways.data.longitude),\n [x - 90 for x in list(_airport.runways.data.bearing)],\n [little_base for i in range(nb_run)],\n )\n list_p3 = geo.destination(\n list_p0[0],\n list_p0[1],\n [x - 90 for x in list(_airport.runways.data.bearing)],\n [base for i in range(nb_run)],\n )\n list_p4 = geo.destination(\n list_p0[0],\n list_p0[1],\n [x + 90 for x in list(_airport.runways.data.bearing)],\n [base for i in range(nb_run)],\n )\n\n runway_polygons = {}\n\n for i, name in enumerate(_airport.runways.data.name):\n lat = [list_p1[0][i], list_p2[0][i], list_p3[0][i], list_p4[0][i]]\n lon = [list_p1[1][i], list_p2[1][i], list_p3[1][i], list_p4[1][i]]\n\n poly = Polygon(zip(lon, lat))\n runway_polygons[name] = poly\n\n low_traj = self.query(\n f\"(phase == 'CLIMB' or phase == 'LEVEL') and altitude < {alt}\"\n )\n\n if low_traj is None:\n return\n\n for segment in low_traj.split(\"2T\"):\n candidates_set = []\n for name, polygon in runway_polygons.items():\n if segment.intersects(polygon):\n candidate = (\n segment.cumulative_distance()\n .clip_iterate(polygon)\n .max(key=\"compute_gs_max\")\n )\n if candidate is None or candidate.shape is None:\n continue\n start_runway = candidate.aligned_on_runway(airport).max()\n\n if start_runway is not None:\n candidate = candidate.after(start_runway.start)\n if candidate is None or candidate.shape is None:\n continue\n if candidate.max(\"compute_gs\") < 140:\n continue\n\n candidates_set.append(candidate.assign(runway=name))\n\n result = max(\n candidates_set, key=attrgetter(\"duration\"), default=None\n )\n if result is not None:\n yield result",
"def runway_change(\n self,\n airport: Union[str, \"Airport\", None] = None,\n dataset: Optional[\"Airports\"] = None,\n **kwargs: Any,\n ) -> Iterator[\"Flight\"]:\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if airport is None:\n if dataset is None:\n airport = self.landing_airport()\n else:\n airport = self.landing_airport(dataset=dataset)\n\n if airport is None:\n return None\n\n aligned = iter(self.aligned_on_ils(airport, **kwargs))\n first = next(aligned, None)\n if first is None:\n return\n\n for second in aligned:\n candidate = self.between(first.start, second.stop)\n assert candidate is not None\n candidate = candidate.assign(ILS=None)\n if candidate.phases().query('phase == \"CLIMB\"') is None:\n candidate.data.loc[\n candidate.data.timestamp <= first.stop, \"ILS\"\n ] = first.max(\"ILS\")\n candidate.data.loc[\n candidate.data.timestamp >= second.start, \"ILS\"\n ] = second.max(\"ILS\")\n\n yield candidate.assign(\n airport=airport\n if isinstance(airport, str)\n else airport.icao\n )\n\n first = second",
"def landing_airport(self, **kwargs: Any) -> \"Airport\":\n\n from ..core.distance import guess_airport\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n data = self.data.sort_values(\"timestamp\")\n return guess_airport(data.iloc[-1], **kwargs)",
"def on_taxiway(\n self,\n airport_or_taxiways: Union[str, pd.DataFrame, \"Airport\", \"Overpass\"],\n *,\n tolerance: float = 15,\n max_dist: float = 85,\n ) -> Iterator[\"Flight\"]:\n\n from ..core.structure import Airport\n from ..data import airports\n\n self = cast(\"Flight\", self)\n if isinstance(airport_or_taxiways, str):\n airport_or_taxiways = airports[airport_or_taxiways]\n # This is obvious, but not for MyPy\n assert not isinstance(airport_or_taxiways, str)\n\n taxiways_ = (\n airport_or_taxiways.taxiway\n if isinstance(airport_or_taxiways, Airport)\n else airport_or_taxiways\n )\n\n # decompose with a function because MyPy is lost\n def taxi_df(taxiways_: Union[\"Overpass\", pd.DataFrame]) -> pd.DataFrame:\n if isinstance(taxiways_, pd.DataFrame):\n return taxiways_\n if taxiways_.data is None:\n raise ValueError(\"No taxiway information\")\n return taxiways_.data\n\n taxiways = ( # one entry per runway label\n taxi_df(taxiways_)\n .groupby(\"ref\")\n .agg({\"geometry\": list})[\"geometry\"]\n .apply(MultiLineString)\n .to_frame()\n )\n\n simplified_df = cast(\n pd.DataFrame, self.simplify(tolerance=tolerance).data\n )\n if simplified_df.shape[0] < 2:\n return\n\n previous_candidate = None\n first = simplified_df.iloc[0]\n for _, second in simplified_df.iloc[1:].iterrows():\n p1 = Point(first.longitude, first.latitude)\n p2 = Point(second.longitude, second.latitude)\n\n def extremities_dist(twy: MultiLineString) -> float:\n p1_proj = twy.interpolate(twy.project(p1))\n p2_proj = twy.interpolate(twy.project(p2))\n d1 = geo.distance(p1_proj.y, p1_proj.x, p1.y, p1.x)\n d2 = geo.distance(p2_proj.y, p2_proj.x, p2.y, p2.x)\n return d1 + d2 # type: ignore\n\n temp_ = taxiways.assign(dist=np.vectorize(extremities_dist))\n start, stop, ref, dist = (\n first.timestamp,\n second.timestamp,\n temp_.dist.idxmin(),\n temp_.dist.min(),\n )\n if dist < max_dist:\n candidate = self.assign(taxiway=ref).between(start, stop)\n if previous_candidate is None:\n previous_candidate = candidate\n\n else:\n prev_ref = previous_candidate.taxiway_max\n delta = start - previous_candidate.stop\n if prev_ref == ref and delta < pd.Timedelta(\"1T\"):\n previous_candidate = self.assign(taxiway=ref).between(\n previous_candidate.start, stop\n )\n\n else:\n yield previous_candidate\n previous_candidate = candidate\n\n first = second\n\n if previous_candidate is not None:\n yield previous_candidate",
"def find_nearest_airport(coords, airports):\r\n nearest_airport = None\r\n nearest_airport_dist = None\r\n for airport in airports:\r\n airport_coords = (airports[airport].get(\"lat\"), airports[airport].get(\"lon\"))\r\n dist = vincenty(airport_coords, coords).miles\r\n if nearest_airport_dist == None:\r\n nearest_airport_dist = dist\r\n nearest_airport = airport\r\n elif dist < nearest_airport_dist:\r\n nearest_airport_dist = dist\r\n nearest_airport = airport\r\n return(nearest_airport, nearest_airport_dist)",
"def get_lane(self, index: LaneIndex) -> Optional[AbstractLane]:\n o, d, i = index\n\n try:\n return self.graph[o][d][i]\n except KeyError:\n if i is None and len(self.graph[o][d]) == 1:\n return list(self.graph[o][d].values())[0]",
"def get_best_roundtrip(self):\n out = min(self.outgoing_flights, key=lambda f: f.price)\n ret = min(self.return_flights, key=lambda f: f.price)\n\n return RoundTrip(out, ret)",
"def find_airport_code_by_city(city):\n airports = get_airports()\n\n if city == 'London':\n return 'LHR'\n\n for airport_code in airports:\n if airports[airport_code].lower() == city.lower():\n return airport_code\n return None",
"def get_closest_station_by_zipcode(zipcode):\n\n station_lookup_method_by_zipcode = lookup_usaf_station_by_zipcode(zipcode)\n try:\n station, warnings, lat, lon = _get_closest_station_by_zcta_ranked(zipcode)\n\n isd_metadata = get_isd_file_metadata(str(station))\n if len(isd_metadata) == 0:\n logging.warning(\"Zipcode %s mapped to station %s, but no ISD metadata was found.\" % (zipcode, station))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedUSAFIDError as e:\n logging.warning(\"Closest station %s is not a recognized station. Using backup-method station %s for zipcode %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode,\n zipcode))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedZCTAError as e:\n logging.warning(\"Unrecognized ZCTA %s\" % e)\n return None\n\n if str(station) != station_lookup_method_by_zipcode:\n logging.debug(\"Previously would have selected station %s instead of %s for zip code %s\" % (\n station_lookup_method_by_zipcode,\n str(station),\n zipcode))\n\n if warnings:\n logging.warning(\"Station %s is %d meters over maximum %d meters (%d meters) (zip code %s is at lat/lon %f, %f)\" % (\n str(station),\n int(warnings[0].data['distance_meters'] - warnings[0].data['max_distance_meters']),\n int(warnings[0].data['max_distance_meters']),\n int(warnings[0].data['distance_meters']),\n zipcode,\n lat,\n lon,\n ))\n logging.warning(\"Closest station %s is too far. Using backup-method station %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode))\n return station_lookup_method_by_zipcode\n\n return str(station)",
"def get_less_priority_aircraft(self, scenario):\n first, second = (scenario.get_flight(self.aircrafts[0]),\n scenario.get_flight(self.aircrafts[1]))\n return (\n self.aircrafts[1]\n if first.departure_time < second.departure_time\n else self.aircrafts[0]\n )",
"def get_opt_plan(task):\n\n\tT = 15.0\n\tweights = 0\n\tif task == TABLE_TASK or task == COFFEE_TASK:\n\t\tweights = 1\n\telif task == LAPTOP_TASK:\n\t\tweights = 10\n\n\t# initialize start/goal based on task \n\tif task == COFFEE_TASK or task == HUMAN_TASK:\n\t\tpick = pick_shelf\n\telse:\n\t\tpick = pick_basic\n\n\tif task == LAPTOP_TASK:\n\t\tplace = place_higher\n\telse:\n\t\tplace = place_lower\n\t\t\n\tstartRad = np.array(pick)*(math.pi/180.0)\n\tgoalRad = np.array(place)*(math.pi/180.0)\n\tstart = startRad\n\tgoal = goalRad\n\n\tplan = Planner(task)\t\n\tplan.replan(start, goal, weights, 0.0, T, 0.1)\n\n\tplan.kill_planner()\n\treturn plan",
"def go_around(\n self,\n airport: None | str | \"Airport\" = None,\n dataset: None | \"Airports\" = None,\n **kwargs: Any,\n ) -> Iterator[\"Flight\"]:\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if airport is None:\n if dataset is None:\n airport = self.landing_airport()\n else:\n airport = self.landing_airport(dataset=dataset)\n\n if airport is None:\n return None\n\n attempts = self.aligned_on_ils(airport, **kwargs)\n # you need to be aligned at least twice on a rway to have a GA:\n if len(attempts) < 2:\n return\n\n first_attempt = next(attempts, None)\n\n while first_attempt is not None:\n after_first_attempt = self.after(first_attempt.start)\n assert after_first_attempt is not None\n\n climb = after_first_attempt.phases().query('phase == \"CLIMB\"')\n if climb is None:\n return\n\n after_climb = self.after(next(climb.split(\"10T\")).stop)\n if after_climb is None:\n return\n\n next_attempt = next(\n after_climb.aligned_on_ils(airport, **kwargs), None\n )\n\n if next_attempt is not None:\n goaround = self.between(first_attempt.start, next_attempt.stop)\n assert goaround is not None\n\n goaround = goaround.assign(\n ILS=None,\n airport=airport\n if isinstance(airport, str)\n else airport.icao,\n )\n goaround.data.loc[\n goaround.data.timestamp <= first_attempt.stop, \"ILS\"\n ] = first_attempt.max(\"ILS\")\n goaround.data.loc[\n goaround.data.timestamp >= next_attempt.start, \"ILS\"\n ] = next_attempt.max(\"ILS\")\n yield goaround\n\n first_attempt = next_attempt",
"def get_pathway(identifier, organism):\n pass",
"def _get_potential_right_way(self, lanelet):\n if lanelet.adj_right:\n if lanelet.adj_right_same_direction:\n potential_right_way = self.left_ways.get(lanelet.adj_right)\n else:\n potential_right_way = self.right_ways.get(lanelet.adj_right)\n if potential_right_way:\n adj_right = self.lanelet_network.find_lanelet_by_id(lanelet.adj_right)\n vertices = (\n adj_right.left_vertices\n if lanelet.adj_right_same_direction\n else adj_right.right_vertices[::-1]\n )\n if _vertices_are_equal(lanelet.right_vertices, vertices):\n return potential_right_way\n\n return None",
"def airport_info(airport_code):\n\n r = requests.get(\"{}AirportBoards\".format(FA_ENDPOINT), auth=(USERNAME,FA_KEY), params={\n \"airport_code\":airport_code,\n \"type\":\"departures\",\n \"howMany\": 100\n })\n\n return r",
"def choose_trial_to_run(self, trial_runner):\n\n candidates = []\n for trial in trial_runner.get_trials():\n if trial.status in [Trial.PENDING, Trial.PAUSED] and \\\n trial_runner.has_resources(trial.resources):\n candidates.append(trial)\n candidates.sort(\n key=lambda trial: self._trial_state[trial].last_perturbation_time)\n return candidates[0] if candidates else None",
"def best_route(self, player: Optional[Pt] = None, goal: Optional[Pt] = None):\n best = empty_path()\n\n if player is None and goal is None:\n diff = self._goal - self._player\n else:\n diff = goal - player\n\n horz = diff.x // self.PLAYER_DIM\n vert = diff.y // self.PLAYER_DIM\n\n if vert < 0:\n best[\"UP\"] = abs(vert)\n elif vert > 0:\n best[\"DOWN\"] = vert\n if horz > 0:\n best[\"RIGHT\"] = horz\n elif horz < 0:\n best[\"LEFT\"] = abs(horz)\n\n return best",
"def get_matching_war(self, clan, war=None):\n\n if war and war.date_start <= self.time <= war.date_end:\n return war\n\n try:\n return ClanWar.objects.get(\n clan=clan,\n date_start__lte=self.time,\n date_end__gte=self.time\n )\n except ClanWar.DoesNotExist:\n return None\n except ClanWar.MultipleObjectsReturned:\n return None",
"def get_airplane_state(self, airplane_instance, chosen_time_and_date):\r\n\r\n chosen_airplane = airplane_instance\r\n voyages_list = self.ioAPI.load_all_voyages() # List of all voyages\r\n airplane_state = \"IDLE\" # initializes the airplane state at IDLE\r\n NOW = datetime.datetime.fromisoformat(chosen_time_and_date)\r\n\r\n for voyage in voyages_list:\r\n voyage_plane = voyage.get_plane_id()\r\n\r\n departure_out = datetime.datetime.fromisoformat(voyage.get_departure_out())\r\n arrival_out = datetime.datetime.fromisoformat(voyage.get_arrival_out())\r\n departure_home = datetime.datetime.fromisoformat(voyage.get_departure_home())\r\n arrival_home = datetime.datetime.fromisoformat(voyage.get_arrival_home())\r\n available = arrival_home + datetime.timedelta(hours = 1)\r\n \r\n if voyage_plane == chosen_airplane:\r\n if departure_out <= NOW and arrival_home > NOW:\r\n break \r\n else:\r\n return airplane_state\r\n \r\n if departure_out <= NOW and NOW <= arrival_out:\r\n airplane_state = \"Flight {} is on its way to {} and will be available again on: {}\".format(voyage.get_flight_number_out(), voyage.get_dest_id(), available)\r\n elif departure_home <= NOW and NOW <= arrival_home:\r\n airplane_state = \"Flight {} is on its way to KEF and will be available again on: {}\".format(voyage.get_flight_number_back(), available)\r\n elif arrival_out <= NOW and NOW <= departure_home:\r\n airplane_state = \"IN INTERMISSION\" \r\n\r\n return airplane_state",
"def lookup_active_flight(options):\n if 'vehicle' not in options:\n vehicle = get_default_vehicle()\n options['vehicle'] = vehicle.name\n else:\n vehicle = get_vehicle(options['vehicle'])\n flight = getActiveFlight(vehicle)\n if not flight:\n options['flight'] = None\n else:\n options['flight'] = flight.name\n return flight",
"def opt_road(player, board, building_vertex):\n player_buildings = board.get_player_settlements(player.player_id) + board.get_player_cities(player.player_id)\n player_roads = board.get_player_roads(player.player_id)\n accessible_vertices = sorted(set(player_buildings+ [v for pair in player_roads for v in pair]), \\\n key = lambda v: manhattan_distance(v,building_vertex,board))\n if building_vertex in accessible_vertices:\n print(\"Error: Building vertex already accessible, do not need road.\")\n return None, None\n for v in accessible_vertices:\n neighbor_vertices = []\n x,y = board.get_vertex_location(v)\n for dx, dy in [[0,1],[0,-1],[1,0],[-1,0]]:\n xx = x + dx\n yy = y + dy\n if board.get_vertex_number(xx,yy) in range(board.max_vertex+1):\n neighbor_vertices.append(board.get_vertex_number(xx,yy))\n neighbor_vertices = sorted(neighbor_vertices, key = lambda v: manhattan_distance(v,building_vertex,board))\n for n in neighbor_vertices:\n if board.if_can_build_road(v, n, player.player_id):\n v_t = list(board.get_vertex_location(v))\n n_t = list(board.get_vertex_location(n))\n return v_t, n_t\n print(\"need to implement default behavior\")\n return None,None",
"def get_floor_plan(port_id):\n url = 'https://api.archisketch.com/v1/public/projects/'\n response = requests.get(url + port_id + '/detail')\n response = response.json()['project']\n floor_plan = response['floorplans'][0]\n return floor_plan",
"def preferred_disks(self) -> Optional[Mapping[str, int]]:\n return pulumi.get(self, \"preferred_disks\")",
"def stairway_path(stairway: Sequence[Union[float, int]]) -> Union[float, int]:\n # определяем начальный список стоимости перехода по ступенькам\n sum_cost = [0 for _ in range(len(stairway))]\n sum_cost[0] = stairway[0] # стоимость шага на 1 ступеньку\n sum_cost[1] = stairway[1] # стоимость шага на 2 ступеньку\n\n for i in range(2, len(stairway)): # цикл начиная со 2 ступеньки и до конца лестницы\n # соимость перехода = стоимость на i-ой ступ. + минимум стоимости(переход с i-2, переход с i-1)\n sum_cost[i] = stairway[i] + min(sum_cost[i-2], sum_cost[i-1])\n\n return sum_cost[-1] # возвращаем последний результат в списке",
"def getBestPath(self):\n if self._bestPathVertex.getNextWaypoint() is None:\n numWaypointsCompleted = len(self._waypoints)\n quality = 2\n if self._vertexQueue.isEmpty():\n quality += 1\n else:\n numWaypointsCompleted = self._bestPathVertex.getNextWaypoint().getIndex()\n quality = 1\n if self._vertexQueue.isEmpty():\n quality -= 1\n \n return outputPath.generatePath(self._bestPathVertex, self._params.waypointAcceptanceRadii, quality, numWaypointsCompleted)"
] | [
"0.6588333",
"0.5721019",
"0.5678698",
"0.54820865",
"0.5445117",
"0.5388726",
"0.5355984",
"0.51527876",
"0.50692517",
"0.49168152",
"0.49161312",
"0.48849434",
"0.4864975",
"0.48553964",
"0.47927576",
"0.47726896",
"0.475217",
"0.47335708",
"0.47126895",
"0.4672348",
"0.45865738",
"0.45731226",
"0.4507251",
"0.44981074",
"0.44912243",
"0.448457",
"0.4470083",
"0.44597727",
"0.44489044",
"0.44454935"
] | 0.8533307 | 0 |
prune out entries with lowest weight. | def _prunelowestweight(self):
# note: must be called with acquired self._lock!
numentries = len(self._dict)
if numentries >= self.maxentries:
# evict according to entry's weight
items = [(entry.weight, key) for key, entry in self._dict.iteritems()]
items.sort()
index = numentries - self.prunenum
if index > 0:
for weight, key in items[:index]:
del self._dict[key] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_heavier_than(self, w):\n G = nx.DiGraph()\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] <= w:\n G.add_edge(u, v, weight=self.graph[u][v][\"weight\"], alignment=self.graph[u][v][\"alignment\"])\n self.graph = G",
"def filter_by_weight(self, w):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] >= w:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G",
"def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break",
"def limit_weight(self, weight_max):\n # remove items with low values\n if self.total_weight > weight_max:\n items_sorted_by_fitness = sorted(self.items, key=lambda item: item.fitness, reverse=False)\n while items_sorted_by_fitness and self.total_weight > weight_max:\n least_fit_item = items_sorted_by_fitness.pop(0)\n if self.item_stats[least_fit_item.id] == 1:\n self.item_stats[least_fit_item.id] = 0\n self.update_values() # have to update each time an item is change to recompute weight",
"def trim_edges(g, weight=1):\n g2 = nx.Graph()\n for fnode, tonode, edgedata in g.edges(data=True):\n if edgedata[\"weight\"] > weight:\n g2.add_edge(fnode, tonode, **edgedata)\n return g2",
"def eliminate(self):\n deleteKey = []\n for key,value in self._sets[self._currentSet].items():\n if value < self._minSupport:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._sets[self._currentSet][key]",
"def remove_lowfee(entries, feethresh):\n # Build a dependency map\n depmap = defaultdict(list)\n for txid, entry in entries.items():\n for dep in entry.depends:\n depmap[dep].append(txid)\n removed = set()\n for txid, entry in entries.items():\n if entry.feerate < feethresh:\n removelist = [txid]\n while removelist:\n txid_remove = removelist.pop()\n if txid_remove in removed:\n continue\n removed.add(txid_remove)\n removelist.extend(depmap[txid_remove])\n return {txid: entry for txid, entry in entries.items()\n if txid not in removed}",
"def sortByWeight(xs):\n xs = [x for x in xs if len(x.reshape(-1)) > 0]\n return list(sorted(xs, key=lambda x: (x > 0).sum()))",
"def filter_non_one(self):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] == 1:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G",
"def _reweight_and_discard_irrelevant(self, weighted_sample_pool, t):\n tmp = []\n ret = []\n wc = self.classifiers[t]\n theta_a = wc.theta_a\n theta_b = wc.theta_b\n\n norm_factor = 0\n discarded = 0\n for patch, w in weighted_sample_pool:\n response = self.h_t(patch, t)\n # if t > 3:\n # if response < theta_a or response > theta_b: # throw it away\n # discarded += 1\n # continue\n r = self.classify(patch)\n label = patch.label\n new_weight = w * np.exp(-label * r)\n\n tmp.append([patch, new_weight])\n norm_factor += new_weight\n for patch, w in tmp: # normalize weights\n normalized_weight = w / norm_factor\n ret.append([patch, normalized_weight])\n print \"Discarded %d training samples\" % discarded\n return ret",
"def prune(candidate_aspect_list, min_sup):\n l_k = deepcopy(candidate_aspect_list)\n for key, value in list(l_k.items()):\n if value < min_sup:\n del l_k[key]\n return l_k",
"def _remove_additional_elements(self):\n # Produces a list of keys in sample sorted by seed\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n\n # Removes the keys with largest seed values (beyond the\n # first k keys)\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]",
"def prune(self):\n self.sort(key=lambda chunk: chunk.probability)\n del self[:-self.model.num_parses]",
"def _remove_additional_elements(self):\n # Produces a list of keys in sample sorted by seed\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n\n # Removes the keys with largest seed values (beyond the first k keys)\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]",
"def DropSmallEntries(self, tol):\n return _hypre.HypreParMatrix_DropSmallEntries(self, tol)",
"def prune(read_objects):\r\n\t\timport numpy as np\r\n\t\tbest_reads = []\r\n\t\tread_name_reads = {}\r\n\t\tfor read in read_objects:\r\n\t\t\t\tif read.name in read_name_reads:\r\n\t\t\t\t\t\tread_name_reads[read.name].append(read)\r\n\t\t\t\telse:\r\n\t\t\t\t\t\tread_name_reads[read.name] = [read]\r\n\t\tfor read_name, read_objects in read_name_reads.items():\r\n\t\t\t\te_scores = [read.e_score for read in read_objects]\r\n\t\t\t\ti = np.argmin(e_scores)\r\n\t\t\t\tbest_reads.append(read_objects[i])\r\n\t\treturn best_reads",
"def exclude_minimal_triforce_hunt(weight_dict, random_settings):\n weights = weight_dict['item_pool_value']\n if 'minimal' in weights.keys() and random_settings['triforce_hunt'] == \"true\":\n weights.pop('minimal')\n random_settings['item_pool_value'] = random.choices(list(weights.keys()), weights=list(weights.values()))[0]",
"def _remove_key(self):\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heaps.pop(keys)",
"def prune(self, alignment_infos):\n alignments = []\n best_score = 0\n\n for alignment_info in alignment_infos:\n score = IBMModel4.model4_prob_t_a_given_s(alignment_info, self)\n best_score = max(score, best_score)\n alignments.append((alignment_info, score))\n\n threshold = IBMModel5.MIN_SCORE_FACTOR * best_score\n alignments = [a[0] for a in alignments if a[1] > threshold]\n return set(alignments)",
"def trim(self):\n while np.any(self.vertex_valance <= 1):\n edge_to_keep = np.all(self.vertex_valance[self.edges] > 1,\n axis=1).tolist();\n self.raw_wires.filter_edges(edge_to_keep);\n vertex_to_keep = [len(self.get_vertex_neighbors(i)) > 0 for i in\n range(self.num_vertices)];\n self.raw_wires.filter_vertices(vertex_to_keep);\n\n self.__initialize_wires();\n if len(self.vertices) == 0:\n raise RuntimeError(\"Zero vertices left after trimming.\");",
"def sort_weight(self):\n self._elements = list(\n _[-1] for _ in sorted((e.weight, e.value, e) for e in self)\n )",
"def cut_ppl_off(self, G):\r\n for pre, node in list(G.edges):\r\n ew = G.edges[pre, node]['weight']\r\n if ew <= -.95:\r\n G.remove_edge(pre, node)\r\n elif ew >= 1:\r\n G.edges[pre, node]['weight'] = 1.0\r\n else:\r\n continue\r\n return G",
"def prune_trie(trie, threshold):\n\tnode = trie.root\n\tpq = []\n\tfor i in node.children.keys():\n\t\tpq.append((node.children[i],node.children[i].char))\n\twhile len(pq) > 0:\n\t\tcur_node, char = pq.pop()\n\t\tif cur_node.isEnd == False:\n\t\t\tfor i in cur_node.children.keys():\n\t\t\t\tpq.append((cur_node.children[i],char + cur_node.children[i].char))\n\t\telse:\n\t\t\tif cur_node.weight < threshold:\n\t\t\t\tdelete(trie, char)\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn trie",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)",
"def eliminateRules(self):\n deleteKey = []\n for key,value in self._rules.items():\n if value[0] < self._minConfidence:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._rules[key]",
"def prune_weights(model, fraction):\n weights = model.get_weights()\n\n def prune_weight_matrix(weight_matrix):\n # Copy the weights so we don't modify the original network.\n weight_matrix = np.copy(weight_matrix)\n flat_weight_matrix = np.reshape(weight_matrix, (-1,))\n kth = int(len(flat_weight_matrix) * fraction)\n # Determine the k least relevant weights using np.argpartition.\n indices = np.argpartition(np.abs(flat_weight_matrix), kth)\n # Prune them.\n flat_weight_matrix[indices[:kth]] = 0\n weight_matrix = np.reshape(flat_weight_matrix, weight_matrix.shape)\n return weight_matrix\n\n weights[:-1] = list(map(prune_weight_matrix, weights[:-1]))\n\n (_, n_classes) = weights[-1].shape\n # Create a pruned model.\n return create_model(\n LAYER_SIZES,\n n_classes,\n layer_fn=Sparse,\n layer_kwargs_fn=sparse_kwargs,\n weights=weights,\n )",
"def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)",
"def prune(self, min_freq):\n new_forward = {}\n new_backward = [\"OOV\"]\n new_freq = [0]\n j = 1\n for i in xrange(1,len(self.backward)):\n f = self.backward[i]\n if self.freq[i] >= min_freq:\n new_forward[f] = j\n new_backward.append(f)\n new_freq.append(self.freq[i])\n j += 1\n self.forward = new_forward\n self.backward = new_backward\n self.freq = new_freq\n self.counter = j",
"def remove_min(self):\n p = self._find_min()\n item = self._data.delete(p)\n return (item._key, item._value)",
"def removeLowFreqWords(self, words, minFreq):\n\t\tfrequency = defaultdict(int)\n\t\tfor word in words:\n\t\t\tfrequency[word] += 1\n\t\tremoved = [word for word in words if frequency[word] > minFreq]\t\t\n\t\treturn removed"
] | [
"0.6713875",
"0.668257",
"0.66254586",
"0.6615816",
"0.6482034",
"0.64277047",
"0.6270732",
"0.6145243",
"0.6112046",
"0.6069026",
"0.6012259",
"0.6003529",
"0.59894925",
"0.59878355",
"0.5927412",
"0.5920374",
"0.5901654",
"0.58522505",
"0.58449197",
"0.5830802",
"0.5825418",
"0.5769159",
"0.5735446",
"0.57276756",
"0.56944287",
"0.5686511",
"0.56834525",
"0.56594086",
"0.5651333",
"0.5628791"
] | 0.8380892 | 0 |
Prints all of the contacts in a specified path. | def print_all(self):
with open(self.file, 'r', encoding='utf-8') as self.contacts_file:
for i in self.contacts_file.readlines():
print(i) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)",
"def view_contacts(self):\n with open(self.filename, \"r\") as contactsFile:\n contacts = self.display_contact(contactsFile.readlines())\n\n if not contacts:\n return self.msgbox(\"No contacts found.\")\n\n self.msgbox(msg=\"\\n\".join(contacts), title=\"Showing All Contacts\")",
"def do_show(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tprint(\"Contacts in the current book\\n\")\n\t\t\tself.cl.list_contacts()\n\t\telse:\n\t\t\tprint(\"To see contacts you need to open or create book\")",
"def do_show(self, line):\n\t\tif not(self.db is None):\n\t\t\tfor contact in self.db.contact.find():\n\t\t\t\tpprint.pprint(contact)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")",
"def show_contacts():\n data_list = queries2.contacts()[0]\n table_titles = queries2.contacts()[1]\n title = \"Contacts\"\n return render_template('pages.html', data_list=data_list, title=title, table_titles=table_titles)",
"def list_contact(name):\n db = get_db()\n name = hashlib.sha256(name).hexdigest()\n \n if name in db:\n info = db[name]\n print logger.ok(\"\"\"\n Contact Information:\n Name: %s\n Phone Number: %s\n Email Address: %s\n \"\"\" % (info['name'], info['phone'], info['email']))\n else:\n sys.exit(logger.fail('fatal: contact does not exist'))",
"def paths_print(atree):\n\n l = atree.pathFromHere_explore('/')\n for d in l:\n print(d)",
"def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result",
"def phone_dir_nav():\n\n emps = Employee.query.all()\n\n for emp in emps: # [<Emp>, <Emp>]\n if emp.dept is not None:\n print(emp.name, emp.dept.dept_code, emp.dept.phone)\n else:\n print(emp.name, \"-\", \"-\")",
"def contacts():\n return render_template(\n \"contacts.html\",\n title = \"Contacts\")",
"def list_contacts(self, prefix):\n sub_trie = self.find(prefix.lower())\n _crawl_trie(sub_trie, prefix)",
"def get_contacts(userid):\n return 'get contacts - ' + userid",
"def list_contacts(self):\n return self.contacts",
"def do_print_routes(self, line=''):\n self.fibbing.print_routes()",
"def Run(self):\n return self.ListAllContacts()",
"def view(args):\n print(\"List of all available phonebooks:\")\n for file in glob.glob(\"*.ph\"):\n print(file)",
"def printPath(path):\n result = ''\n for i in range(len(path)):\n result = result + str(path[i])\n if i != len(path) - 1:\n result = result + '->'\n return result",
"def display_path(self, path):\n graph = path.graph\n if not graph:\n return\n for v in sorted(graph.vertices()):\n p = graph.get_vertex_attribute(v, 'xy')\n x, y = to_geometry(p[0]), to_geometry(p[1])\n print('define v{} ellipse 2 2 c_vertex {} {}'.format(v, x, y))\n #print('define v{0}t text {0} 14 white {1} {2}'.format(v, x, y))\n for u, v in graph.edges():\n print('define - link v{} v{} 1 c_edge'.format(u, v))\n # NOTE: this code assumes paths will not move indefinitely\n print('fix /./')",
"def printPath(path):\n result =''\n for i in range(len(path)):\n result = result + str(path[i])\n if i != len(path) -1:\n result = result + '->'\n return result",
"def printPath(path):\r\n result = ''\r\n for i in range(len(path)):\r\n result = result + str(path[i])\r\n if i != len(path) - 1:\r\n result = result + '->'\r\n return result",
"def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts",
"def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts",
"def getallcontacts(self):\n feed_url = self.contacts_client.GetFeedUri(projection='full')\n total_read = 0\n while True:\n print('Retrieving contacts... (%d retrieved so far)' % total_read)\n feed = self.contacts_client.get_feed(uri=feed_url,\n auth_token=None,\n desired_class=gdata.contacts.data.ContactsFeed)\n total_read += len(feed.entry)\n for entry in feed.entry:\n yield entry\n next_link = feed.GetNextLink()\n if next_link is None:\n print('All contacts retrieved: %d total' % total_read)\n break\n feed_url = next_link.href",
"def list_contact(self, key, value):\n self.db.list_contact(\n key,\n value,\n )",
"def get_contacts():\n return jsonify(g.driver.get_contacts())",
"def do_fsearch(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tst = time()\n\t\t\tprint(\"\\nSearch results for: \",str(line))\n\t\t\tfor i in self.cl.fsearch_contact(str(line)):\n\t\t\t\tprint(i)\n\n\t\t\tprint('Time elapsed: {:10f}'.format(time()-st))\n\t\telse:\n\t\t\tprint(\"To search contacts you need to open or create a book.\")",
"def simple_contacts(filename):\n\n try:\n file_path = open(filename, 'r', encoding='utf-8')\n\n except FileNotFoundError:\n pretty_print(\"Cannot open contacts.txt\", \":\")\n sleep(3)\n\n else:\n with file_path:\n print_list = []\n email_dict = {}\n for line in file_path:\n split_line = line.strip().split('|')\n\n if split_line[0].isnumeric():\n\n command = int(split_line[0])\n email = split_line[-1]\n print_list.append(split_line)\n email_dict[command] = email\n\n return print_list, email_dict",
"def get_all_contacts(self,\n hook,\n resource,\n data=None,\n headers=None,\n extra_options=None):\n all_pages = []\n total_contacts = -1\n next_token = None\n\n while len(all_pages) != total_contacts:\n if not next_token:\n result = hook.run('{}/contacts'.format(resource),\n data,\n headers,\n extra_options).json()\n else:\n result = hook.run('{}/contacts/{}'.format(resource, next_token),\n data,\n headers,\n extra_options).json()\n\n all_pages += result.get('contacts', None)\n\n total_contacts = result.get('total_contacts', None)\n\n if 'bookmark' in result:\n next_token = result.get('bookmark', None)\n\n return all_pages",
"def get_contacts():\n # Parse command line options\n try:\n opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'password='])\n except getopt.error, msg:\n print 'python contacts_example.py --user [username] --password [password]'\n sys.exit(2)\n user = ''\n password = ''\n # Process options\n for option, arg in opts:\n if option == '--user':\n user = arg\n elif option == '--password':\n password = arg\n\n while not user:\n print 'NOTE: Please run these tests only with a test account.'\n user = raw_input('Please enter your username: ')\n while not password:\n password = getpass.getpass()\n if not password:\n print 'Password cannot be blank.'\n try:\n contacts = GoogleContacts(user, password)\n except gdata.client.BadAuthentication:\n print 'Invalid user credentials given.'\n exit(1)\n contacts_list = contacts.Run()\n return contacts_list",
"def print_paths(self):\n for path_key, path_value in self.paths.items():\n # Handler for request in path\n self.current_path = path_key\n for request_key, request_value in path_value.items():\n if request_key == 'parameters':\n continue\n self.get_main_title(path_key, request_key)\n self.get_description(request_value)\n self.get_status_code_and_schema_rst(request_value['responses'])\n self.get_params(path_value['parameters'], 'param')\n self.get_params(request_value['parameters'], 'query')"
] | [
"0.67285544",
"0.6681615",
"0.6376812",
"0.60674775",
"0.58926827",
"0.58382577",
"0.5805111",
"0.5754822",
"0.57326335",
"0.56835014",
"0.5655461",
"0.56480414",
"0.5593876",
"0.55586046",
"0.5556061",
"0.5505184",
"0.5464751",
"0.54598343",
"0.54571724",
"0.5447254",
"0.54190886",
"0.53948087",
"0.53764606",
"0.532036",
"0.5268163",
"0.52350944",
"0.52344155",
"0.5219457",
"0.5192159",
"0.5186855"
] | 0.6947206 | 0 |
Takes full name as input and returns the one contact | def pull_one_contact(self, name):
contact = []
for x in self.contacts:
if x[0] == name:
contact_name = x[0]
number = x[1]
email = x[2]
zipcode = x[3]
contact = [contact_name, number, email, zipcode]
print(contact)
return contact, self.contacts.index(x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_contact_first_name(app):\n name = app.get(CONTACT_NAME_KEY)\n if name:\n return ' {}'.format(name.split(' ')[0])",
"def contact_full_name(self):\n first = self.contact_first_name\n last = self.contact_last_name\n if first and last:\n return f'{first} {last}'\n return first or last",
"def getContactByName(self, name):\n for contact in self.contacts:\n if name == contact.name:\n return contact\n\n return None",
"def full_name(first_name, last_name):\n\t\n\treturn first_name + \" \" + last_name",
"def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result",
"def get_or_create_by_full_name(self, full_name):\n tokens = full_name.rsplit(' ', 1)\n if len(tokens) == 2:\n return self.get_or_create(first_name=tokens[0], last_name=tokens[1])\n else:\n return self.get_or_create(first_name=tokens[0])",
"def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")",
"def full_name(first_name, last_name):\n return first_name + \" \" + last_name",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def find_person(name):\n if ' ' in name:\n name = name.replace(',', '')\n else:\n return None\n\n try:\n (first, last) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n try:\n (last, first) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n return None",
"def full_name(self,first_name):\n full_name = self.first_name + ' ' + self.last_name\n return full_name",
"def find_mentor_by_full_name(self, full_name):\n name_list = full_name.split(\" \")\n for mentor in self.mentors:\n if mentor.first_name == name_list[0]:\n if mentor.last_name == name_list[1]:\n print(mentor.first_name, mentor.last_name + \" mentor found in our class.\")\n return mentor\n print(full_name + \" mentor is not in our class.\")\n return False",
"def get_full_name(self):\n\t\treturn self.email",
"def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()",
"def get_full_name(self):\n full_name = \"%s %s\" % (self.firstname, self.lastname)\n return full_name.strip()",
"def return_street(streetname):\r\n if streetname == None:\r\n return streetname\r\n if streetname.split(\" \")[-1] in valid_suffix:\r\n return \" \".join(str(streetname).split(\" \")[:-1])\r\n\r\n return streetname",
"def find_student_by_full_name(self, full_name):\n name_list = full_name.split(\" \")\n for student in self.students:\n if student.first_name == name_list[0]:\n if student.last_name == name_list[1]:\n print(student.first_name, student.last_name + \" found in our class.\")\n return student\n print(full_name + \" student is not in our class.\")\n return False",
"def get_full_name(self):\n\t\tfull_name = '%s %s' % (self.first_name, self.last_name)\n\t\treturn full_name.strip()",
"def fetch_full_name_from_people(self):\n url = 'https://people.djangoproject.com/search/?q={0}'.format(self.full_name.replace(\" \", \"+\"))\n request = requests.get(url)\n soup = BeautifulSoup(request.content)\n vcards = soup.findAll(\"li\", { \"class\" : \"vcard\" })\n if len(vcards) == 1:\n for vcard in soup.findAll(\"li\", { \"class\" : \"vcard\" }):\n people_username = vcard.findAll(\"a\", { \"class\" : \"url fn n\" })[0].attrs['href'].strip(\"/\")\n if self.get_existing_speaker_by_people(people_username):\n self = self.get_existing_speaker_by_people(people_username)\n self.people = people_username\n self.photo = soup.findAll(\"img\", { \"class\" : \"main photo\" })[0].attrs['src']\n self.prenom = soup.findAll(\"span\", { \"class\" : \"given-name\" })[0].renderContents()\n self.save()\n elif len(vcards) == 0:\n return False\n elif len(vcards) > 1:\n raise Exception(\"{0} results found! No records created.\"\n \"\".format(len(vcards)))",
"def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()",
"def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()",
"def get_full_name(self):\n return self.last_name + self.first_name",
"def _get_full_name(self):\n if self.middle_name:\n return u'%s %s %s' % (self.first_name, self.middle_name,\n self.last_name)\n else:\n return u'%s %s' % (self.first_name, self.last_name)",
"def get_full_name(self):\n return self.name+self.last_name",
"def get_contact(self, object_name, user_key = None):\n\t\treturn self.get_object('contact',object_name, user_key = user_key)",
"def get_short_name(self):\n last_name = self.last_name\n first_name = self.first_name\n if (not (last_name and not last_name.isspace())):\n \"\"\" If last name is empty or none then return first name\"\"\"\n return first_name\n else:\n return last_name",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()"
] | [
"0.70435905",
"0.6775905",
"0.6767916",
"0.64946306",
"0.6442939",
"0.6439752",
"0.64186746",
"0.6342796",
"0.6310614",
"0.6242107",
"0.6131929",
"0.611",
"0.60856724",
"0.6079963",
"0.6060275",
"0.6046511",
"0.6039928",
"0.6009768",
"0.60023797",
"0.59919226",
"0.59919226",
"0.59753656",
"0.5969047",
"0.5925496",
"0.5918429",
"0.589666",
"0.58957285",
"0.58957285",
"0.58957285",
"0.58957285"
] | 0.6841914 | 1 |
Sorts the contact book by the name or zipcode of the contacts and displays the contact book in ascending or descending order | def sort_contacts(self, method, order):
method_l = method.lower()
order_l = order.lower()
if method_l == 'name' and order_l == 'asc':
name_sort = sorted(self.contacts, key=lambda x: x[0])
for x in name_sort:
print(x)
return name_sort
elif method_l == 'name' and order_l == 'desc':
name_sort = sorted(self.contacts, key=lambda x: x[0], reverse=True)
for x in name_sort:
print(x)
return name_sort
elif method_l == 'zipcode' and order_l == 'asc':
zip_sort = sorted(self.contacts, key=lambda y: y[3])
for x in zip_sort:
print(x)
return zip_sort
elif method_l == 'zipcode' and order_l == 'desc':
zip_sort = sorted(self.contacts, key=lambda y: y[3],reverse=True)
for x in zip_sort:
print(x)
return zip_sort | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sort_contacts(contacts):\n \n key_list = list(contacts.keys()) #get keys\n key_list.sort() #sort key_list\n sorted_list = [] #initialize sorted list\n for key in key_list:\n contact = (key, contacts[key][0], contacts[key][1]) #create tuple\n sorted_list += [contact] #add tuple to list\n \n return(sorted_list)",
"def sort_list(self, key_):\n options = {\n 'index': 0,\n 'name' : 1,\n 'surname': 2,\n 'email': 3,\n 'phone': 4,\n }\n if key_ in options.keys():\n key_ = options.get(key_)\n\n return(sorted(self.contacts, key = lambda x: x[key_]))",
"def display(phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n for name in sorted(phonebook_data.keys(), key=str.lower):\n print name, phonebook_data[name]",
"def getSorted(self):\n return sorted(self.contacts)",
"def sort_books(self):\n self.foods = sorted(self.foods, key=attrgetter(\"name\"))",
"def sort_entries(self):\n if not len(self.student_list):\n print('There is no contents to sort')\n return\n\n opt = self.input_options(['n', 'a', 'g'], 1, 'Sort by name(n) or average(a) or grade(g)')\n if opt.upper() == 'N':\n self.print_dataframe(self.student_list.sort_values(by=['name', 'average'], ascending=[True,False]))\n elif opt.upper() == 'A' or opt.upper() == 'G':\n self.print_dataframe(self.student_list.sort_values(by=['average', 'name'], ascending=[False,True]))",
"def selection_sort(book_array, sorting):\r\n if sorting == \"author\":\r\n for i in range(len(book_array)):\r\n min_index = i\r\n\r\n for b in range(i + 1, len(book_array)):\r\n author1 = str(book_array[min_index].author)\r\n author2 = str(book_array[b].author)\r\n author1 = author1.split(\" \")\r\n author2 = author2.split(\" \")\r\n if author1[len(author1) - 1] > author2[len(author2) - 1]:\r\n min_index = b\r\n\r\n book_array[i], book_array[min_index] = book_array[min_index], book_array[i]\r\n\r\n elif sorting == \"title\":\r\n for i in range(len(book_array)):\r\n min_index = i\r\n\r\n for b in range(i + 1, len(book_array)):\r\n title1 = str(book_array[min_index].title)\r\n title2 = str(book_array[b].title)\r\n title1 = title1.split(\" \")\r\n title2 = title2.split(\" \")\r\n if title1[0] == \"The\" and title2[0] == \"The\":\r\n if str(title1[1]) > str(title2[1]):\r\n min_index = b\r\n elif title1[0] == \"The\" and title2[0] != \"The\":\r\n if str(title1[1]) > str(book_array[b].title):\r\n min_index = b\r\n elif title1[0] != \"The\" and title2[0] == \"The\":\r\n if str(book_array[min_index].title) > str(title2[1]):\r\n min_index = b\r\n elif str(book_array[min_index].title) > str(book_array[b].title):\r\n min_index = b\r\n\r\n book_array[i], book_array[min_index] = book_array[min_index], book_array[i]",
"def sort_records_by_name(records):\n return sorted(records, key=lambda x: (x.last_name, x.first_name), reverse=True)",
"def contact_sorts(self):\n return self._contact_sorts",
"def ordered_list_by_first_name(d):\n\n ordered_d = dict(sorted(d.items(), key=lambda x: x[1]['First name']))\n\n print(\"\\nPhone book ordered by first name: \")\n for pid, info in ordered_d.items():\n print('\\nPerson number: ', pid)\n for key in info:\n print(key + ':', info[key])",
"def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)",
"def sortby(self):\n ...",
"def sort_roster(sorting_argument):\r\n if sorting_argument == 'Name':\r\n StudentRoster.sort(key=lambda x: x[0])\r\n find_duplicate(StudentRoster)\r\n elif sorting_argument == 'Assignment':\r\n StudentRoster.sort(reverse=True, key=lambda x: x[2])\r\n for student in StudentRoster:\r\n print(student_format(student))\r\n elif sorting_argument == 'Exam':\r\n StudentRoster.sort(reverse=True, key=lambda x: x[3])\r\n for student in StudentRoster:\r\n print(student_format(student))",
"def show_contacts():\n data_list = queries2.contacts()[0]\n table_titles = queries2.contacts()[1]\n title = \"Contacts\"\n return render_template('pages.html', data_list=data_list, title=title, table_titles=table_titles)",
"def view_contacts(self):\n with open(self.filename, \"r\") as contactsFile:\n contacts = self.display_contact(contactsFile.readlines())\n\n if not contacts:\n return self.msgbox(\"No contacts found.\")\n\n self.msgbox(msg=\"\\n\".join(contacts), title=\"Showing All Contacts\")",
"def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result",
"def contact_sorts(self, contact_sorts):\n \n self._contact_sorts = contact_sorts",
"def _sort_by_name(bam_fn):",
"def ordered_list_by_last_name(d):\n\n ordered_d = dict(sorted(d.items(), key=lambda x: x[1]['Last name']))\n\n print(\"\\nPhone book ordered by last name: \")\n for pid, info in ordered_d.items():\n print('\\nPerson number: ', pid)\n for key in info:\n print(key + ':', info[key])",
"def sort_results(self):\n pass",
"def archive_contacts(self, contacts):\n self._post('contact_actions', None, self._build_params(contacts=contacts, action='archive'))",
"def contacts():\n return render_template(\n \"contacts.html\",\n title = \"Contacts\")",
"def sort(request, code):\n filtered = Listing.objects.filter(category=code, is_active=True)\n\n # Returns page with variables. \n return render(request, \"auctions/categories.html\", {\n \"bo\": True,\n \"listings\": filtered,\n })",
"def order(self, searcher, docnums, reverse = False):\n raise NotImplementedError",
"def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))",
"def sort(self):\n self.cards.sort()",
"def sort(self):\n self.cards.sort()",
"def sort_by_name(self, reverse=False):\n self.sort_by(\"name\", reverse=reverse)",
"def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()",
"def get_names(book, phone):\n # поиск в словаре\n i_min = 0\n i_max = len(book)\n i = math.ceil(i_max / 2)\n\n while book[i][0]!=phone:\n #print(i, i_min, i_max, phone, book[i][0])\n #input()\n\n if book[i][0]==phone:\n return book[i][1]\n\n elif book[i][0] < phone:\n i_min = i\n i = i_min + math.ceil((i_max - i_min) / 2)\n\n elif book[i][0] > phone:\n i_max = i\n i = i_min + math.ceil((i_max - i_min) / 2)\n else:\n print(\"что-то пошло не так\")\n return None\n\n if i==i_min or i==i_max:\n return None\n\n if book[i][0]==phone:\n return book[i][1]\n\n return None"
] | [
"0.6573599",
"0.6458184",
"0.6305928",
"0.597216",
"0.5893325",
"0.57316965",
"0.5590592",
"0.5550864",
"0.5550434",
"0.5493951",
"0.54850143",
"0.54358536",
"0.5435454",
"0.5424167",
"0.5414283",
"0.5410285",
"0.54061854",
"0.53835547",
"0.52583706",
"0.5231712",
"0.5225571",
"0.5222699",
"0.5194154",
"0.51886487",
"0.51883656",
"0.5163857",
"0.5163857",
"0.5146335",
"0.5135334",
"0.5132775"
] | 0.69229335 | 0 |
uses images2gif.py to turn all png images in a folder into an animated GIF | def animated_gif(folder_with_images, gif_filename, loop_duration, size):
os.chdir(folder_with_images) # changes directory to the folder with the images
png_files = []
# get list of png files in folder
for fn in os.listdir(folder_with_images):
if fn.endswith('.png'):
png_files.append(fn)
sort_nicely(png_files)
print(png_files)
# number of png_files
num_pngs = len(png_files)
png_time = float(loop_duration)/ float(num_pngs)
images = [Image.open(fn) for fn in png_files]
dim = (size, size) # change sizes for the image file dimension
#for im in images:
# im.thumbnail(dim, Image.ANTIALIAS)
output_file = os.path.join(folder_with_images, gif_filename) # path for output file
writeGif(output_file, images, png_time) # writes out GIF
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_gif(base_folder):\n img_list = []\n search_path = glob.glob(os.path.join(base_folder, '*.png'))\n search_path.sort()\n for f in search_path:\n im = Image.open(f)\n img_list.append(im)\n save_file = os.path.join(base_folder, 'animated_gif.gif')\n img_list[0].save(save_file,\n save_all=True, append_images=img_list[1:], optimize=False, duration=180, loop=0)",
"def make_gif():\n if MIGRATION:\n import imageio\n for n, JPG_DIR in enumerate(JPG_DIRS):\n images, image_file_names = [], []\n for file_name in os.listdir(JPG_DIR):\n if file_name.endswith('.jpg'):\n image_file_names.append(file_name) \n sorted_files = sorted(image_file_names, key=lambda y: int(y.split('_')[1]))\n for i in range(len(sorted_files)): \n file_path = os.path.join(JPG_DIR, sorted_files[i])\n images.append(imageio.imread(file_path))\n imageio.mimsave(FNAME.rsplit('.', 1)[0] + '_migration' + str(n) + '.gif', images, 'GIF', loop=1, fps=FPS)",
"def create_gif():\n anim_file = 'sample/training.gif'\n\n with imageio.get_writer(anim_file, mode='I') as writer:\n filenames = glob.glob('sample/*.jpg')\n filenames = sorted(filenames, key=lambda filename: int(filename[11:-4]))\n for filename in filenames:\n image = imageio.imread(filename)\n writer.append_data(image)\n image = imageio.imread(filename)\n writer.append_data(image)",
"def makeGif(imgPath):\r\n import imageio\r\n filenames = os.listdir(imgPath)\r\n filenames.sort()\r\n images = []\r\n for filename in filenames:\r\n images.append(imageio.imread(os.path.join(imgPath, filename)))\r\n imageio.mimsave(os.path.join(imgPath, \"sharpVid.gif\"), images, duration=0.2)",
"def generate_gif(directory: (\"Folder name\", \"positional\"),\n image_format: ('Image format', 'positional') = '.png',\n print_file_names=False):\n import imageio\n from glob import glob\n from natsort import natsorted\n\n images = []\n # Create a list of file names in the specified directory\n filenames = glob(directory + '/*' + image_format)\n\n filenames = natsorted(filenames, key=lambda y: y.lower())\n # Sort the list 'filenames' using the traditional method.\n # Traditional method -\n # isolate the entire first number in the string, then sort by that number\n # If this step is not included,\n # files will be sorted like so: 0, 100, 110, 200, 3, 420, etc...\n\n if print_file_names: # For troubleshooting\n for i in filenames:\n print(i)\n\n for filename in filenames:\n images.append(imageio.imread(filename))\n # Append each file to the list that will become the gif\n\n imageio.mimsave(directory + '.gif', images)\n # Save the gif as the name of the directory\n # that the images were generated from\n return",
"def make_gif(image_list, gif_name):\n if not gif_name.endswith(\".gif\"):\n gif_name += \".gif\"\n imageio.mimsave(gif_name, [imageio.imread(x) for x in image_list])",
"def make_GIF(image_path: Union[Path, str]) -> None:\n import imageio\n from pygifsicle import optimize\n\n if isinstance(image_path, str):\n image_path = Path(image_path)\n\n image_dir = image_path.parent\n image_file = image_path.stem\n gif_path = image_dir / f\"{image_file}.gif\"\n gif_path = Path(\"./xxxx.gif\")\n with imageio.get_writer(gif_path, mode='I') as writer:\n img_files = sorted((img_file for img_file in image_dir.glob('*.png')))\n for img_file in img_files:\n writer.append_data(imageio.imread(img_file))\n print(f\"{len(img_files)} images loaded from {image_path}\")\n try:\n optimize(gif_path)\n except Exception:\n print(\"gifsicle not installed\")",
"def make_gif(im_dir, out_file, pattern='*.png', fps=10):\n im_files = glob.glob(os.path.join(im_dir, pattern))\n if len(im_files) == 0:\n raise ValueError(f'No images found in {im_dir}!')\n \n writer = imageio.get_writer(out_file, mode='I', fps=fps)\n for im_file in im_files:\n im = imageio.imread(im_file)\n writer.append_data(im)\n writer.close()",
"def create_gifs(folder, time_per_image=0.1):\n # Retrieve images paths\n images_dict = defaultdict(list)\n folders_sorting_key = lambda s: int(s.split(\"_\")[-1])\n obs_folders = [f for f in os.listdir(folder) if f.split(\"_\")[0] == \"observations\"]\n obs_folders = sorted(obs_folders, key=folders_sorting_key)\n for obs_folder in obs_folders:\n for f in os.listdir(os.path.join(folder, obs_folder)):\n image_name = \"_\".join(f.split(\"_\")[:-1])\n images_dict[image_name].append(os.path.join(folder, obs_folder, f))\n # Create gifs\n for name in images_dict:\n target = os.path.join(folder, name + \".gif\")\n LungsLoader._create_gif(images_dict[name], target, time_per_image)",
"def compose_in_gif(images, output_file, delay):\n images[0].save(\n output_file, \n format='GIF', append_images=images[1:], \n save_all=True, duration=delay, loop=0,\n )",
"def make_gifs_test(title, sort, path):\n images = os.listdir(path)\n generated_images = []\n\n for i in range(len(images)):\n file = os.path.join(path, '%s_%s_Results_%03d.png' % (title, sort, i+1))\n generated_images.append(imageio.imread(file))\n\n imageio.mimsave(path + '{}_{}_Test_Results.gif'.format(sort, title), generated_images, fps=2)\n print(\"{} gif file is generated.\".format(title))",
"def animated_gif(file):\n\n\timport os.path\n\timport Image\n\tfrom conf import *\n\tfrom util_errors import gen_error\n\tANIMGIF_TAGID = 2\n\n\tfilepath = os.path.join(PROBATION_DIR, file[\"filename\"])\n\ttry:\n\t\timg = Image.open(filepath)\n\t\ttry:\n\t\t\timg.seek(1)\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tdel(img)\n\t\t\treturn [ANIMGIF_TAGID]\n\texcept Exception, data:\n\t\tgen_error('GENERIC', \"File couldn't be operated on, check perms -- \" + str(data))\n\n\tdel(img)\n\treturn []",
"def saveGIFBatch(directory, path, name=''):\n # for each frame in batch\n images = []\n for filename in directory:\n print(filename)\n images.append(imageio.imread(filename))\n\n name_gif = path + '/' + name + '.gif'\n imageio.mimsave(name_gif, images)",
"def save_gif(frames):\n print(\"Saving gif images!\")\n for i in range(len(frames)):\n im_out_path = \"gif/gif_emilie_will_\" + str(i) + \".png\"\n plt.imsave(im_out_path, frames[i])",
"def AnimFromPng(name, gif=True, fps=15):\n if(gif):\n imgconvert = \"convert \" + \"-delay \" + str(int(1000/fps))\n imgconvert += \" -dispose None \" + name + \"*.png -loop 0 \" + name + \".gif\"\n system(imgconvert)\n print imgconvert\n else:\n aviconvert = \"ffmpeg -i \" + name + \"%03d.png -b:v 2048k -r \" + str(fps) + \" \" + name + \".avi\"\n system(aviconvert)\n print aviconvert",
"def animate(directory,gifname,n_t,step=2,duration=0.2):\n\t# create list of filenames\n\tfnames = dir_fname(directory,\"*\")\n\t# create list of plots\n\timages=[] \n\tfor k in range(0,n_t):\n\t\tk = k*step\n\t\tprint('Mounting Im '+ str(k))\n\t\tFIG_NAME=fnames[k]\n\t\timages.append(imageio.imread(FIG_NAME)) # read\n\t# Now we can assemble the video\n\timageio.mimsave(gifname, images,duration=duration) # create gif\n\tprint('Animation'+gifname+'Ready')\n\treturn True",
"def animate(images):\n images = np.array(images)\n converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)\n imageio.mimsave('./animation.gif', converted_images)\n return embed.embed_file('./animation.gif')",
"def gif(self, delay=20, savefile=None, iterations=0, show_path=False,\n use_ffmpeg=False):\n from sage.misc.sage_ostools import have_program\n have_convert = have_program('convert')\n have_ffmpeg = self._have_ffmpeg()\n if use_ffmpeg or not have_convert:\n if have_ffmpeg:\n self.ffmpeg(savefile=savefile, show_path=show_path,\n output_format='.gif', delay=delay,\n iterations=iterations)\n else:\n if not have_convert:\n msg = \"\"\"\nError: Neither ImageMagick nor ffmpeg appears to be installed. Saving an\nanimation to a GIF file or displaying an animation requires one of these\npackages, so please install one of them and try again.\n\nSee www.imagemagick.org and www.ffmpeg.org for more information.\"\"\"\n else:\n msg = \"\"\"\nError: ffmpeg does not appear to be installed. Download it from\nwww.ffmpeg.org, or use 'convert' to produce gifs instead.\"\"\"\n raise OSError(msg)\n else:\n if not savefile:\n savefile = graphics_filename(ext='.gif')\n if not savefile.endswith('.gif'):\n savefile += '.gif'\n savefile = os.path.abspath(savefile)\n d = self.png()\n cmd = ( 'cd \"%s\"; sage-native-execute convert -dispose Background '\n '-delay %s -loop %s *.png \"%s\"' ) % ( d, int(delay),\n int(iterations), savefile )\n from subprocess import check_call, CalledProcessError\n try:\n check_call(cmd, shell=True)\n if show_path:\n print(\"Animation saved to file %s.\" % savefile)\n except (CalledProcessError, OSError):\n msg = \"\"\"\nError: Cannot generate GIF animation. Verify that convert\n(ImageMagick) or ffmpeg is installed, and that the objects passed to\nthe animate command can be saved in PNG image format.\n\nSee www.imagemagick.org and www.ffmpeg.org for more information.\"\"\"\n raise OSError(msg)",
"def make_gifs_train(title, path):\n images = os.listdir(path)\n generated_images = []\n\n for i in range(len(images)):\n file = os.path.join(path, '%s_Samples_Epoch_%03d.png' % (title, i+1))\n generated_images.append(imageio.imread(file))\n\n imageio.mimsave(path + '{}_Train_Results.gif'.format(title), generated_images, fps=2)\n print(\"{} gif file is generated.\".format(title))",
"def gif(filename, array, fps=10, scale=1.0):\n # ensure that the file has the .gif extension\n filename = filename + '.gif'\n\n # copy into the color dimension if the images are black and white\n if array.ndim == 3:\n array = array[..., np.newaxis] * np.ones(3)\n\n # make the moviepy clip\n clip = ImageSequenceClip(list(array), fps=fps).resize(scale)\n clip.write_gif(filename, fps=fps)\n return True",
"def convert_gif(ctx):\n ctx.run(\n 'ffmpeg '\n '-i resources/demo.mkv -filter_complex \"[0:v] palettegen\" '\n 'resources/palette.png',\n pty=True\n )\n ctx.run(\n 'ffmpeg -i resources/demo.mkv '\n '-i resources/palette.png '\n '-filter_complex \"[0:v][1:v] paletteuse\" '\n 'resources/demo.gif',\n pty=True\n )",
"def writeGif(filename, images, duration=0.1, loops=0, dither=1):\n \n if PIL is None:\n raise RuntimeError(\"Need PIL to write animated gif files.\")\n \n AD = Image.ADAPTIVE\n images2 = []\n \n # convert to PIL\n for im in images:\n \n if isinstance(im,Image.Image):\n images2.append( im.convert('P', palette=AD, dither=dither) )\n \n elif np and isinstance(im, np.ndarray):\n if im.dtype == np.uint8:\n pass\n elif im.dtype in [np.float32, np.float64]:\n im = (im*255).astype(np.uint8)\n else:\n im = im.astype(np.uint8)\n # convert\n if len(im.shape)==3 and im.shape[2]==3:\n im = Image.fromarray(im,'RGB').convert('P', palette=AD, dither=dither)\n elif len(im.shape)==2:\n im = Image.fromarray(im,'L').convert('P', palette=AD, dither=dither)\n else:\n raise ValueError(\"Array has invalid shape to be an image.\")\n images2.append(im)\n \n else:\n raise ValueError(\"Unknown image type.\")\n \n # check duration\n if hasattr(duration, '__len__'):\n if len(duration) == len(images2):\n durations = [d for d in duration]\n else:\n raise ValueError(\"len(duration) doesn't match amount of images.\")\n else:\n durations = [duration for im in images2]\n \n \n # open file\n fp = open(filename, 'wb')\n \n # write\n try:\n n = _writeGifToFile(fp, images2, durations, loops)\n print n, 'frames written'\n finally:\n fp.close()",
"def convert_gif(path: str,\n gif_name: str = 'gif_name.gif',\n frame_limit: int = 100):\n video_path = glob(path + \"/*.mp4\")[-1]\n vidcap = cv2.VideoCapture(video_path)\n success, frame = vidcap.read()\n count = 0\n\n # extracting and saving video frames.\n while success:\n cv2.imwrite(f\"{path}/frame{count}.png\", frame) \n success, frame = vidcap.read()\n count += 1\n if count > frame_limit:\n break\n print(\"total frames:\", count)\n\n # generate animated GIF.\n img, *imgs = [Image.open(f) for f in sorted(glob(path+\"/*.png\"))]\n img.save(fp=f\"{path}/{gif_name}\", format='GIF', append_images=imgs,\n save_all=True, duration=200, loop=0)\n \n # remove frames\n [os.remove(os.path.join(path, f)) for f in glob(path+\"/*.png\")]",
"def convert(filepath, duration=100):\n\n # Getting images from HDF5 file\n h5_file = h5py.File(filepath, 'r')\n images = h5_file['entry']['data']['data']\n # Converting to PIL.Image\n images = [Image.fromarray(i).convert() for i in images]\n # Saving as GIF\n images[0].save(filepath.split('/')[-1] + '.gif',\n save_all=True, append_images=images[1:],\n duration=duration, loop=0)\n return",
"def to_gif(diagram, *diagrams, **params): # pragma: no cover\n path = params.get(\"path\", None)\n timestep = params.get(\"timestep\", 500)\n loop = params.get(\"loop\", False)\n steps, frames = (diagram, ) + diagrams, []\n path = path or os.path.basename(NamedTemporaryFile(\n suffix='.gif', prefix='tmp_', dir='.').name)\n with TemporaryDirectory() as directory:\n for i, _diagram in enumerate(steps):\n tmp_path = os.path.join(directory, '{}.png'.format(i))\n _diagram.draw(path=tmp_path, **params)\n frames.append(Image.open(tmp_path))\n if loop:\n frames = frames + frames[::-1]\n frames[0].save(path, format='GIF', append_images=frames[1:],\n save_all=True, duration=timestep,\n **{'loop': 0} if loop else {})\n try:\n from IPython.display import HTML\n return HTML('<img src=\"{}\">'.format(path))\n except ImportError:\n return '<img src=\"{}\">'.format(path)",
"def create_gif(trajectory):\r\n gif = []\r\n for i in range(len(trajectory)):\r\n p, s = trajectory[i][0]\r\n filename = 'images/car{}.jpeg'.format(i)\r\n save_caronthehill_image(p, s, filename)\r\n img = imageio.imread(filename)\r\n height, width, layers = img.shape\r\n gif.append(img)\r\n \r\n \r\n imageio.mimsave(\"visualization.gif\", gif, 'GIF')",
"def gif_generation(orig_label_path, bound_data_path):\n for sample in os.listdir(bound_data_path):\n if not sample.startswith('.') and osp.isdir(osp.join(bound_data_path, sample)):\n sample_path = osp.join(bound_data_path, sample)\n for artery in os.listdir(sample_path):\n orig_label_pick_path = osp.join(orig_label_path, sample, artery, 'data.pkl')\n bound_pick_path = osp.join(bound_data_path, sample, artery, 'data.pkl')\n\n # function to save result of each artery into gif\n save_gif_artery(orig_label_pick_path, bound_pick_path)",
"def save_GIF(ht, name=\"trajectory\"):\n # Generation of images\n counter = 0\n images = []\n for e in range(0, len(ht), 3):\n p = ht[e][0]\n s = ht[e][1]\n save_caronthehill_image(p, s, \"image\\\\state\" + str(counter) + \".png\")\n images.append(imageio.imread(\"image\\\\state\" + str(counter) + \".png\"))\n counter += 1\n imageio.mimsave(\"{}.gif\".format(name), images)",
"def write_gifs(self, clip, gifs_dir, **kwargs):\n for start, end, _, _ in self:\n name = \"%s/%08d_%08d.gif\" % (gifs_dir, 100 * start, 100 * end)\n clip.subclip(start, end).write_gif(name, **kwargs)",
"def build_list_gif(self, pathgif, nocv2 = True):\n dsize = (self.size, self.size)\n gif = mimread(pathgif)\n # convert form RGB to BGR\n listcv2 = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in gif]\n listgif = []\n for img in listcv2:\n listgif.append(cv2.resize(img, dsize))\n if nocv2:\n return self.convert_list_images(listgif)\n else:\n return listgif"
] | [
"0.8201988",
"0.784977",
"0.78173923",
"0.77489877",
"0.7678784",
"0.7650474",
"0.7548243",
"0.7523239",
"0.7439131",
"0.7356226",
"0.72612065",
"0.72492033",
"0.72406083",
"0.7114576",
"0.70953244",
"0.7068107",
"0.70551455",
"0.6906683",
"0.69029206",
"0.67948526",
"0.6790206",
"0.6741159",
"0.67271346",
"0.6681377",
"0.6627549",
"0.65682286",
"0.6510595",
"0.64601934",
"0.63987976",
"0.63486856"
] | 0.8292782 | 0 |
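For reference, the animated_gif document in the record above cannot run as shown: it assumes os, PIL's Image, a sort_nicely helper, and writeGif from images2gif, none of which are imported in the snippet. A self-contained sketch of the same routine written against plain Pillow (an assumed substitute for images2gif, with natural_key standing in for sort_nicely) could look like this:

import os
import re

from PIL import Image


def natural_key(name):
    # split digit runs out of the filename so that frame2.png sorts before frame10.png
    return [int(part) if part.isdigit() else part for part in re.split(r"(\d+)", name)]


def animated_gif(folder_with_images, gif_filename, loop_duration, size):
    # collect the PNG frames in the folder and sort them in natural (human) order
    png_files = sorted(
        (name for name in os.listdir(folder_with_images) if name.endswith(".png")),
        key=natural_key,
    )
    if not png_files:
        raise ValueError("no .png files found in " + folder_with_images)

    # spread the requested loop duration (seconds) evenly over the frames;
    # Pillow expects the per-frame duration in milliseconds
    frame_ms = int(1000.0 * float(loop_duration) / len(png_files))

    frames = []
    for name in png_files:
        image = Image.open(os.path.join(folder_with_images, name))
        image.thumbnail((size, size))  # downscale, mirroring the commented-out thumbnail call
        frames.append(image)

    output_file = os.path.join(folder_with_images, gif_filename)
    frames[0].save(
        output_file,
        format="GIF",
        save_all=True,
        append_images=frames[1:],
        duration=frame_ms,
        loop=0,
    )
    return output_file

Keeping loop_duration in seconds preserves the original call signature; the conversion to milliseconds only exists because that is the unit Pillow's GIF writer uses for its duration argument.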
Display the Tweet form | def get(self, request, *args, **kwargs):
return render(request, 'tweets/index.html', {'form': self.form_class(user=request.user)}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_form():\n\n prompts = story.prompts\n\n return render_template(\"base.html\", prompts = prompts )",
"def user_timeline(username=None): # pylint: disable=unused-argument\n form = PostTweetForm()\n if form.validate_on_submit():\n try:\n current_user.post_tweet(form.tweet.data)\n flash('Tweet successfully posted')\n except ValueError as excep:\n flash(str(excep))\n return render_template('timeline.html',\n general=False,\n show_username=True,\n form=form)\n\n return render_template('timeline.html',\n general=False,\n show_username=True,\n form=form)",
"def post(self, request, *args, **kwargs):\n form = self.form_class(data=request.POST, user=request.user)\n\n if form.is_valid():\n instance = form.save()\n\n # Deal with posting twitter status\n posted_status = False\n if instance.category != get_default_category():\n # Create tweeted message and post status\n tweeted_message = \"RT:<%s>%s#Custom#%s\" % (instance.created_by.username, instance.message, instance.category)\n api = twitter.Api(consumer_key=settings.TWITTER_CONSUMER_KEY, consumer_secret=settings.TWITTER_CONSUMER_SECRET,\n access_token_key=settings.TWITTER_USER_OAUTH_TOKEN, access_token_secret=settings.TWITTER_USER_OAUTH_TOKEN_SECRET)\n api.PostUpdate(status=tweeted_message)\n\n # Save time published and update postedStatus\n instance.published_on = timezone.now()\n instance.save()\n posteds_status = True\n\n return render(request, 'tweets/index.html', {'form': self.form_class(user=request.user), 'success': True, 'posted_status': posted_status})\n return render(request, 'tweets/index.html', {'form': form})",
"def show_form():\n\n return render_template(\"form.html\")",
"def tweet_btn_clicked(self,widget, data=None):\n tweet_text = self.get_text(\"txt_tweet\") \n \n #double check the length and go.\n if (len(tweet_text) <= 140): \n self.twitter.UpdateStatus(tweet_text) \n status_label = self.builder.get_object(\"status_lbl\")\n #clear the text box and update the status\n self.builder.get_object(\"txt_tweet\").set_text(\"\")\n my_tweet_bufffer = self.builder.get_object(\"personal_tweet_buffer\")\n iters = my_tweet_bufffer.get_end_iter()\n my_tweet_bufffer.insert(iters, \"%s\\n\\n\" % tweet_text)\n else:\n status_label = self.builder.get_object(\"status_lbl\")\n status_label.set_text(\"Too long: Tweet != Blog -__-\")\n print tweet_text",
"def search_tweets(request):\n return render(request, 'ede/search.html')",
"def show_form():\n\n story_title = request.args[\"madlib\"]\n for story in stories.values():\n if story.title == story_title:\n story_for_form = story\n \n return render_template('form.html', s=story_for_form, story_title=story_title)",
"def command_tweet(self, bot, update):\n\n bot.sendChatAction(update.message.chat_id, action='typing')\n\n tweet = ext.get_last_tweet(self.config['twitter'])\n\n for url in tweet.get('images', []):\n self.send_photo_url(bot, update, url)\n\n messages = [\n u'{text}',\n '[@{user[screen_name]}](https://twitter.com/{user[screen_name]}) '\n '- {ago}'\n ]\n\n for msg in messages:\n self.send_message(bot, update, msg.format(**tweet))",
"def form(update, context):\n update.message.reply_text(\"\"\"Fill out the form 👇 👇 👇\n https://forms.gle/VREhdtCNqJ6rZNfQ7\"\"\")",
"def display_form():\n return render_template(\"form.html\",\n title=\"Welcome Form\",\n heading=\"Please fill in this form\",)",
"def story_form(story_id):\n story = story_list[story_id]\n return render('story_form.html', prompts=story.prompts, story_id = story_id)",
"def view_form(self):\n if not self.valid:\n raise MudderyError(\"Invalid form: %s.\" % self.form_name)\n\n context = self.get_context()\n\n return render(self.request, self.template_file, context)",
"def form():\n return render_template(\n 'form.html'\n )",
"async def tweet_menu(self, ctx, post_list: list,\n message: discord.Message=None,\n page=0, timeout: int=30):\n s = post_list[page]\n colour =\\\n ''.join([randchoice('0123456789ABCDEF')\n for x in range(6)])\n colour = int(colour, 16)\n created_at = s.created_at\n created_at = created_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n post_url =\\\n \"https://twitter.com/{}/status/{}\".format(s.user.screen_name, s.id)\n desc = \"Created at: {}\".format(created_at)\n em = discord.Embed(title=\"Tweet by {}\".format(s.user.name),\n colour=discord.Colour(value=colour),\n url=post_url,\n description=desc)\n em.add_field(name=\"Text\", value=s.text)\n em.add_field(name=\"Retweet count\", value=str(s.retweet_count))\n if hasattr(s, \"extended_entities\"):\n em.set_image(url=s.extended_entities[\"media\"][0][\"media_url\"] + \":thumb\")\n if not message:\n message =\\\n await self.bot.send_message(ctx.message.channel, embed=em)\n await self.bot.add_reaction(message, \"⬅\")\n await self.bot.add_reaction(message, \"❌\")\n await self.bot.add_reaction(message, \"➡\")\n else:\n message = await self.bot.edit_message(message, embed=em)\n react = await self.bot.wait_for_reaction(\n message=message, user=ctx.message.author, timeout=timeout,\n emoji=[\"➡\", \"⬅\", \"❌\"]\n )\n if react is None:\n await self.bot.remove_reaction(message, \"⬅\", self.bot.user)\n await self.bot.remove_reaction(message, \"❌\", self.bot.user)\n await self.bot.remove_reaction(message, \"➡\", self.bot.user)\n return None\n reacts = {v: k for k, v in numbs.items()}\n react = reacts[react.reaction.emoji]\n if react == \"next\":\n next_page = 0\n if page == len(post_list) - 1:\n next_page = 0 # Loop around to the first item\n else:\n next_page = page + 1\n return await self.tweet_menu(ctx, post_list, message=message,\n page=next_page, timeout=timeout)\n elif react == \"back\":\n next_page = 0\n if page == 0:\n next_page = len(post_list) - 1 # Loop around to the last item\n else:\n next_page = page - 1\n return await self.tweet_menu(ctx, post_list, message=message,\n page=next_page, timeout=timeout)\n else:\n return await\\\n self.bot.delete_message(message)",
"def index(request):\n form = textForm()\n return render(request, 'index.html', {'form': form})",
"def tweet_at_provider(self):\n url = \"https://twitter.com\"\n self.driver.get(url)\n\n time.sleep(3)\n\n email_box = self.driver.find_element_by_css_selector(\"#react-root > div > div > div > main > div > div > div \"\n \"> div.css-1dbjc4n.r-1777fci.r-1qmwkkh.r-nsbfu8 > \"\n \"div.css-1dbjc4n.r-1awozwy.r-1d2f490.r-dnmrzs.r-1dye5f7\"\n \".r-u8s1d.r-19lq7b1 > div > form > div > div:nth-child(\"\n \"6) > div > label > div > \"\n \"div.css-1dbjc4n.r-18u37iz.r-16y2uox.r-1wbh5a2.r-1wzrnnt\"\n \".r-1udh08x.r-xd6kpl.r-1pn2ns4.r-ttdzmv > div > input\")\n email_box.send_keys(TWITTER_USERNAME)\n\n password_box = self.driver.find_element_by_css_selector(\"#react-root > div > div > div > main > div > div > \"\n \"div > div.css-1dbjc4n.r-1777fci.r-1qmwkkh.r-nsbfu8 > \"\n \"div.css-1dbjc4n.r-1awozwy.r-1d2f490.r-dnmrzs.r\"\n \"-1dye5f7.r-u8s1d.r-19lq7b1 > div > form > div > \"\n \"div:nth-child(7) > div > label > div > \"\n \"div.css-1dbjc4n.r-18u37iz.r-16y2uox.r-1wbh5a2.r\"\n \"-1wzrnnt.r-1udh08x.r-xd6kpl.r-1pn2ns4.r-ttdzmv > div \"\n \"> input\")\n password_box.send_keys(TWITTER_PASSWORD)\n\n time.sleep(3)\n\n log_in_button = self.driver.find_element_by_xpath('//*[@id=\"react-root\"]/div/div/div/main/div/div/div/div['\n '1]/div[1]/div/form/div/div[3]/div')\n log_in_button.click()\n\n tweet_text = f\"Hey Spectrum, why is my Internet speed {self.down}down/{self.up}up when I pay for\" \\\n f\" {PROMISED_DOWN}down/{PROMISED_UP}up? \"\n\n time.sleep(3)\n\n tweet_box = self.driver.find_element_by_class_name(\n \"public-DraftStyleDefault-block.public-DraftStyleDefault-ltr\")\n tweet_box.send_keys(tweet_text)\n\n time.sleep(3)\n\n if self.down < PROMISED_DOWN or self.up < PROMISED_UP:\n tweet_button = self.driver.find_element_by_xpath(\n '//*[@id=\"react-root\"]/div/div/div[2]/main/div/div/div/div/div/div[2]/div/div[2]/div['\n '1]/div/div/div/div[ '\n '2]/div[4]/div/div/div[2]/div[3]/div/span/span')\n tweet_button.click()\n else:\n print(\"Internet speeds are in line with Spectrum's promised speeds.\")\n\n time.sleep(3)\n\n self.driver.quit()",
"def displayTwitterTweets(self, twitResult):\n strVal = self.txtTwitter.get(\"1.0\", 'end')\n if (strVal.strip()):\n self.txtTwitter.delete(\"1.0\", 'end')\n twitterCCount = 0\n twitterICount = 0\n\n for myTwitData in twitResult:\n retweetsArray.append(myTwitData.commentCount)\n likesArray.append(myTwitData.interactionCount)\n twitterCCount += myTwitData.commentCount # RETWEETS\n twitterICount += myTwitData.interactionCount # LIKES\n self.txtTwitter.insert(tk.END, \"\\n=====================================================\")\n for tweet in myTwitData.getTopComments():\n if 'twitter' in tweet.url.lower():\n self.txtTwitter.insert(tk.END, \"\\nTweet: \\n\" + tweet.getText())\n self.txtTwitter.insert(tk.END, \"\\n\\nRead More: \" + tweet.getUrl())\n self.txtTwitter.insert(tk.END, \"\\n\\nPosted On: \" + str(tweet.getDate()))\n self.txtTwitter.insert(tk.END, \"\\n---------------------------------------------------------------------------------------------\")\n self.lblRetweets.configure(text=\"Retweets: \" + str(twitterCCount))\n self.lblLikes.configure(text=\"Likes: \" + str(twitterICount))",
"def TweetHandler(self):\n self.response.out.write('<br/><br/>Tweeting<br/>')\n self.response.out.write('this info will be tweeted:<br/>')\n # oldest non-tweeted and prepared\n oldest_changeset = Changeset.all().order('created_at').filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)\n if not oldest_changeset:\n self.response.out.write('nothing to tweet')\n return\n else:\n c = oldest_changeset[0]\n \n config = get_config()\n\n # do not tweet from localhost\n if not 'localhost' in self.request.url:\n auth = tweepy.OAuthHandler(config[\"consumer_key\"], config[\"consumer_secret\"])\n auth_data = OAuthAccessToken.all().filter('specifier =', config[\"twitter_username\"]).fetch(1)[0]\n auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)\n self.response.out.write('<br/>tweeting with oauth:<br/>')\n api = tweepy.API(auth)\n self.response.out.write(\"id: %d\" % c.id)\n self.response.out.write(\"user: %s\" % c.user)\n self.response.out.write(\"comment: %s\" % c.comment)\n self.response.out.write(\"tweet: %s\" % c.tweet)\n try:\n api.update_status(c.tweet)\n except tweepy.error.TweepError, e: \n self.response.out.write( 'failed: %s' % e.reason )\n if \"Status is a duplicate\" in e.reason:\n c.is_tweeted = True\n c.put()\n return\n else:\n self.response.out.write('<br/>localhost - nothing actually tweeted:')\n\n self.response.out.write('<br/>%s' % c.tweet)\n\n c.is_tweeted = True\n c.put()",
"def show_add_actor(self):\n\t\tformulario = view_form_actor.Form(self)\n\t\tformulario.exec_()\n\t\tself.load_data()",
"def show_new_user_form():\r\n return render_template('user-form.html')",
"def render_form(self, teacher=\"\", temail=\"\", tphone=\"\", specialty=\"\", error=\"\"):\n t = jinja_env.get_template(\"createteacher.html\")\n response = t.render(teacher=teacher, temail=temail, tphone=tphone, specialty=specialty, error=error)\n self.response.out.write(response)",
"def show_twitter(self):\n\n # -- THIS DOESN'T WORK B/C OF PACK()...NEED TO REDRAW EVERYTHING\n \n # -- change the button\n self.twitter_hide.config(text='hide twitter') \n self.twitter_hide.config(command=self.hide_twitter)\n\n self.twitter_canvas.pack()\n self.twitter_scrollbar.pack()",
"def view_task(self, task):\n self.layout.clear_widgets()\n self.add_AdDescription(task.url, task.description)\n self.add_CheckBox(task.checkbox_rating)\n self.add_Slider(task.slider_rating)\n self.add_Toggle_Button(task.toggle_button_rating)\n self.layout.add_widget(TextInput(hint_text = 'Add a comment...', multiline = True))\n self.add_NextButton()\n self.add_Exit_Button()",
"def teaser_index(request):\n # Thank you message for users who signed up\n if request.session.pop('thanks', False):\n return render_to_response('teaser/teaser_thanks.html', {}, RequestContext(request))\n\n # Handle the signup form\n if request.method == 'POST':\n form = forms.TeaserSignupForm(request.POST)\n if form.is_valid():\n signup = form.save()\n request.session['thanks'] = True\n return redirect(teaser_index)\n else:\n form = forms.TeaserSignupForm()\n\n return render_to_response('teaser/teaser_index.html', {\n 'form': form,\n 'adjective': random.choice(BDAY_PONY_ADJECTIVES),\n }, RequestContext(request))",
"def show_forms():\n\n return render_template(\"signup_login.html\")",
"def sentimental_analysis_component():\n sentence = st.text_area(\"Enter Text to Analyze:\")\n if st.button(\"Submit\"):\n result = sentiment_analyzer_scores(sentence)\n st.success(result)\n\n #if st.checkbox('Lookup Twitter Status', True):\n id_input = st.text_area(\"Enter Tweet ID to Analyze:\")\n st.markdown(' e.g. 1333434829438906376 or 1257038775785422848')\n\n # Modules for twitter API\n import tweepy \n import os\n \n # API Keys\n consumer_key = os.environ.get('TWITTER_CONSUMER_KEY')\n consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET')\n access_token = os.environ.get('TWITTER_ACCESS_TOKEN')\n access_token_secret = os.environ.get('TWITTER_ACCESS_TOKEN_SECRET')\n \n # Auth type and API options\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth,wait_on_rate_limit=True)\n\n # Tweet ID to fetch\n id_ = [id_input]\n \n # API Call \n statuses = api.statuses_lookup(id_, tweet_mode=\"extended\")\n \n # API Response to variables\n for status in statuses:\n tweet_text = status.full_text\n tweet_user = status.user.screen_name\n covid_check = covid_mention(tweet_text.lower())\n\n if st.button(\"Analyze Tweet\"):\n lookup_result = sentiment_analyzer_scores(tweet_text)\n st.markdown('## Tweet Sentiment Results')\n st.success(lookup_result)\n st.markdown(f'## Full Text:')\n st.success(f'{tweet_text}')\n\n st.markdown(f\"\"\"## Tweet Stats:\n Tweet ID:{id_}\n User: {status.user.screen_name}\n Created at: {status.created_at}\n Source: {status.source}\n Engagement:\n Retweets: {status.retweet_count}\n Favourited: {status.favorite_count}\n Pandemic Related: {covid_check}\"\"\")",
"def display_form():\n\n return render_template('add_new_student.html')",
"def preview_handler(self, _, __):\r\n template = self.system.render_template('lti_form.html', self.get_context())\r\n return Response(template, content_type='text/html')",
"def render_form():",
"def Wraith_Form(self):\t\t\n\t\tprint(self.name.Title() + \"Wraith\")"
] | [
"0.65626603",
"0.62451655",
"0.61054873",
"0.60839933",
"0.6069921",
"0.58719504",
"0.58517045",
"0.5813749",
"0.5784378",
"0.5781207",
"0.56855977",
"0.5594863",
"0.5562113",
"0.5535937",
"0.5510648",
"0.5489431",
"0.54881275",
"0.5482093",
"0.54636025",
"0.5461098",
"0.5456793",
"0.54361695",
"0.5415173",
"0.5405155",
"0.5393397",
"0.538263",
"0.5373194",
"0.53622586",
"0.5356664",
"0.5345988"
] | 0.6484199 | 1 |
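The get() method in the document above is only meaningful inside a class-based view that defines form_class; a minimal sketch of such a class (the view name and the TweetForm import path are assumptions, while the template name and the user keyword come from the snippet) might be:

from django.shortcuts import render
from django.views import View

from .forms import TweetForm  # hypothetical location of the form class


class TweetFormView(View):
    # GET renders the empty tweet form; a matching post() would validate and save it,
    # much like the post() handler listed among this record's negatives.
    form_class = TweetForm
    template_name = 'tweets/index.html'

    def get(self, request, *args, **kwargs):
        # the form receives the current user so it can scope its fields to that user
        form = self.form_class(user=request.user)
        return render(request, self.template_name, {'form': form})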
{{{ Docstrings Reads GO and TPM files into dictionary. }}} | def read_GO_TPM_file(self, *GO_TPM_file):
dictionary = {}
for i in GO_TPM_file:
with open(i, 'r') as f:
f = f.readlines()[1:]
f = map(lambda x: x.strip(), f)
if '_GO.txt' in i:
dictionary['GO'] = f
else:
dictionary['TPM'] = f
return(dictionary['GO'], dictionary['TPM']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def file_to_dictionary():\n\n return;",
"def build_GO_TPM_dict(self, GO_TPM_file_list, Is_GO):\n\n f = map(lambda x: x.split('\\t'), GO_TPM_file_list)\n if Is_GO:\n go_dict = dict((i[0], i[1].split('`')) for i in f)\n return go_dict\n else:\n tpm_dict = dict((i[0], int(i[1])) for i in f)\n return tpm_dict",
"def _read_output_files(self):\n self.manage = {} # Empty the dictionary matching phrases\n self.manage['spin'] = (re.compile(' *net spin of'), self._read_spin)\n self.manage['nelect'] = (re.compile(' *number of electrons'), self._read_nelect)\n self.manage['cellcontents'] = (re.compile(' *Unit Cell'), self._read_cellcontents)\n self.manage['pspots'] = (re.compile(' *Files used for pseudopotentials:'), self._read_pspot)\n self.manage['masses'] = (re.compile(' *Mass of species in AMU'), self._read_masses)\n self.manage['kpoints'] = (re.compile(' *Number of kpoints used'), self._read_kpoints)\n self.manage['kpoint_grid'] = (re.compile(' *MP grid size for SCF'), self._read_kpoint_grid)\n self.manage['finalenergy'] = (re.compile(' *Final energy, E'), self._read_energies)\n self.manage['finalenergy2'] = (re.compile('Final energy ='), self._read_energies2)\n self.manage['finalenergy3'] = (re.compile('Dispersion corrected final energy'), self._read_energies3)\n self.manage['energy_cutoff'] = (re.compile(' *plane wave basis set cut'), self._read_energy_cutoff)\n self.manage['nbands'] = (re.compile(' *number of bands'), self._read_nbands)\n self.manage['pressure'] = (re.compile(' *\\* *Pressure: '), self._read_external_pressure)\n self.manage['opticalDielectric'] = (re.compile(' *Optical Permittivity'), self._read_dielectric)\n self.manage['bornCharges'] = (re.compile(' *Born Effective Charges'), self._read_born_charges)\n # For the .phonon file\n self.manage['frequency'] = (re.compile(' q-pt= 1 0.000000 0.000000 0.000000 1.0000000000 *$'), self._read_frequencies)\n self.manage['nbranches'] = (re.compile(' Number of branches'), self._read_nbranches)\n for f in self._outputfiles:\n self._read_output_file(f)\n return",
"async def load(self, file: IO) -> dict:",
"def load_dcos_conf_templete(fpath):\n p_key = re.compile(r' *- path: (?P<g1>.*)')\n c_key = re.compile(r' *content: [|].*')\n h_key = re.compile(r' *#.*$')\n\n with fpath.open() as fp:\n\n aggregator = {'package': []}\n path = ''\n content = []\n\n for line in fp:\n pk_match = p_key.match(line)\n ck_match = c_key.match(line)\n hk_match = h_key.match(line)\n\n if pk_match:\n\n if path:\n item = {'path': path, 'content': ''.join(content)}\n aggregator['package'].append(item)\n path = pk_match.group('g1')\n content = []\n else:\n path = pk_match.group('g1')\n elif ck_match:\n continue\n elif hk_match:\n continue\n else:\n if not path:\n continue\n else:\n content.append(line.strip(' '))\n\n item = {'path': path, 'content': ''.join(content)}\n aggregator['package'].append(item)\n\n return aggregator",
"def read_file():\r\n # https://blog.csdn.net/lzgs_4/article/details/50371030\r\n\r\n path = input(\"Please input the path of the dataset (e.g. ...\\cranfieldDocs) : \")\r\n # path = r\"C:\\Users\\15451\\PycharmProjects\\Nan\\dataset\\cranfieldDocs\" # the path of all the files\r\n\r\n files = os.listdir(path) # obtain all the file names in the file folder\r\n file_content = {}\r\n for file in files: # file is the file name\r\n f = open(path + \"/\" + file)\r\n iter_f = iter(f)\r\n str = \"\"\r\n for line in iter_f:\r\n line = line.strip()\r\n line = line.lower()\r\n str = str + \" \" + line\r\n str = remove_field_name(str)\r\n str = split_to_obtain_token(str)\r\n file_content[file] = str # str is the contect of the file choosen\r\n return file_content",
"def ReadFromFile(self):\n\n data = \"\"\n try:\n with open(self.fileLoc, \"r\") as file:\n data += file.read()\n except IOError:\n with open(self.fileLoc, \"w\") as file:\n file.write(\" \")\n return {}\n \n if len(data) == 0:\n return {}\n\n data = self.Decrypt(data)\n\n data = \"\".join(data.split())\n kvstrings = data.split(\"%\")\n kvstrings = filter(None, kvstrings)\n\n pairs = {}\n for x in kvstrings:\n kv = x.split(\":\")\n pairs[kv[0]] = kv[1]\n\n return pairs",
"def _file_dict(self, fn_):\n if not os.path.isfile(fn_):\n err = \"The referenced file, {} is not available.\".format(fn_)\n sys.stderr.write(err + \"\\n\")\n sys.exit(42)\n with salt.utils.files.fopen(fn_, \"r\") as fp_:\n data = fp_.read()\n return {fn_: data}",
"def read(file_path: str) -> dict:\n\n if not os.path.isfile(file_path):\n raise FileNotFoundError(\"The file `%s` must exist and be a BLM file\" % file_path)\n\n file_contents = open(file_path, 'r').read()\n headers = parse_headers(file_contents)\n definitions = parse_definitions(headers, file_contents)\n data = parse_data(headers, definitions, file_contents)\n\n return {'headers': headers, 'definitions': definitions, 'data': data}",
"def parse(self):\n results = {}\n\n # get the signature info via the codesign utility\n args = [\"codesign\",\"-dvvvv\", self.file_name]\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, error_output = proc.communicate()\n if proc.returncode: #error, probably file not signed\n results[\"signature\"] = error_output\n else:\n results[\"signature\"] = output\n\n #get the file object\n file_object = open(self.file_name, 'rb')\n\n\n #Use the macho library to parse out some structures\n pFile = MachO(self.file_name)\n\n #if this is a fat file, it will have multiple Mach-O objects inside it\n results[\"FAT_header\"] = self.parseFATHeader(file_object, pFile)\n\n #parse all the Mach-O headers\n i = 1\n for h in pFile.headers:\n results[\"MachO_header\" + str(i)] = self.parseMachOHeader(h, file_object)\n i +=1\n\n #close the file\n file_object.close()\n\n #return the dict of results\n return results",
"def readfile(filename):\n import ROOT\n f = ROOT.TFile(filename)\n keys = f.GetListOfKeys()\n\n extract = lambda _type: filter(lambda x: x.GetClassName() == _type,keys)\n builddict = lambda _type: dict(map(lambda x: (x.GetName(),f.Get(x.GetName())),\n extract(_type)))\n\n # Retrieve all the stuff\n obsdict = builddict('RooRealVar')\n data = builddict('RooDataSet')\n datahists = builddict('RooDataHist')\n data.update(datahists)\n modeldict = builddict('RooRealPdf')\n\n databkgdict = dict(filter(lambda (x,y): x.find('dvbkg') == 0, data.iteritems()))\n datasigdict = dict(filter(lambda (x,y): x.find('dvsig') == 0, data.iteritems()))\n\n return f,obsdict,modeldict,databkgdict,datasigdict",
"def init() -> None:\n init_dict()\n parse_file(\"alphabet.txt\", letters)\n parse_file(\"numbers.txt\", numbers)\n parse_file(\"symbols.txt\", symbols)",
"def user_defined_descriptions(path):\n try:\n lines = [line.rstrip() for line in open(path).readlines()]\n return dict([x.split(maxsplit=1) for x in lines])\n except FileNotFoundError:\n return dict()",
"def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}",
"def read(cls, filename: str) -> dict:\n raise NotImplemented(\"Read method for MCPL is not implemented nor required\")",
"def parse_gc(gc):\n prop_gc = {}\n with open(gc, 'r') as f:\n for line in f:\n tmp = line.strip().split()\n prop_gc[tmp[0]] = tmp[1]\n return prop_gc",
"def main():\n print \"=\" * 78\n print \"%s %s\" % (__prog_name__, __version__)\n debug, input_file_names = check_cli()\n if not input_file_names:\n _error(\"No input file name found!\\n\\n%s\" % __help__)\n for input_file_name in input_file_names:\n print \"* Reading\", input_file_name\n file_base_name = os.path.splitext(os.path.basename(input_file_name))[0]\n file_dir_name = os.path.dirname(input_file_name)\n sections = {}\n tex_map = {}\n with open(input_file_name, 'rU') as in_fd:\n sections = get_sections(in_fd.read())\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"sec\",\n {\"sections\": sections})\n\n if not sections:\n _error(\"Nothing could be read from '%s'.\\nIs this an Oolite .dat file?\" \\\n % input_file_name)\n\n # Magically call the 'check' functions\n for name in sections.keys():\n f_name = \"check_%s\" % name.lower()\n if f_name in globals().keys():\n if not globals()[f_name](sections):\n _error(\"Number of entries in '%s' section is different as declared!\" % name)\n\n def get_data(name, sections=sections):\n \"\"\"Returns the 'data' object from the 'name' one found in the\n 'sections' one.\n :sections: dictionary: Object returned by 'get_sections'.\n :name: string: The name of the section to get the 'data'.\n Returns a list of 'lines'.\n \"\"\"\n return sections.get(name, {}).get(\"data\", [])\n\n oti_file_name = build_file_path(file_dir_name, file_base_name, \"oti\")\n tex_map = parse_names(get_data(\"NAMES\"), oti_file_name)\n\n tex_refs, tex_lines_out = parse_textures(get_data(\"TEXTURES\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"tex\",\n {\"tex_refs\": tex_refs,\n \"tex_lines_out\": tex_lines_out})\n\n # Update the tex_map object if textures indexes and names are both\n # used in 'TEXTURES'.\n if sorted(tex_map.keys()) != sorted(tex_refs.get(\"named\").keys()):\n tex_map = update_tex_map(tex_map,\n set(tex_refs[\"named\"].keys()).difference(tex_map.keys()))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"txm\",\n {\"tex_map\": tex_map})\n\n n_verts, vertex_lines_out = parse_vertex(get_data(\"VERTEX\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"ver\",\n {\"n_verts\": n_verts,\n \"vertex_lines_out\": vertex_lines_out})\n\n n_normals, normals_lines_out = parse_normals(get_data(\"NORMALS\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"nor\",\n {\"n_normals\": n_normals,\n \"normals_lines_out\": normals_lines_out})\n\n n_faces, faces_groups = parse_faces(get_data(\"FACES\"), tex_refs,\n normals_lines_out)\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"fac\",\n {\"n_faces\": n_faces,\n \"faces_groups\": faces_groups})\n\n output_file_name = build_file_path(file_dir_name,\n file_base_name, 'obj')\n material_file_name = build_file_path(file_dir_name,\n file_base_name, 'mtl')\n mtl_lib_file = os.path.basename(material_file_name)\n\n write_obj(output_file_name, file_base_name, mtl_lib_file,\n tex_lines_out, tex_map, n_verts, vertex_lines_out,\n n_normals, normals_lines_out, n_faces, faces_groups)\n\n write_mtl(material_file_name, tex_map)\n\n _exit(\"* Done\")",
"def load(self):\n \n with open(os.path.join(self.output_dir, 'terms.dict'), 'rb') as f:\n self.term_id_map = pkl.load(f)\n with open(os.path.join(self.output_dir, 'docs.dict'), 'rb') as f:\n self.doc_id_map = pkl.load(f)",
"def forges():\n\n forges = {}\n\n for forge_path in sorted(glob.glob(\"/opt/service/forge/*.yaml\")):\n if forge_path.split(\"/\")[-1] not in [\"fields.yaml\", \"values.yaml\"]:\n with open(forge_path, \"r\") as forge_file:\n forges[forge_path.split(\"/\")[-1].split(\".\")[0]] = yaml.safe_load(forge_file)[\"description\"]\n\n return forges",
"def read_dictionary():\n\tglobal dictionary\n\twith open(FILE, \"r\") as f:\n\t\tfor words in f:\n\t\t\tdictionary += words.split()",
"def get_completion_data_for_file(fn):\n app_to_translations = {}\n\n lang = get_language(fn)\n\n try:\n pofile = polib.pofile(fn)\n except IOError as ioe:\n print 'Error opening file: {fn}'.format(fn=fn)\n print ioe.message\n return 1\n\n for poentry in pofile:\n if poentry.obsolete:\n continue\n\n for occ in poentry.occurrences:\n path = occ[0].split(os.sep)\n if path[0] == 'kitsune':\n path = path[1]\n else:\n path = 'vendor/' + path[2]\n app_to_translations.setdefault(\n path, []).append(poentry.translated())\n\n all_total = 0\n all_translated = 0\n\n data = {}\n for app, tr_list in app_to_translations.items():\n total = len(tr_list)\n translated = len([tr for tr in tr_list if tr])\n\n data[app] = {\n 'total': total,\n 'translated': translated,\n }\n\n all_total += total\n all_translated += translated\n\n return {\n lang: {\n 'total': all_total,\n 'translated': all_translated,\n 'apps': data\n }\n }",
"def get_key_from_file():\n json_data = request.get_json()\n \n is_reference = json_data['is_reference']\n filename = json_data['filename']\n key_name = json_data['key_name']\n\n \n settings.setOptionsFile(get_info('uid'))\n f = ROOT.TFile(filename)\n\n d = eval(cppyy.gbl.getDictionary(f,key_name))\n \n f.Close()\n return jsonify(d)",
"def take_auth_data():\n home = str(Path.home())\n path_to_keys = '/Documents/twitter/keys/'\n\n files = [f for f in listdir(home+path_to_keys) if '.DS' not in f]\n\n tokens = []\n for f in files:\n with open(home+path_to_keys+f, 'r') as lines:\n ln = lines.readline().replace(\" \", \"\")\n tokens.append(ln)\n\n auth_data = dict(zip(files, tokens))\n return auth_data",
"def read_dict():\n\n\tfilename = 'diction10k.txt'\n\t\n\ttry:\n\t\ttarget = open(filename, 'r')\n\n\texcept:\n\t\tprint(\"Dictionary not found. Please make sure it is located in the same\" \n\t\t\t+ \" folder as strings.py\")\n\t\tsys.exit(1)\n\n\tfor line in target:\n\t\tDICTSET.add(line.strip())",
"def load_dependencies() -> dict:\n global CARRIERS_DICT\n with open('mms_gateways.json') as mms:\n CARRIERS_DICT = json.loads(mms.read())\n with open('config.json', 'r') as cfig:\n cfig_dict = json.loads(cfig.read())\n return cfig_dict",
"def get_file_metadata(file, wordlist=None, dictionary=None):\n if wordlist == None:\n wordlist = joblib.load('wordlist.pkl')\n if dictionary == None:\n dictionary = joblib.load('dict.pkl')\n utterance = file.split('/')[-1]\n speaker = utterance.split('_')[0]\n delimiter = '_'\n word = wordlist[delimiter.join((utterance.split('_')[1], utterance.split('_')[2]))]\n phones = dictionary[word.upper()]\n return {'speaker': speaker, 'word': word, 'phones': phones, 'utterance': utterance[:-4]}",
"def fileobject_to_dict(fo):\n if fo.allocated():\n # proc = subprocess.Popen(['./extract_strings', fo.inode()], stdout=subprocess.PIPE)\n # contents = proc.stdout.read()\n return {\n 'atime_dt': epoch_to_dt(fo.atime()),\n 'compressed_b': fo.compressed(),\n 'contents_t': string.translate(fo.contents(), filter),\n 'contents_display': string.translate(fo.contents(), filter),\n 'crtime_dt': epoch_to_dt(fo.crtime()),\n 'ctime_dt': epoch_to_dt(fo.ctime()),\n 'dtime_dt': epoch_to_dt(fo.dtime()),\n 'encrypted_b': fo.encrypted(),\n 'extension_facet': fo.ext(),\n 'fileid_i': int(fo._tags['id']),\n 'filename_display': fo.filename(),\n 'filename_t': fo.filename(),\n 'filesize_l': long(fo.filesize()),\n 'fragments_i': int(fo.fragments()),\n 'gid_i': int(fo._tags['gid']),\n #'id': uuid.uuid4(),\n 'id': hashlib.sha1(os.path.basename(IMAGE) + '_' + fo.inode()).hexdigest(),\n #'imagefile': fo._tags['imagefile'],\n 'inode_i': int(fo.inode()),\n 'libmagic_display': fo.libmagic(),\n 'libmagic_facet': fo.libmagic(),\n 'md5_s': fo.md5(),\n 'meta_type_i': fo._tags['meta_type'],\n 'mode_facet': int(fo._tags['mode']),\n 'mode_i': int(fo._tags['mode']),\n 'mtime_dt': epoch_to_dt(fo.mtime()),\n 'nlink_i': fo._tags['nlink'],\n 'name_type_s': fo.name_type(),\n 'partition_i': int(fo.partition()),\n 'sha1_s': fo.sha1(),\n 'uid_i': int(fo._tags['uid']),\n 'volume_display': IMAGE,\n 'volume_facet': os.path.basename(IMAGE)\n }\n else:\n return None",
"def load_files(directory):\n fileDict = {}\n files = os.listdir(directory)\n for f in files:\n fpath = os.path.join(directory, f)\n with open(fpath) as myFile:\n text = myFile.read()\n fileDict[f] = text\n return fileDict",
"def read(self):\r\n entById = {}\r\n entsByName = {}\r\n header = 'HEADER '\r\n readheader = False\r\n for line in self.file:\r\n e = self.parseLine(line)\r\n if e:\r\n entById[int(e[\"id\"])] = e\r\n ids = e.get(e[\"name\"],[])\r\n ids.append(e[\"id\"])\r\n entsByName[e[\"name\"]] = list(set(ids))\r\n elif 'HEADER' in line:\r\n readheader = True\r\n elif readheader:\r\n if 'ENDSEC' in line:\r\n readheader = False\r\n else:\r\n header += line\r\n \r\n return [entById, entsByName, header]",
"def load_pecfile_dict(pec_file):\n file_dict = {}\n while True:\n record = pec_file.readline()[:-1]\n if record == '':\n break\n else:\n record = pec.PecRecSeq(record)\n if record.sequence in file_dict:\n return 1\n else:\n file_dict[record.sequence] = record\n return file_dict"
] | [
"0.62764466",
"0.6119582",
"0.61059195",
"0.60157615",
"0.60089105",
"0.59994686",
"0.59959334",
"0.5899969",
"0.5897268",
"0.5874877",
"0.5860927",
"0.58559364",
"0.5806741",
"0.5797594",
"0.576075",
"0.5753465",
"0.57491076",
"0.5746906",
"0.5733645",
"0.5697019",
"0.56846964",
"0.5667377",
"0.5660366",
"0.5648909",
"0.5647839",
"0.5607853",
"0.5594802",
"0.5572355",
"0.55511713",
"0.5550419"
] | 0.68165624 | 0 |
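The read_GO_TPM_file document above is written in Python 2 style (its map(...) calls are expected to yield lists; in Python 3 they would be one-shot iterators). A Python 3 sketch with the same behaviour — skip one header line per file, strip the remaining lines, and key the result on whether the filename contains _GO.txt — could be:

def read_go_tpm_file(*go_tpm_files):
    # Read one GO file and one TPM file, skip the single header line of each,
    # strip the remaining lines, and return them as (go_lines, tpm_lines).
    contents = {}
    for path in go_tpm_files:
        with open(path) as handle:
            lines = [line.strip() for line in handle.readlines()[1:]]
        key = 'GO' if '_GO.txt' in path else 'TPM'
        contents[key] = lines
    return contents['GO'], contents['TPM']

Returning plain lists rather than map objects keeps the result safely re-iterable by downstream callers.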
{{{ Docstrings Writes the cumulative GO/TPM data to file. }}} | def write_cum_file(self, cum_data):
with open(self.IDs[3], 'w') as cum:
cum.write('GO_id\tCumulative_TPM\n')
for k, v in cum_data.iteritems():
cum.write(k + '\t' + str(v) + '\n') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_data():",
"def write_data(self):\n\n data_string = \"%s, %d, %d, %s\\n\" % (\n self.name, self.repos, self.members, self.created)\n\n file_path = os.path.join(os.path.dirname(__file__), self.data_file)\n\n with open(os.path.abspath(file_path), 'a') as f:\n f.write(data_string)",
"def _write_cache_file(self, data):\n\n with open(self.cache_file, mode='wb') as f:\n f.write(data)\n\n self.log.info(f\"Cached facilities at {self.cache_file}\")",
"def saveenergyfile(path, meta, data):\n def serializemeta(meta):\n \"\"\"Convert metadata object to list of comment strings\"\"\"\n return [u\"#CTE_%s: %s\" % (key, meta[key]) for key in meta]\n\n with io.open(path, 'w+') as ff:\n ff.write(u\"\\n\".join(serializemeta(meta)))\n ff.write(u\"\\nvector,tipo,src_dst\\n\")\n for c in data:\n carrier = c['carrier']\n ctype = c['ctype']\n originoruse = c['originoruse']\n values = u\", \".join(u\"%.2f\" % v for v in c['values'])\n comment = u\" # %s\" % c['comment'] if c['comment'] else u\"\"\n ff.write(u\"%s, %s, %s, %s%s\\n\" % (carrier, ctype, originoruse, values, comment))",
"def writeRawFCD():\n global vehId, vehIdDict\n vehIdDict = {}\n vehId = 0\n day = 0\n\n def getVehId(orgId):\n \"\"\"creates new vehicle id's which consists only numerics\"\"\"\n global vehId, vehIdDict\n value = vehIdDict.get(orgId, vehId)\n if value is vehId:\n vehIdDict[orgId] = vehId\n vehId = (vehId + 1) % 65500\n return value\n\n outputFile = open(path.FQrawFCD, 'w')\n\n for period, quota, vtypeDictR, taxiSum in generatePeriodQuotaSets():\n day += 86400\n # reset dict so that every taxi (even if the vehicle is chosen several\n # times) gets its own id\n vehIdDict = {}\n # dataset=0\n sortedKeys = vtypeDictR.keys()\n sortedKeys.sort()\n for timestep in sortedKeys:\n taxiList = vtypeDictR[timestep]\n for tup in taxiList: # all elements in this timestep\n # calc timestep ->for every period /quota set a new day\n time = timestep + day\n time = calcTime.getDateFromDepart(time)\n # dataset+=1\n # print ouptut\n # veh_id date (time to simDate+time) x (remove and\n # set comma new)\n outputFile.write(str(getVehId(tup[0])) + '\\t' + time + '\\t' + tup[3][0:2] + '.' + tup[3][2:7] + tup[3][8:] +\n # y (remove and set comma new)\n # status speed form m/s in km/h\n '\\t' + tup[4][0:2] + '.' + tup[4][2:7] + tup[4][8:] + '\\t' + \"90\" + '\\t' + str(int(round(tup[2] * 3.6))) + '\\n')\n # print dataset, time\n print(vehId)\n outputFile.close()",
"def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")",
"def _write(self):\n # Reload\n with portalocker.Lock(self.filename, 'w') as fh:\n self.data.to_csv(fh, index=False)\n fh.flush()\n os.fsync(fh.fileno())",
"def write_ctl_file(self):\n # Make sure all paths are relative to the working directory\n try:\n self._set_rel_paths()\n except (AttributeError, ValueError) as error:\n raise error\n with open(self.ctl_file, 'w') as ctl_handle:\n ctl_handle.write(\"seqfile = {0}\\n\".format(self._rel_alignment))\n ctl_handle.write(\"outfile = {0}\\n\".format(self._rel_out_file))\n for option in self._options.items():\n if option[1] == None:\n # If an option has a value of None, there's no need\n # to write it in the control file; it's normally just\n # commented out.\n continue\n ctl_handle.write(\"{0} = {1}\\n\".format(option[0], \n option[1]))",
"def write(data):",
"def write_file(self, file_path, acc, dict_tags):\n logging.info('Escrevendo arquivo em {0}'.format(file_path))\n file_write = open(file_path, \"w\")\n file_write.write(\"Taxa de acerto geral: {0:.2f}%\\n\".format(np.mean(acc)*100))\n for key in dict_tags.keys():\n if dict_tags[key]['right'] > 0:\n file_write.write(\"Taxas de acerto para a classe '{0}': {1:.2f}% Total da classe '{0}': {2:.2f}%\\n\".format(key, \n (dict_tags[key]['pred']/dict_tags[key]['right'])*100, \n (dict_tags[key]['right']/dict_tags[key]['pres'])*100))\n else:\n file_write.write(\"Taxas de acerto para a classe '{0}': Nao presente no corpus de teste\\n\".format(key))\n\n file_write.close()",
"def write(self, fname):\n pass",
"def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()",
"def write( data ):",
"def write_to_file(self):\n name = datetime.today().date()\n with open(f'{name}.csv', 'w', newline='') as file_create:\n fieldnames = ['date', 'value_in_pln']\n writer = csv.DictWriter(file_create, fieldnames=fieldnames)\n writer.writeheader()\n while datetime.today() < self.track_to:\n value_of_currency = PriceTracker.track_price()\n with open(f'{file_create.name}', 'a', newline='') as file_append:\n fieldnames = ['date', 'value_in_pln']\n writer = csv.DictWriter(file_append, fieldnames=fieldnames)\n writer.writerow({'date': datetime.today().strftime(\"%H:%M:%S\"), 'value_in_pln': value_of_currency})\n\n self.check_min_value(tracked_price=value_of_currency)\n sleep(1)\n\n return self.generate_report(file_create.name)",
"def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)",
"def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")",
"def write(self, filename, data, hdr):\n pass",
"def write(cls, file, data):\n file.write(data)",
"def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')",
"def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()",
"def _write(fdesc, data):\n while data:\n count = os.write(fdesc, data)\n data = data[count:]",
"def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)",
"def write_tcv(self):\n suffix = '_'+str(self.shot)+'_'+str(int(self.t*1e3))\n self.write_input(suffix=suffix)",
"def write_data(data=None,\n data_op_file=None):\n\n crdata = np.copy(data)\n\n crdataf = str(len(crdata)) + '\\n' + '\\n'.join([' '.join(list(map(str, l))) for l in crdata])\n\n with open(data_op_file, 'w') as ndf:\n ndf.write(crdataf)\n ndf.close()",
"def write_tdm_to_file(pkt):\n\n global g_tdm_cnt\n global g_binfile\n\n if UDP in pkt:\n if pkt[UDP].dport == TDM_PORT:\n f = open(g_binfile, 'a+b')\n f.write(bytes(pkt[UDP].payload))\n f.close()\n g_tdm_cnt += 1\n print(\"\\rTDM Count: {0}. CTRL-C to quit\".format(g_tdm_cnt), end=\" \")",
"def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)",
"def _write(self, out_file):\n #\n # I know this function is long, but the FRD block is long as well...\n # Splitting this into multiple functions would not help in my opinion.\n # Therefore -> shut up pylint\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n #\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write(self.setname.ljust(6).encode())\n out_file.write('{:12.5E}'.format(self.value).encode())\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write(self.text.ljust(20).encode())\n out_file.write('{:2d}'.format(self.ictype).encode())\n out_file.write('{:5d}'.format(self.numstep).encode())\n out_file.write(self.analys.ljust(10).encode())\n out_file.write('{:2d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n out_file.write(' '.encode()) # pad byte\n out_file.write('-4'.encode()) # key = -4\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(self.name.ljust(8).encode())\n if self.entities[0].ictype == 2 and self.ncomps == 3:\n out_file.write('{:5d}'.format(self.ncomps + 1).encode())\n else:\n out_file.write('{:5d}'.format(self.ncomps).encode())\n out_file.write('{:5d}'.format(self.irtype).encode())\n out_file.write('\\n'.encode()) # eol\n\n for entity in self.entities:\n out_file.write(' '.encode()) # pad byte\n out_file.write('-5'.encode())\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(entity.name.ljust(8).encode())\n out_file.write('{:5d}'.format(entity.menu).encode())\n out_file.write('{:5d}'.format(entity.ictype).encode())\n out_file.write('{:5d}'.format(entity.icind1).encode())\n if entity.ictype == 4:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n elif entity.ictype == 2 and entity is self.entities[-1]:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write(entity.icname.encode())\n else:\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write('\\n'.encode()) # eol\n\n for result in self.results:\n if self.format < 2:\n num_lines = int(self.ncomps/(6 + 1)) + 1\n for j in range(num_lines):\n if j == 0:\n out_file.write(' -1'.encode()) # pad byte and key = -1\n if self.format == 0:\n out_file.write(\n '{:5d}'.format(result.node).encode())\n else:\n out_file.write(\n '{:10d}'.format(result.node).encode())\n else:\n out_file.write(' -2'.encode()) # pad byte and key = -2\n out_file.write(' '*(5*(self.format+1)).encode())\n k_start = j*6\n k_end = min(self.ncomps - k_start, (j+1)*6)\n for k in range(k_start, k_end):\n out_file.write(\n '{:12.5E}'.format(result.data[k]).encode())\n out_file.write('\\n'.encode()) # eol\n else:\n out_file.write(struct.pack('i', result.node))\n out_file.write(struct.pack('f'*self.ncomps, *result.data))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)"
] | [
"0.62049794",
"0.59950626",
"0.5845376",
"0.58307445",
"0.5820113",
"0.5765106",
"0.573643",
"0.57359177",
"0.5724767",
"0.56903416",
"0.56730187",
"0.5641693",
"0.56183136",
"0.5617403",
"0.5613926",
"0.56085664",
"0.5590051",
"0.55600286",
"0.5557936",
"0.5546166",
"0.5534232",
"0.55188936",
"0.5514999",
"0.5504552",
"0.5492242",
"0.5491648",
"0.54593766",
"0.5445124",
"0.5445124",
"0.54337066"
] | 0.8004967 | 0 |
Filters GO data based on the primary GO category. | def filter_GO_dict(self, GO_dict, *GO_category):
dictionary = {}
for i in GO_category:
tmp_dict = {}
for k, v in GO_dict.iteritems():
tmp_value = filter(lambda x: i in x, v)
if tmp_value:
tmp_dict[k] = tmp_value
dictionary[i] = tmp_dict
# {{{ return empty
# uncomment this block of code if you would like script to
# print 'empty' in gene_ontology column if no GO hits in
# pertinent GO category
# else:
# dictionary[k] = 'empty'
# }}}
return(
dictionary['cellular_component'],
dictionary['biological_process'],
dictionary['molecular_function']
)
# }}} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n parser = ArgumentParser(\n description='Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n\n ann_file = open(args.json_file_path)\n category_names = [\"sports ball\", \"cell phone\", \"couch\", \"elephant\", \"tie\", \"spoon\", \"skis\", \"apple\", \"giraffe\", \"laptop\", \"tennis racket\", \"sink\", \"dog\", \"fork\", \"cat\", \"teddy bear\", \"train\", \"skateboard\", \"toilet\", \"sandwich\", \"bed\", \"keyboard\", \"baseball glove\", \"baseball bat\", \"airplane\", \"oven\", \"hot dog\", \"refrigerator\", \"frisbee\", \"mouse\", \"fire hydrant\", \"stop sign\", \"bear\", \"snowboard\", \"parking meter\", \"toothbrush\", \"microwave\", \"scissors\", \"hair drier\", \"toaster\"]\n\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n\n output = open(args.out_file, \"w\")\n json.dump(new_json, output)\n output.close()",
"def getCategory():",
"def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )",
"def category(self) -> str:\n return self._search_in_properties(ATTR_CATEGORY)",
"def category(self):\r\n return self._get('category', {})",
"def filter_category(category_list: List, category: str):\n for cat in category_list:\n if cat[\"name\"] == category:\n return cat\n return category_list.pop()",
"def __init__(self, category):\n self.category = category\n self.name = \"Filters.document.category('{}')\".format(self.category)",
"def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat",
"def item_filter(item):\n\tcch_geoserver_services = get_only_cch_geoserver_services(item['services'])\n\thas_cch_geoserver_services = 0 != len(cch_geoserver_services)\n\tis_data = 'data' == item['itemType']\n\treturn is_data and has_cch_geoserver_services;",
"def data_category(self):\n try:\n return self.attributes.workspace.attributes['library:dataCategory']['items']\n except Exception as e:\n self._logger.debug(f\"data_category {e}\")\n return None",
"def category(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])",
"def categories(self):\n pass",
"async def get_categories_for_filter_menu(language: str):\n try:\n category_filter_query_result = get_db().AQLQuery(\n query=menu_queries.QUERY_CATEGORIES_FOR_LANGUAGE,\n batchSize=500,\n bindVars={\"language\": language},\n )\n return {\"categoryitems\": category_filter_query_result.result}\n\n except DocumentNotFoundError as error:\n print(error)\n raise HTTPException(status_code=404, detail=\"Item not found\") from error\n except AQLQueryError as error:\n print(\"AQLQueryError: \", error)\n raise HTTPException(status_code=400, detail=error.errors) from error\n except KeyError as error:\n print(\"KeyError: \", error)\n raise HTTPException(status_code=400) from error",
"async def category(self,ctx):\n await ctx.send(\"Yes this is a category.\")",
"def get_category(self, obj):\n cat_lst = []\n for k, v in obj.items():\n cat_lst = cat_lst + list(v.keys())\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n cat_lst = cat_lst + list(in_v.keys())\n in_k, in_v = list(in_v.items())[-1]\n simpl_lst = [i for n, i in enumerate(cat_lst) if i not in cat_lst[:n]]\n res = []\n for cat in simpl_lst:\n if cat not in self._loop_name:\n re_outer = re.compile(r'([^A-Z ])([A-Z])')\n re_inner = re.compile(r'(?<!^)([A-Z])([^A-Z])')\n res.append(re_outer.sub(r'\\1 \\2', re_inner.sub(r' \\1\\2', cat)))\n self._category = res",
"def category(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"category\")",
"def with_category(self, category: str) -> list:\n return list(self.__holder.db_tags.filter(\n lambda t: t.category == category))",
"def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )",
"def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )",
"def filter_json_by_category(self, new_json_path):\n # {'supercategory': 'person', 'id': 1, 'name': 'person'}\n ### Filter images:\n print(\"Filtering the annotations ... \")\n json_parent = os.path.split(new_json_path)[0]\n os.makedirs(json_parent, exist_ok=True)\n imgs_ids = [x['id'] for x in self.images] # get img_ids of imgs with the category\n new_imgs = [x for x in self.coco.dataset['images'] if x['id'] in imgs_ids]\n catIds = self.catIds\n ### Filter annotations\n new_annots = [x for x in self.coco.dataset['annotations'] if x['category_id'] in catIds]\n ### Reorganize the ids\n new_imgs, annotations = self.modify_ids(new_imgs, new_annots)\n ### Filter categories\n new_categories = [x for x in self.coco.dataset['categories'] if x['id'] in catIds]\n print(\"new_categories: \", new_categories)\n data = {\n \"info\": self.coco.dataset['info'],\n \"licenses\": self.coco.dataset['licenses'],\n \"images\": new_imgs, \n \"annotations\": new_annots,\n \"categories\": new_categories \n }\n print(\"saving json: \")\n with open(new_json_path, 'w') as f:\n json.dump(data, f)",
"def search_category(self):\n return _(self.child_class)",
"def Subcategories():\n subcat = {\n \t\"Featured\": 0,\n \t\"All\": 1,\n \t\"Collectibles\": 2,\n \t\"Clothing\": 3,\n \t\"BodyParts\": 4,\n \t\"Gear\": 5,\n \t\"Models\": 6,\n \t\"Plugins\": 7,\n \t\"Decals\": 8,\n \t\"Hats\": 9,\n \t\"Faces\": 10,\n \t\"Packages\": 11,\n \t\"Shirts\": 12,\n \t\"Tshirts\": 13,\n \t\"Pants\": 14,\n \t\"Heads\": 15,\n \t\"Audio\": 16,\n \t\"RobloxCreated\": 17,\n \t\"Meshes\": 18,\n \t\"Accessories\": 19,\n \t\"HairAccessories\": 20,\n \t\"FaceAccessories\": 21,\n \t\"NeckAccessories\": 22,\n \t\"ShoulderAccessories\": 23,\n \t\"FrontAccessories\": 24,\n \t\"BackAccessories\": 25,\n \t\"WaistAccessories\": 26,\n \t\"AvatarAnimations\": 27,\n \t\"ClimbAnimations\": 28,\n \t\"FallAnimations\": 30,\n \t\"IdleAnimations\": 31,\n\t \"JumpAnimations\": 32,\n\t \"RunAnimations\": 33,\n \t\"SwimAnimations\": 34,\n \t\"WalkAnimations\": 35,\n \t\"AnimationPackage\": 36,\n \t\"Bundles\": 37,\n \t\"AnimationBundles\": 38,\n\t \"EmoteAnimations\": 39,\n\t \"CommunityCreations\": 40,\n\t \"Video\": 41,\n\t \"Recommended\": 51\n }\n return subcat",
"def category(self) -> str:\n return pulumi.get(self, \"category\")",
"def healthcare_filter(df_all): \n #get requested assets under healthcare tag \n df_filtered = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_filtered = df_filtered.append(df_all.loc[row]) #if so, save in df \n if '\"healthcare\"=>\"doctor\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"healthcare\"=>\"pharmacy\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'pharmacy'\n elif '\"healthcare\"=>\"hospital\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'hospital'\n elif '\"healthcare\"=>\"clinic\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'clinic'\n elif '\"healthcare\"=>\"dentist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'dentist'\n elif '\"healthcare\"=>\"physiotherapist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'physiotherapist'\n elif '\"healthcare\"=>\"alternative\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'alternative'\n elif '\"healthcare\"=>\"laboratory\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'laboratory'\n elif '\"healthcare\"=>\"optometrist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'optometrist'\n elif '\"healthcare\"=>\"rehabilitation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'rehabilitation'\n elif '\"healthcare\"=>\"blood_donation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'blood_donation'\n elif '\"healthcare\"=>\"birthing_center\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'birthing_center'\n else:\n df_filtered = df_filtered.drop(index=row)\n \n return df_filtered",
"async def wordfilter(self, ctx):\n pass",
"def findCategory( _file, _treeName, _conditioned = None ):\n\ttree = _file.Get(_treeName)\n\t#Name of the EDAnalyzer\n\tedmAnName = _treeName.split('/')[0]\n\t#Get MetaData \n\tmetadata = tree.GetUserInfo()[0].GetString().Data()\n\t# Isolating the EDAnalyzer with its parameters\n\tparameters = metadata[metadata.find(edmAnName+':'):]\n\tparameters = searchEndBlock( parameters )\n\t# Getting the categories\n\tcatStr = parameters[parameters.find('flags:'):]\n\tcatStr = searchEndBlock( catStr )\n\tcategories = []\n\ttriggerCat = []\n\tfor i in catStr.split('\\n'):\n\t\t# Pairing Triggers with the rest of categories \n\t\tif i.find('triggerObjectMatches') != -1:\n\t\t\ttriggerCat.append( i.split(':')[0].strip(' ') ) \n\t\telif i.find('string tracked') != -1 or i.find('InputTag tracked') != -1:\n\t\t\tcategories.append( i.split(':')[0].strip(' ') )\n\t# Checking if the conditional category is in the file\n\tif _conditioned:\n\t\tif _conditioned not in categories:\n\t\t\tmessage = \"\"\"\\033[1;33mWarning: The conditional category %s is not in the tree %s \\033[1;m\"\"\" % (_treeName,_conditioned)\n\t\t\t_conditioned = None\n\t\t\tprint message\n\t\telse:\n\t\t\tcategories.remove( _conditioned )\n\t\t\t#Adding the conditional category\t\t\t\n\t\t\tfor i in xrange(len(categories)):\n\t\t\t\tcategories[i] = _conditioned+':'+categories[i]\n\t#Add the trigger to build categories with to checks\n\tdeliverCat = None\n\tif len(triggerCat) == 0:\n\t\tif _conditioned:\n\t\t\tcategoriesSet = set(map( lambda x: x.split(':')[:-1][0], categories ))\n\t\t\tcategories = list(categoriesSet)\n\t\telse:\n\t\t\tcategories = [PLAIN]\n\t\tdeliverCat = categories\n\telse:\n\t\tdeliverCat = []\n\t\tfor cat in categories:\n\t\t\tdeliverCat.append( cat )\n\t\n\treturn deliverCat",
"async def search_by_product_or_category(\n conn, cursor, product: str = \"\", category: str = \"\"\n) -> List[str]:\n\n if (not product) and (not category):\n filter_term = \"\"\n elif product and category:\n filter_term = (\n f\"\\n WHERE product = '{product}' AND category = '{category}'\"\n )\n elif product:\n filter_term = f\"\\n WHERE product = '{product}'\"\n else:\n filter_term = f\"\\n WHERE category = '{category}'\"\n\n statement = f\"\"\"\n SELECT product.name as product,\n product.description as description,\n product.category as category,\n supplier_product.price as price,\n supplier_product.supplier as supplier,\n supplier_product.price as price,\n product.rating as product_rating,\n supplier.rating as supplier_rating,\n ROUND(((product.rating + supplier.rating)/2),2) as combined_rating,\n product.last_updated as last_updated \n FROM product \n INNER JOIN supplier_product\n ON product.name = supplier_product.product\n INNER JOIN supplier \n ON supplier_product.supplier = supplier.name {filter_term}\n ORDER BY (product.rating + supplier.rating) DESC\n \"\"\"\n await cursor.execute(statement)\n categories = await cursor.fetchall()\n return categories",
"def category(self) -> Optional[str]:\n return pulumi.get(self, \"category\")",
"def get_cats(bfo):\n primary_report_numbers = bfo.fields('037__')\n additional_report_numbers = bfo.fields('088__')\n report_numbers = primary_report_numbers\n report_numbers.extend(additional_report_numbers)\n\n cat = [num.get('c','') for num in report_numbers if num.get('9') == 'arXiv' or num.get('s')=='arXiv']\n\n return cat",
"def wordsByCategoryName(self, category):\n\t\ttry:\n\t\t\treturn (self.dictData[category])\n\t\texcept KeyError:\n\t\t\tprint (\"La categoría ingresada no existe.\")"
] | [
"0.5861926",
"0.5739233",
"0.57207024",
"0.56685543",
"0.54676676",
"0.5457863",
"0.53953564",
"0.53262365",
"0.52879685",
"0.52697474",
"0.5253551",
"0.52304476",
"0.5225333",
"0.5170825",
"0.5147459",
"0.51252097",
"0.51222163",
"0.5112287",
"0.5112287",
"0.5100528",
"0.5096094",
"0.5084755",
"0.5053048",
"0.5050306",
"0.5048407",
"0.5046382",
"0.50354755",
"0.5032299",
"0.50284827",
"0.50267553"
] | 0.68821824 | 0 |
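The filter_GO_dict record above is Python 2 code: dict.iteritems() no longer exists, and in Python 3 filter() returns a lazy iterator, so the truthiness test on tmp_value would no longer detect empty results. A minimal Python 3 sketch of the same filtering idea, assuming a standalone function (no self) and hypothetical sample annotations that are not part of the dataset record:

def filter_go_dict(go_dict, *go_categories):
    """Return one {gene: [GO terms]} mapping per requested GO category."""
    filtered = {}
    for category in go_categories:
        per_category = {}
        for gene, terms in go_dict.items():              # .items() replaces .iteritems()
            hits = [t for t in terms if category in t]   # list comp replaces filter()
            if hits:                                      # genes without hits are skipped
                per_category[gene] = hits
        filtered[category] = per_category
    # Mirrors the original record: assumes the three standard GO namespaces were requested.
    return (
        filtered["cellular_component"],
        filtered["biological_process"],
        filtered["molecular_function"],
    )

if __name__ == "__main__":
    sample = {  # hypothetical annotations, only to exercise the function
        "geneA": ["GO:0005737 cellular_component cytoplasm"],
        "geneB": ["GO:0008152 biological_process metabolic process",
                  "GO:0003824 molecular_function catalytic activity"],
    }
    cc, bp, mf = filter_go_dict(sample, "cellular_component",
                                "biological_process", "molecular_function")
    print(cc, bp, mf, sep="\n")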
Converts bytes to 4 x 4 array | def byte2array(bytes):
array = []
for i, byte in enumerate(bytes):
if i % 4 == 0:
array.append([byte])
else:
array[i // 4].append(byte)
return array | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bytes2matrix(text):\n return [list(text[i:i+4]) for i in range(0, len(text), 4)]",
"def to_nibble_array(arr: ndarray) -> ndarray:\n arr = arr.ravel()\n return (arr[::2] + (arr[1::2] << 4)).astype(\"uint8\")",
"def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...",
"def image_decoder(rawbytes):\n img = Image.open(BytesIO(rawbytes))\n array = np.asarray(img, dtype=np.uint8)\n return array",
"def decode_u8_array(as_bytes: typing.List[int]) -> typing.List[int]:\n raise NotImplementedError()",
"def matrix4_to_3x4_array(mat):\r\n return tuple(f for v in mat[0:3] for f in v)",
"def bytes_to_np(img: bytes) -> np.ndarray:\n im = Image.open(BytesIO(img))\n im = im.convert(\"RGB\")\n return np.array(im)",
"def _packbits(self, tile: bytes) -> np.ndarray:\n decoded = self._reshape(\n np.frombuffer(imagecodecs.packbits_decode(tile), self.dtype)\n )\n return np.rollaxis(decoded, 2, 0)",
"def get_blobs(self): # CHECK\n x=self.send_packet_check_response_without_retry('\\x90')\n n=len(x)/4 # CHECK? n = len(x)//4\n z=struct.unpack('<'+'I'*n,x)\n unpack=lambda i: tuple(i>>offset & (1<<length)-1 for offset,length in [(0,11),(11,11),(22,2),(24,8)])\n return z[0],[unpack(i) for i in z[1:]]",
"def ascii_to_numpy(ascii_diagram, as_bytes=True):\n ascii_diagram = [list(i) for i in ascii_diagram]\n ascii_diagram = np.array(ascii_diagram)\n v_to_bytes = np.vectorize(to_bytes)\n return v_to_bytes(ascii_diagram) if as_bytes else ascii_diagram",
"def decode_frame(self, buf):\n import numpy as np\n from cv2 import cvtColor\n\n w, h = self._resolution\n arr = np.fromstring(buf, 'uint8').reshape((h + h / 2, w))\n arr = cvtColor(arr, 93) # NV21 -> BGR\n return arr",
"def bs_to_array(bs):\n return np.array([int(x) for x in bs], dtype=\"int8\")",
"def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data",
"def from_nibble_array(arr: ndarray) -> ndarray:\n shape = arr.size\n\n new_arr = zeros((shape * 2), dtype=uint8)\n\n new_arr[::2] = arr & 0xF\n new_arr[1::2] = arr >> 4\n\n return new_arr",
"def hex_str_to_bytes_arr(bytes_str: str) -> List[int]:\n return eval(f\"[{bytes_str}]\")",
"def _chunkify(arr, size):\n arrs = []\n for i in range(0, len(arr), size):\n chunk = bytearray(arr[i:i + size])\n arrs.append(chunk)\n return arrs",
"def decompose_byte(data: str, nibble: bool = False) -> list:\n _bytes = int(len(sanatize_hex(data)) / 2)\n mem_size = 8\n if nibble:\n mem_size = 4\n binary_data = format(int(str(data), 16), f\"0{_bytes*8}b\")\n return [\n format(int(binary_data[mem_size * x : mem_size * (x + 1)], 2), f\"#0{int(mem_size/2)}x\")\n for x in range(0, int(len(binary_data) / mem_size))\n ]",
"def convertData(img):\n dataset = []\n for i in img:\n dataset.append(format(ord(i), '08b'))\n return dataset",
"def blob2image(s):\n d0 = ord(s[0])\n d1 = ord(s[1])\n assert len(s)==d0*d1+2,(len(s),d0,d1)\n return numpy.frombuffer(s[2:],dtype='B').reshape(d0,d1)",
"def idx_to_list(bytez):\n # byte 0: 0\n # byte 1: 0\n if not (bytez[0] == 0 and bytez[1] == 0):\n raise IOError(\"IDX file should start with two 0 bytes\")\n\n # byte 2: The number of dimensions\n # byte 3: The type code\n typebyte = bytez[2]\n numdims = bytez[3]\n\n # 4 bytes for each dimension: size of dimensions\n fmtstring = '>' + 'i'*numdims\n dimension_sizes = struct.unpack(fmtstring, bytez[4:4+4*numdims])\n\n # Rest of the data starts here\n startoffset = 4 + 4*numdims\n\n typedata = {\n 0x08: ('B', 1),\n 0x09: ('b', 1),\n 0x0B: ('h', 2),\n 0x0C: ('i', 4),\n 0x0D: ('f', 4),\n 0x0E: ('d', 8)\n }\n\n typecode = typedata[typebyte][0]\n flatarray = array.array(typecode, bytez[startoffset:])\n if sys.byteorder == 'little':\n flatarray.byteswap()\n\n if flatarray.itemsize != typedata[typebyte][1]:\n raise EnvironmentError(\"It's assumed a C int is 4 bytes\")\n\n def _recursive(inputlst, dimsizes):\n \"\"\"Recursively split the flat list into chunks and merge them back into a\n nested list structure.\"\"\"\n if len(dimsizes) == 1:\n return list(inputlst)\n\n outerlist = []\n chunksize = len(inputlst)//dimsizes[0]\n for i in range(0, len(inputlst), chunksize):\n chunk = inputlst[i:i+chunksize]\n outerlist.append(_recursive(chunk, dimsizes[1:]))\n\n return outerlist\n\n return _recursive(flatarray, dimension_sizes)",
"def fig2array(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n buf.shape = (w, h, 3)\n return buf",
"def bytes2words(data, length=8):\n return(np.fromstring(data, dtype=np.uint64))",
"def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())",
"def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())",
"def bytes_as_char_array(b):\n return \"{ \" + \", \".join(\"0x%02x\" % x for x in b) + \" }\"",
"def levels_to_data(levels):\n\n b4_conv_fact = [1, 4, 16, 64]\n levels = levels.reshape(levels.size / 4, 4)\n data = np.array(np.dot(levels, b4_conv_fact), dtype = np.uint8)\n\n return data",
"def bitarray_to_data(bits):\n return np.fromstring(bits,dtype = np.int32)",
"def read32(bytestream):\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(bytestream.read(4), dtype=dt)[0]",
"def read32(bytestream):\n dt = np.dtype(np.uint32).newbyteorder('>')\n return np.frombuffer(bytestream.read(4), dtype=dt)[0]",
"def bytes_list_to_array(bytes_list):\r\n digital_bytes = []\r\n for elem in bytes_list:\r\n if isinstance(elem, int):\r\n digital_bytes.append(elem.to_bytes(1, 'little'))\r\n elif isinstance(elem, str):\r\n digital_bytes.append(ord(elem).to_bytes(1, 'little'))\r\n digital_bytes_array = b''.join(digital_bytes)\r\n return digital_bytes_array"
] | [
"0.69049335",
"0.6487146",
"0.6353773",
"0.63418293",
"0.6140696",
"0.61321247",
"0.61064464",
"0.6101885",
"0.6076199",
"0.6047178",
"0.60198945",
"0.59871984",
"0.59552336",
"0.59167445",
"0.5873622",
"0.5866856",
"0.58424544",
"0.57909036",
"0.5773045",
"0.57477385",
"0.57368064",
"0.5734935",
"0.5732678",
"0.5732678",
"0.571382",
"0.5712514",
"0.56906617",
"0.5634966",
"0.5634966",
"0.5634432"
] | 0.7883482 | 0 |
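The byte2array record above groups a byte string into four-byte words, i.e. a 4 x 4 list of lists filled word by word. A short usage sketch, assuming a 16-byte input; the parameter is renamed from bytes to data here purely to avoid shadowing the built-in type:

def byte2array(data):
    """Group an iterable of byte values into rows of four."""
    array = []
    for i, byte in enumerate(data):
        if i % 4 == 0:
            array.append([byte])        # start a new 4-byte word
        else:
            array[i // 4].append(byte)  # extend the current word
    return array

if __name__ == "__main__":
    block = bytes(range(16))            # 00 01 02 ... 0f
    grid = byte2array(block)
    assert grid == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
    print(grid)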
Converts 4 x 4 array to hex string | def array2hex(array):
hexstr = ""
for i in range(4):
hexstr += ''.join('{:02x}'.format(x) for x in array[i])
return hexstr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bytes_arr_to_hex_str(bytes_arr: List[int]) -> str:\n return \", \".join(\"0x%02x\" % b for b in bytes_arr)",
"def to_hex_str(dec_array):\r\n\r\n as_hex = [f'{i:02x}' for i in dec_array]\r\n\r\n return ''.join(as_hex)",
"def bit_array_to_string(array: Iterable) -> str:\n\n res = ''.join(\n [chr(int(y, 2)) for y in [''.join([str(x) for x in _bytes])\n for _bytes in Des.n_split(array, 8)]])\n return res",
"def render_list_as_hex(self, data):\n s = '[ '\n for c in data:\n s += '%02x ' % c\n s += ']'\n return s",
"def transfer_2d_array_to_str(array):\n str_list = []\n for r in array:\n str_list.append(\",\".join([str(e) for e in r]))\n return \" \".join(str_list)",
"def rgb_hexify(rgb: Iterable[int]) -> str:\n return ''.join(\n list(map(\n lambda x: hex(abs(x))[2:].zfill(2),\n rgb\n ))[::-1]\n )",
"def matrix_to_text(matrix):\n return ''.join(['{:02x}{:02x}{:02x}{:02x}'.format(\n matrix[0][c], matrix[1][c], matrix[2][c], matrix[3][c]) for c in range(4)])",
"def str_blocks_hex(buf):\n return ' '.join(['%s%s%s%s%02X'%(\n '' if i == 0 or i%32 != 0 else '\\n', # add a line-feed every 8 blocks\n '' if i == 0 or i%128 != 0 else '\\n',# add an extra line-feed every 4 lines\n '' if i%4 != 0 else ' ', # add an extra space every 4 values\n '' if i%16 != 0 else ' ', # add another extra space every block of 4\n ord(x)) for i, x in enumerate(buf)])",
"def to_hex6_string(self):\n def c(x):\n return int(x * 255.0)\n return '#{:02x}{:02x}{:02x}'.format(c(self.r), c(self.g), c(self.b))",
"def bit_array_to_string(array):\n result_string = ''.join(\n [chr(int(i, 2)) for i in\n [''.join([str(x) for x in s_bytes])\n for s_bytes in split_into_n(array, 8)]]\n )\n return result_string",
"def uInt32HexListStr(uInt32List):\n \n outputStr = \"\"\n for value in uInt32List:\n outputStr += \"\\n\\t\" + uInt32HexStr(value)\n outputStr += \"\\n\"\n return outputStr",
"def rgb_hex_str(self, x):\n return \"#%02x%02x%02x\" % self.rgb_bytes_tuple(x)",
"def hex_list(self):\r\n return [''.join(['{:02X}'.format(b) for b in data]) for data in self.buffers()]",
"def toHex(self):\r\n rgb = self.toRGB()\r\n return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],\r\n hex(rgb[2])[2:])).replace(' ', '0')",
"def float_array_string(arr: Iterable[float]) -> str:\n return \"[\" + \", \".join([\"{:.4f}\".format(el) for el in arr]) + \"]\"",
"def _rgb_color_list_to_hex(color_list):\n color_list_rgb = [[int(x * 255) for x in c[0:3]] for c in color_list]\n color_list_hex = [f\"#{rgb[0]:02X}{rgb[1]:02X}{rgb[2]:02X}\" for rgb in color_list_rgb]\n return color_list_hex",
"def to_nibble_array(arr: ndarray) -> ndarray:\n arr = arr.ravel()\n return (arr[::2] + (arr[1::2] << 4)).astype(\"uint8\")",
"def rgba_hex_str(self, x):\n return \"#%02x%02x%02x%02x\" % self.rgba_bytes_tuple(x)",
"def hex_digest(x):\r\n\r\n global hashlib\r\n if hashlib is None:\r\n try:\r\n import hashlib\r\n except ImportError:\r\n raise RuntimeError(\"Can't run hex_digest because hashlib is not available.\")\r\n assert isinstance(x, np.ndarray)\r\n rval = hashlib.md5(x.tostring()).hexdigest()\r\n # hex digest must be annotated with strides to avoid collisions\r\n # because the buffer interface only exposes the raw data, not\r\n # any info about the semantics of how that data should be arranged\r\n # into a tensor\r\n rval = rval + '|strides=[' + ','.join(str(stride) for stride in x.strides) + ']'\r\n rval = rval + '|shape=[' + ','.join(str(s) for s in x.shape) + ']'\r\n return rval",
"def w__format_hex(self, string):\n d = map(None, string)\n d = map(ord, d)\n d = map(lambda x: \"%02x\" % x, d)\n return ' '.join(d)",
"def toHex(self):\n \n t=self.boolVals[:]\n t.reverse()\n \n string=str(self)\n \n \n string=hex(int(string,2))\n string=string[2:]\n\n d=ceil(self.n/4)-len(string)\n string=d*\"0\"+string\n return string",
"def hexify(buffer):\n return ''.join('%02x' % ord(c) for c in buffer)",
"def ByteToHex( bins ):\r\n\r\n return ''.join( [ \"%02X\" % x for x in bins ] ).strip()",
"def test_bytes_to_pretty_hex():\n data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n expected = (\n \"0000 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 \"\n \"|................|\\n\"\n )\n\n result = cmds._bytes_to_pretty_hex(data=data)\n\n assert expected == result",
"def to_hex(x):\n if isinstance(x, bytearray):\n return x.decode('hex')\n elif isinstance(x, basestring):\n return base64.b16encode(x)\n elif isinstance(x, (list, tuple)):\n return bytearray(x).decode('hex')\n else:\n raise ValueError('Unknown input argument type')",
"def colors_to_string(colors):\n return ''.join(['%02x%02x%02x' % (r,g,b) for r,g,b in colors])",
"def int2hex(n: int) -> str:",
"def hex_dump(data):\n for i in xrange(len(data)):\n print(\"0x%02x\" % ord(data[i]))\n if i != 0 and not (i+1) % 8:\n print(\"\")\n else:\n print(\" \")\n print",
"def hexdump(data, columns=4, blocksize=4):\n\tblocks = splitevery(data, blocksize)\n\n\t# calculate number of rows given columns\n\trow_count,remain = divmod(len(blocks), columns)\n\tif remain > 0:\n\t\trow_count += 1\n\n\trows = []\n\t# row length includes 2 chars for hex and 1 for spaces\n\trowlen = columns*(2*blocksize+1) \n\t# printable chars, in this context, dont include whitespace\n\tprintable = string.digits + string.letters + string.punctuation \n\n\tfor i in range(0, row_count):\n\t\tstart = i*columns\n\t\tascii_string = ''\n\t\trow = ''\n\t\t# add the hex\n\t\tfor block in blocks[start:start+columns]:\n\t\t\trow += block.encode('hex') + ' '\n\t\t\tascii_string += ''.join([x if x in printable else ' ' for x in block])\n\t\t# pad last row with spaces so ascii strings align\n\t\trows.append(row.ljust(rowlen) + ascii_string)\n\n\treturn '\\n'.join(rows)",
"def _encodeArray(self, array):\n\n # Actually, we want dtype,naxis,axNlen,base64(array)\n return base64.b64encode(array.tostring())"
] | [
"0.7119604",
"0.65960336",
"0.64228106",
"0.6327956",
"0.6200571",
"0.61711365",
"0.61589646",
"0.614864",
"0.6056005",
"0.59375083",
"0.5930569",
"0.5924319",
"0.59048486",
"0.5874973",
"0.5842972",
"0.58335584",
"0.5831089",
"0.5827387",
"0.57935375",
"0.57718426",
"0.5771126",
"0.5699744",
"0.5686766",
"0.5684178",
"0.5679652",
"0.5675958",
"0.56702137",
"0.566571",
"0.56574976",
"0.56564945"
] | 0.77297497 | 0 |
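Taken together, byte2array and array2hex form a round trip between a 16-byte block and its 32-character hex string; array2hex hard-codes four rows, so it assumes exactly 16 input bytes. (The getKeySchedule record further below likewise feeds its key through byte2array before expanding it to 44 words.) A compact round-trip check, using list-comprehension equivalents of the two records rather than their original bodies:

def byte2array(data):
    # 16 bytes -> four 4-byte words
    return [list(data[i:i + 4]) for i in range(0, len(data), 4)]

def array2hex(array):
    # four 4-byte words -> 32 hex characters
    return ''.join('{:02x}'.format(x) for row in array[:4] for x in row)

if __name__ == "__main__":
    block = bytes.fromhex("000102030405060708090a0b0c0d0e0f")
    assert array2hex(byte2array(block)) == block.hex()
    print(array2hex(byte2array(block)))   # -> 000102030405060708090a0b0c0d0e0f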
Returns key schedule of 44 words | def getKeySchedule(key):
temp_keys = 44 * [None]
key_schedule = byte2array(key)
for i in range(len(key_schedule)):
if i%4==0:
temp = key_schedule[i]
for j in range(0,len(temp_keys),4):
temp_keys[j] = temp
temp = [temp[-1]] + temp[:3]
if i%4==1:
temp = key_schedule[i]
for j in range(1,len(temp_keys),4):
temp_keys[j] = temp
temp = [temp[-1]] + temp[:3]
if i%4==2:
temp = key_schedule[i]
for j in range(2,len(temp_keys),4):
temp_keys[j] = temp
temp = [temp[-1]] + temp[:3]
if i%4==3:
temp = key_schedule[i]
for j in range(3,len(temp_keys),4):
temp_keys[j] = temp
temp = [temp[-1]] + temp[:3]
key_schedule = temp_keys
return key_schedule | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encrypt(plaintext, key_schedule):\n state_array = byte2array(plaintext)\n round_0 = []\n round_0.extend([key_schedule[0],key_schedule[1],key_schedule[2],key_schedule[3]])\n ADD_ROUND_KEY(state_array,round_0)\n count = 0\n temp = []\n temp_key_sched = []\n\n for i in range(4,44): \n temp.append(key_schedule[i])\n count += 1 \n\n if count % 4 == 0 :\n temp_key_sched.append(temp)\n count = 0\n temp = []\n words = temp_key_sched\n\n\n\n for i in words:\n a = SUBSTITUTE_BYTES(state_array)\n b = SHIFT_ROWS(a)\n state_array = ADD_ROUND_KEY(b,i)\n\n # Code here\n\n return array2hex(state_array)",
"def getBusSchedule():\n isFirstWord = True\n inFile = fileinput.input(files=('busRoutes.txt'))\n strWord = \"\"\n index = 0\n dictStops = {}\n strStops = []\n\n for line in inFile:\n isFirstWord = True\n index = 0\n for strWord in line.split():\n #skip the Bus number\n if isFirstWord:\n isFirstWord = False\n continue\n\n #skip the colons\n if strWord == \":\":\n continue\n\n if inFile.isfirstline():\n lstTimes = []\n dictStops[strWord] = lstTimes;\n strStops.append(strWord);\n else:\n lstTimes = dictStops[strStops[index]]\n lstTimes.append(strWord)\n dictStops[strStops[index]] = lstTimes;\n index+=1\n #for word in line\n #for line in file\n return dictStops;",
"def schedule_text():",
"def expand_key(master_key):\n #s_box = bytes2matrix(s_box1)\n # Round constants https://en.wikipedia.org/wiki/AES_key_schedule#Round_constants\n r_con = (\n 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,\n 0x80, 0x1B, 0x36, 0x6C, 0xD8, 0xAB, 0x4D, 0x9A,\n 0x2F, 0x5E, 0xBC, 0x63, 0xC6, 0x97, 0x35, 0x6A,\n 0xD4, 0xB3, 0x7D, 0xFA, 0xEF, 0xC5, 0x91, 0x39,\n )\n\n # Initialize round keys with raw key material.\n key_columns = bytes2matrix(master_key, 4)\n #print(key_columns)\n iteration_size = len(master_key) // 4\n\n\n # Each iteration has exactly as many columns as the key material.\n columns_per_iteration = len(key_columns)\n i = 1\n while len(key_columns) < (N_ROUNDS + 1) * 4:\n # Copy previous word.\n word = list(key_columns[-1])\n\n # Perform schedule_core once every \"row\".\n if len(key_columns) % iteration_size == 0:\n # Circular shift.\n word.append(word.pop(0))\n # Map to S-BOX.\n word = [s_box[b-1] for b in word]\n\n # XOR with first byte of R-CON, since the others bytes of R-CON are 0.\n word[0] ^= r_con[i]\n i += 1\n elif len(master_key) == 32 and len(key_columns) % iteration_size == 4:\n # Run word through S-box in the fourth iteration when using a\n # 256-bit key.\n word = [s_box[b] for b in word]\n\n # XOR with equivalent word from previous iteration.\n word = bytes(i^j for i, j in zip(word, key_columns[-iteration_size]))\n key_columns.append(word)\n\n # Group key words in 4x4 byte matrices.\n return [key_columns[4*i : 4*(i+1)] for i in range(len(key_columns) // 4)]",
"def KSA(key):\n key_length = len(key)\n tab = list(range(MOD))\n j = 0\n for i in range(MOD):\n j = (j + tab[i] + key[i % key_length]) % MOD\n tab[i], tab[j] = tab[j], tab[i]\n return tab",
"def create_empty_schedule():\n\n\t# create empty dictionary with all room-timelock combinations (roomlocks) as keys\n\troomlocks = list(range(0, 140))\n\tschedule = dict.fromkeys(roomlocks)\n\n\treturn schedule",
"def get_schedule_string(self):\n schedule = \"\"\n for entry in self.entries:\n schedule += entry.get_entry_string()\n\n return schedule",
"def timeInWords(hour, minutes):\n num_to_word = {\n 1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\",\n 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"ten\",\n 11: \"eleven\", 12: \"twelve\", 13: \"thirteen\", 14: \"fourteen\",\n 15: \"quarter\", 16: \"sixteen\", 17: \"seventeen\",\n 18: \"eighteen\", 19: \"nineteen\", 20: \"twenty\", 30: \"half\",\n 40: \"forty\", 45: \"quarter\", 50: \"fifty\"\n }\n\n if minutes == 0:\n return (f\"{num_to_word[hour]} o' clock\")\n elif 1 <= minutes <= 30:\n if minutes == 15 or minutes == 30:\n return (f\"{num_to_word[minutes]} past {num_to_word[hour]}\")\n if 21 <= minutes <= 29:\n last_digit = minutes - 20\n last_digit = num_to_word[last_digit]\n return (f\"{num_to_word[20]} {last_digit} minutes past {num_to_word[hour]}\")\n else:\n return (f\"{num_to_word[minutes]} minutes past {num_to_word[hour]}\")\n else:\n if minutes == 45:\n return (f\"{num_to_word[60-minutes]} to {num_to_word[hour+1]}\")\n if 31 <= minutes <= 39:\n remainder_20 = 60 - minutes\n last_digit = remainder_20 - 20\n last_digit = num_to_word[last_digit]\n return (f\"{num_to_word[20]} {last_digit} minutes to {num_to_word[hour+1]}\")\n else:\n return (f\"{num_to_word[60-minutes]} minutes to {num_to_word[hour+1]}\")",
"def long_training_sequence():\n\n symbol = np.fft.ifft(long_training_symbol())\n full_long_time = np.concatenate([symbol[32:], symbol, symbol]) # two symbols plus 32 samples of GI\n return full_long_time.tolist()",
"def _TKK():\n return [406604, 1836941114]",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def calculate_cipher(self, sub_keys, text):\n X = text\n K = sub_keys\n\n step = ['0'] * 14\n for i in range(0, ROUNDS - 1):\n step[0] = (self.mul(X[0], int(K[i][0], 2)))\n step[1] = (self.add(X[1], int(K[i][1], 2)))\n step[2] = (self.add(X[2], int(K[i][2], 2)))\n step[3] = (self.mul(X[3], int(K[i][3], 2)))\n step[4] = (self.xor(step[0], step[2]))\n step[5] = (self.xor(step[1], step[3]))\n step[6] = (self.mul(step[4], int(K[i][4], 2)))\n step[7] = (self.add(step[5], step[6]))\n step[8] = (self.mul(step[7], int(K[i][5], 2)))\n step[9] = (self.add(step[6], step[8]))\n step[10] = (self.xor(step[0], step[8]))\n step[11] = (self.xor(step[2], step[8]))\n step[12] = (self.xor(step[1], step[9]))\n step[13] = (self.xor(step[3], step[9]))\n # [print(\"Step \"+str(y)+\": \"+str(hex(int(step[y]))) + \"({0})\".format(int(step[y]))) for y in range(14)]\n\n if self.log:\n print(\"Round [\" + str(i + 1) + \"] HEX input \" + ' '.join([str(hex(int(x))) for x in X]))\n print(\"Round [\" + str(i + 1) + \"] HEX sub-key \" + ' '.join([str(hex(int(k, 2))) for k in K[i]]))\n X = [step[10], step[11], step[12], step[13]] # Swap step 12 and 13\n if self.log:\n print(\"Round [\" + str(i + 1) + \"] HEX output \" + ' '.join(\n [str(hex(int(x))) for x in X]) + \"\\n---------------\")\n\n \"\"\"X1 * K1\n X2 + K2\n X3 + K3\n X4 * K4\"\"\"\n X = [step[10], step[12], step[11], step[13]]\n result = [self.mul(X[0], int(K[ROUNDS - 1][0], 2)), self.add(X[1], int(K[ROUNDS - 1][1], 2)),\n self.add(X[2], int(K[ROUNDS - 1][2], 2)), self.mul(X[3], int(K[ROUNDS - 1][3], 2))]\n\n temp = [str(hex(int(x)))[2:] for x in result]\n temp = ['0' * (4 - len(x)) + x for x in temp]\n cipher = ''.join([x for x in temp])\n\n if self.log:\n print(\"Round [\" + str(ROUNDS - 0.5) + \"] HEX input \" + ' '.join([str(hex(int(x))) for x in X]))\n print(\"Round [\" + str(ROUNDS - 0.5) + \"] HEX sub-key \" + ' '.join(\n [str(hex(int(k, 2))) for k in K[ROUNDS - 1]]))\n print(\"Round [\" + str(ROUNDS - 0.5) + \"] HEX output \" + ' '.join([str(hex(int(x))) for x in result])\n + \"\\n---------------\")\n print(\"Final Cipher/Decipher: \" + cipher + \"\\n---------------\")\n\n return cipher # Hex string",
"def generate_hks():\n return ''.join((random.choice(string.ascii_letters) for x in range(26)))",
"def rc4_ksa(key_bits: Bits):\n key = bytearray(key_bits.tobytes())\n w = 256\n r = list(range(w))\n keylength = len(key)\n\n j = 0\n for i in range(w):\n j = (j + r[i] + key[i % keylength]) % w\n r[i], r[j] = r[j], r[i]\n\n return r",
"def keyGen(key):\n def leftShift(keyBitList):\n \"\"\"Perform a circular left shift on the first and second five bits\"\"\"\n shiftedKey = [None] * KeyLength\n shiftedKey[0:9] = keyBitList[1:10]\n shiftedKey[4] = keyBitList[0]\n shiftedKey[9] = keyBitList[5]\n return shiftedKey\n\n # Converts input key (integer) into a list of binary digits\n keyList = [(key & 1 << i) >> i for i in reversed(range(KeyLength))]\n permKeyList = [None] * KeyLength\n for index, elem in enumerate(P10table):\n permKeyList[index] = keyList[elem - 1]\n shiftedOnceKey = leftShift(permKeyList)\n shiftedTwiceKey = leftShift(leftShift(shiftedOnceKey))\n subKey1 = subKey2 = 0\n for index, elem in enumerate(P8table):\n subKey1 += (128 >> index) * shiftedOnceKey[elem - 1]\n subKey2 += (128 >> index) * shiftedTwiceKey[elem - 1]\n return (subKey1, subKey2)",
"def find_best_shifts_rec(wordlist, text, start):\n ### TODO.\n list_of_best_shifts = []\n decoded_text = text[:start]\n encoded_text = text[start:]\n best_shift = (start, find_best_shift_for_next_word(wordlist, encoded_text))\n # print \"best_shift =\", best_shift\n list_of_best_shifts.append(best_shift)\n # print \"list_of_best_shifts =\", list_of_best_shifts\n new_decoded_text = apply_shift(encoded_text,best_shift[1])\n point = new_decoded_text.find(\" \")\n # print \"point =\",point\n if start > len(text):\n return list_of_best_shifts\n else:\n decoded_text = decoded_text + new_decoded_text[:point]\n encoded_text = new_decoded_text[point:]\n text = decoded_text + encoded_text\n # print \"(decoded_text =\", decoded_text + \") (encoded_text=\", encoded_text + \")\"\n # print \"text =\", text\n start = len(decoded_text) + 1\n # print \"start =\", start\n real_words = ()\n possible_words = []\n list_of_best_shifts.extend(find_best_shifts_rec(wordlist, text, start))\n return list_of_best_shifts\n # if point >= 0:\n # decoded_text = decoded_text + new_decoded_text[:point]\n # encoded_text = new_decoded_text[point:]\n # text = decoded_text + encoded_text\n # # print \"(decoded_text =\", decoded_text + \") (encoded_text=\", encoded_text + \")\"\n # # print \"text =\", text\n # start = len(decoded_text) + 1\n # # print \"start =\", start\n # real_words = ()\n # possible_words = []\n # list_of_best_shifts.extend(find_best_shifts_rec(wordlist, text, start))\n # return list_of_best_shifts\n # else:\n # # print \"We are now in the last layer\"\n # # print \"best_shift = \", best_shift\n # # print \"Type of best_shift =\", type(best_shift)\n # return list_of_best_shifts",
"def tower(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC+\"255D\"+self.ESC+\"0;1;44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"58C\"+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"68C\"+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"30m\"+self.A219+\" \"+self.A219+\" \"+self.A219+\" \"+self.A219+\" \"+self.A219+\" \"+self.ESC+\"0;44m\"+self.A219+\" \"+self.A219+\" \"+self.A219+\" \"+self.ESC+\"1m\"+self.A219+\" \"+self.A219+\" \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"52C\"+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"43m\"+self.A223+self.A223+self.ESC+\"0;43m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"40C\"+self.ESC+\"43m\"+self.ESC+\"0;34;43m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"30m\"+self.A223+self.A223+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"1;36;40mYou reach th\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"42Ce castle and fight your way up the\"+self.ESC+\"C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A178+self.A219+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"30C\"+self.ESC+\"43m\"+self.ESC+\"1;36;40mtower!\"+self.ESC+\"41C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A176+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A219+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A219+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"24C\"+self.ESC+\"44m\"+self.ESC+\"40m\"+self.A221+self.ESC+\"44m\"+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"48C\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m\"+self.A178+self.A177+self.A177+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A177+self.A177+self.A177+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A219+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"28C\"+self.ESC+\"44m\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"1;36;40mYou blindly slash at any and all who\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"66C stand in \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m\"+self.A177+self.A176+self.A177+self.A176+self.A178+self.A219+self.ESC+\"44m 
\"+self.ESC+\"40m\"+self.A176+self.A177+self.A176+self.A177+self.A177+self.A219+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"30C\"+self.ESC+\"43m\"+self.ESC+\"1;36;40myour path - screaming to find the prisoner \"+self.ESC+\"4C\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"77C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m\"+self.A176+self.A177+self.A177+self.A178+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A177+self.A178+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A178+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"28C\"+self.ESC+\"44m\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"C\"+self.ESC+\"1;36;40myou seek - and the jailer of your he\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"66Cart.\"+self.ESC+\"7C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A176+self.A176+self.A177+self.ESC+\"33m\"+self.A176+self.ESC+\"32m\"+self.A177+self.A176+self.A177+self.ESC+\"33m\"+self.A178+self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A178+self.A176+self.A177+self.A176+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"23C\"+self.ESC+\"44m\"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"48C\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"32m\"+self.A176+self.A177+self.A177+self.A177+self.A176+self.A178+self.A177+self.ESC+\"33m\"+self.A176+self.A177+self.A178+self.ESC+\"32m\"+self.A177+self.A177+self.A178+self.A176+self.A178+self.A176+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"43m\"+self.A220+self.A220+self.ESC+\"0;43m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"34C\"+self.ESC+\"43m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.ESC+\"0;34;43m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"76C\"+self.ESC+\"43m\"+self.ESC+\"30m\"+self.A220+self.A220+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A177+self.A176+self.A177+self.A176+self.A176+self.ESC+\"33m\"+self.A176+self.A177+self.A178+self.A177+self.A177+self.ESC+\"32m\"+self.A177+self.A178+self.A177+self.A177+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A222+self.ESC+\"44m\"+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"28C\"+self.ESC+\"44m\"+self.A219+self.A219+self.ESC+\"0;44m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A221+self.A219+self.ESC+\"1;44m\"+self.A219+self.ESC+\"40m\"+self.A221+self.ESC+\"44m\"+self.A219+self.A219+\" \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"40m\"+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"A\"+self.ESC+\"44C\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"43m\"+self.A219+self.ESC+\"34m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"33;40m\"+self.A176+self.A177+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;44m\"+self.A219+self.A219+self.A219+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"37C\"+self.ESC+\"44m\"+self.A219+\" \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"1;37;40mYou make it to the top and \\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"74C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A177+self.A176+self.A178+self.ESC+\"44m \"+self.ESC+\"1;30m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0;44m\"+self.A219+self.A219+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+\" \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"40C\"+self.ESC+\"44m \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"4C\"+self.ESC+\"1;37;40mthrow open the door...\"+self.ESC+\"6C\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"74C\"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"40m\"+self.A176+self.A178+self.A177+self.A178+self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A177+self.ESC+\"1;42m\"+self.A178+self.A219+self.ESC+\"30;44m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"31C\"+self.ESC+\"44m\"+self.ESC+\"0;44m\"+self.A219+self.A219+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+\" \"+self.ESC+\"0;33;43m\"+self.A219+self.ESC+\"1;32;42m\"+self.A177+self.A178+self.A178+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"53C\"+self.ESC+\"32m\"+self.A178+self.ESC+\"1;42m\"+self.A219+self.A219+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"43m\"+self.A219+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"44m \"+self.ESC+\"32;40m\"+self.A176+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A219+self.A219+self.ESC+\"0;33m\"+self.A176+self.A177+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"20C\"+self.ESC+\"44m\"+self.ESC+\"1;32;42m\"+self.A178+self.A219+self.ESC+\"0;32m\"+self.A176+self.A178+self.ESC+\"1;42m\"+self.A219+self.ESC+\"0;32m\"+self.A178+self.ESC+\"1;30;44m\"+self.A219+self.A219+self.A219+self.ESC+\"0;44m\"+self.A219+self.A219+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"32C\"+self.ESC+\"44m\"+self.ESC+\"1m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+\" 
\"+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A178+self.ESC+\"1;42m\"+self.A178+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"46C\"+self.ESC+\"42m\"+self.ESC+\"44m \"+self.ESC+\"0;32m\"+self.A177+self.A178+self.A176+self.ESC+\"1;42m\"+self.A178+self.ESC+\"0;32m\"+self.A176+self.A178+self.ESC+\"1;42m\"+self.A219+self.A178+self.A178+self.ESC+\"44m \"+self.ESC+\"42m\"+self.A176+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"63C\"+self.ESC+\"42m\"+self.A178+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"42m \"+self.ESC+\"0;32m\"+self.A177+self.A176+self.ESC+\"1;42m\"+self.A178+self.A176+self.ESC+\"0;32m\"+self.A176+self.ESC+\"1;42m\"+self.A178+self.A219+self.ESC+\"0;32m\"+self.A178+self.A219+self.ESC+\"42m \"+self.ESC+\"40m\"+self.A177+self.A177+self.A176+self.A176+self.A177+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"21C\"+self.ESC+\"1;42m\"+self.A178+self.A176+self.ESC+\"30;40m\"+self.A219+self.ESC+\"0;32m\"+self.A177+self.A177+self.A178+self.ESC+\"1;42m\"+self.A219+self.ESC+\"30;40m\"+self.A219+self.A219+self.ESC+\"0m\"+self.A219+self.A219+self.A219+self.ESC+\"1m\"+self.A219+self.A219+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"36C\"+self.ESC+\"0;32m\"+self.A177+self.A178+self.A176+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A176+self.ESC+\"1;42m\"+self.A219+self.A178+self.ESC+\"0;32m\"+self.A176+self.ESC+\"1;42m\"+self.A219+self.A219+\" \"+self.A178+\" \"+self.ESC+\"0;32m\"+self.A177+self.A176+self.A176+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"52C\"+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.A176+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.A178+self.A176+self.ESC+\"0;32m\"+self.A178+self.ESC+\"1;42m\"+self.A176+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"63C\"+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"42m\"+self.A176+self.A176+self.ESC+\"0;32m\"+self.A177+self.A176+self.ESC+\"1;42m\"+self.A176+self.A176+self.A178+self.A219+self.A219+self.A178+self.A178+self.A176+self.A176+\" \"+self.A176+self.A176+self.ESC+\"0;32m\"+self.A177+self.A176+self.A177+self.A178+self.A176+self.A177+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A176+self.ESC+\"1;42m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"26C\"+self.ESC+\"42m\"+self.ESC+\"0;32m\"+self.A178+self.ESC+\"1;42m\"+self.A219+self.ESC+\"0;32m\"+self.A177+self.A178+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.A176+self.ESC+\"0;32m\"+self.A177+self.A177+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"36C\"+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A176+self.A177+self.A177+self.A177+self.ESC+\"1;42m\"+self.A219+self.A176+self.ESC+\"0;32m\"+self.A178+self.A178+self.A177+self.A176+self.A178+self.ESC+\"1;42m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"51C\"+self.ESC+\"42m\"+self.ESC+\"0;32m\"+self.A177+self.A176+self.A177+self.A178+self.A178+self.ESC+\"1;42m\"+self.A178+self.ESC+\"0;32m\"+self.A177+self.A178+self.ESC+\"1;42m\"+self.A177+self.A178+self.ESC+\"0;32m\"+self.A177+self.ESC+\"1;42m\"+self.A176+self.ESC+\"44m \"+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"64C\"+self.ESC+\"44m 
\"+self.ESC+\"37m<MORE> \"+self.ESC+\"40m\\r\\n\"\n\t\treturn thismsg",
"def buildCoder(shift):\n out_dic = {}\n lo = string.ascii_lowercase\n up = string.ascii_uppercase\n for i in lo:\n out_dic[i] = lo[(lo.index(i) + shift) % len(lo)]\n for i in up:\n out_dic[i] = up[(up.index(i) + shift) % len(up)]\n return out_dic",
"def gen_seskey(self):\n return ''.join([str(format(randint(0, 15), 'X')) for i in range(24)])",
"def word_starters():\n space = ''\n for k,v in key.items():\n if v==' ':\n space = k\n\n starters = Counter()\n for word in ciphertext.split( space ):\n first_two = word[0]+word[1]\n starters[first_two] += 1\n\n return starters",
"def buildCoder(shift):\n alphabet = string.ascii_lowercase \n alphabet2 = string.ascii_uppercase \n \n \n #Create our substitution dictionary \n dic={} \n dic2={}\n for i in range(0,len(alphabet)): \n dic[alphabet[i]]=alphabet[(i+shift)%len(alphabet)]\n dic2[alphabet2[i]]=alphabet2[(i+shift)%len(alphabet2)]\n \n dic.update(dic2)\n \n return dic",
"def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki",
"def schedule_paragraph():",
"def buildCoder(shift):\n result = {}\n import string\n lower = string.ascii_lowercase\n lower_shifted = lower[shift:]+lower[:shift]\n upper = string.ascii_uppercase\n upper_shifted = upper[shift:]+upper[:shift]\n for i in range(26):\n result[lower[i]] = lower_shifted[i]\n for i in range(26):\n result[upper[i]] = upper_shifted[i]\n return result",
"def make_text(chains):\n\n #current key is equal to a random key in the dictionary\n current_key = choice(chains.keys())\n #text is a string equal to the first index of the tuple and the second index of the tuple\n text = current_key[0] + \" \" + current_key[1]\n #create a loop that will repeat until the function reaches the last bi-gram\n while True:\n #created a variable for a random value of the current key\n chosen_word = choice(chains[current_key])\n #equivalent to: if chosen_word is None\n if not chosen_word:\n break\n # if it's not the last key add the chosen_word to the text to string\n text = text + \" \" + chosen_word\n # select new key\n current_key = (current_key[1], chosen_word)\n \n return text",
"async def key(ctx, note:str, length=7):\n if length < 1:\n length = 1\n elif length > 7:\n length = 7\n answer=Tempo.getKeyScale(note)\n solution = 'Next notes in this key is: '+str(answer)\n await ctx.send(solution)",
"def get_schedule(self, key=None):\n if key is not None:\n assert key in self.params.schedule[self.sched_idx].keys(), (\n key+\" was not found in the schedule.\")\n return self.params.schedule[self.sched_idx][key]\n return self.params.schedule[self.sched_idx]",
"def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"",
"def get_keywords(seq):\r\n if len(seq) = 0:\r\n return None\r\n freqs = {}\r\n for w in seq: \r\n if w not in freqs:\r\n\t freqs[w] = 1\r\n\telse\r\n\t freqs[w] += 1\r\n num_keys = len(freqs)\r\n res = []\r\n \r\n return res",
"def findBestShift(wordList, text):\n import string\n decoded = ''\n r = 0\n max_count = 0\n for i in range(26):\n count = 0\n decoded = applyShift(text,i)\n for word in decoded.split():\n if word.strip(string.punctuation+string.digits).lower() in wordList:\n count += 1\n if count > max_count:\n max_count = count\n r = i\n return r"
] | [
"0.6306153",
"0.5801089",
"0.5770608",
"0.5475522",
"0.54427946",
"0.538228",
"0.5380662",
"0.53749466",
"0.5294702",
"0.52684486",
"0.5235163",
"0.51727843",
"0.5154344",
"0.51498634",
"0.5124376",
"0.5113235",
"0.51083964",
"0.5106627",
"0.50985396",
"0.5095365",
"0.50793827",
"0.5076861",
"0.507335",
"0.50611746",
"0.50551844",
"0.50485",
"0.50418156",
"0.5024389",
"0.50234133",
"0.5010857"
] | 0.7305503 | 0 |
Encrypts plaintext using key schedule | def encrypt(plaintext, key_schedule):
state_array = byte2array(plaintext)
round_0 = []
round_0.extend([key_schedule[0],key_schedule[1],key_schedule[2],key_schedule[3]])
ADD_ROUND_KEY(state_array,round_0)
count = 0
temp = []
temp_key_sched = []
for i in range(4,44):
temp.append(key_schedule[i])
count += 1
if count % 4 == 0 :
temp_key_sched.append(temp)
count = 0
temp = []
words = temp_key_sched
for i in words:
a = SUBSTITUTE_BYTES(state_array)
b = SHIFT_ROWS(a)
state_array = ADD_ROUND_KEY(b,i)
# Code here
return array2hex(state_array) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))",
"def encrypt(self, key, plaintext):\n output = []\n padded_key = padd_key(key, plaintext)\n for i in range(len(plaintext)):\n enc_ascii = (ord(plaintext[i]) + ord(padded_key[i])) % 256\n output.append(chr(enc_ascii))\n return ''.join(output)",
"def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext",
"def encrypt(self, plaintext: str) -> str:\n\n return self.run(plaintext, Cryptography.ENCRYPT)",
"def encrypt(plaintext: str, key: str) -> str:\n return \"\".join(chr(ord(p) ^ ord(k)) for (p, k) in zip(plaintext, key))",
"def encrypt(plaintext):\n # Pad plaintext\n plaintext = pad(plaintext)\n\n # AES encrypt\n iv = Random.new().read(BS)\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return iv + aes.encrypt(plaintext)",
"def encrypt(key, plaintext, cipher):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n p = plaintext.read()\n c = rsa.encrypt(p, k)\n\n cipher.write(c)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except OverflowError:\n click.echo(\"ERROR: Message is to long for encryption with the given key.\")",
"def encrypt(self):\n # Generate a randomized initialization vector\n iv = Random.new().read(AES.block_size)\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n # Add a buffer so that the plaintext is a multiple of 16 characters in length\n pt_len = len(self.plaintext)\n buffer_size = AES.block_size - pt_len % AES.block_size\n strmsg = self.plaintext + \" \" * buffer_size\n return cipher.encrypt(str.encode(strmsg)), iv",
"def encrypt(cls, plaintext, aad, key, iv):",
"def __encrypt(self, plaintext):\n iv = get_random_bytes(16)\n try:\n encryption_envelope = {'ciphertext':'', \n 'keyid':esn_manifest + '_' + str(self.sequence_number), 'sha256':'AA==', \n 'iv':base64.standard_b64encode(iv).decode('utf-8')}\n except Exception:\n print('ESN is invalid.')\n sys.exit(0)\n\n plaintext = Padding.pad(plaintext.encode('utf-8'), 16)\n cipher = AES.new(self.encryption_key, AES.MODE_CBC, iv)\n ciphertext = cipher.encrypt(plaintext)\n encryption_envelope['ciphertext'] = base64.standard_b64encode(ciphertext).decode('utf-8')\n return json.dumps(encryption_envelope)",
"def encrypt_symmetric(secret_key, plaintext):\n f = Fernet(secret_key)\n return f.encrypt(plaintext)",
"def encrypt_vigenere(plaintext: str, keyword: str) -> str:\n ciphertext = \"\"\n # PUT YOUR CODE HERE\n\n key_lenght = len(keyword)\n text_lenght = len(plaintext)\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_A = ord('A')\n ord_a = ord('a')\n\n if plaintext.islower():\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_a)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n ciphertext += \" \"\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_a\n ciphertext += chr(value)\n else:\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_A)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n value = ord(\" \")\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_A\n ciphertext += chr(value)\n return ciphertext",
"def encrypt(cleartext):\n base_encode = {'16': base64.b16encode,\n '32': base64.b32encode, '64': base64.b64encode}\n ciphertext = cleartext+''\n\n for i in range(encrypt_times):\n base = random.choice(['16', '32', '64'])\n ciphertext = base_encode[base](ciphertext)\n\n return ciphertext",
"def encrypt_aes(msg, key, iv):\r\n #start timer\r\n start = timeit.default_timer()\r\n\r\n #converting key to bytes from hex\r\n key = bytes.fromhex(key)\r\n msg = pad(msg)\r\n obj = AES.new(key, AES.MODE_CBC, iv)\r\n ciphertxt = obj.encrypt(msg)#ciphertxt will be in 'bytes'\r\n\r\n #converting ciphertxt into hexadecimal\r\n ciphertxt = ciphertxt.hex()\r\n\r\n print(\"Ciper is: \",ciphertxt)\r\n\r\n #stop timer\r\n stop = timeit.default_timer()\r\n print('Encryption Running Time: ', stop-start)\r\n \r\n return ciphertxt",
"def encrypt():\n\tnull = 0",
"def encrypt_block(self, plaintext):\n assert len(plaintext) == 16\n plain_state = bytes2matrix(plaintext)\n\n add_round_key(plain_state, self._key_matrices[0])\n\n for i in range(1, self.n_rounds):\n sub_bytes(plain_state)\n shift_rows(plain_state)\n mix_columns(plain_state)\n add_round_key(plain_state, self._key_matrices[i])\n\n sub_bytes(plain_state)\n shift_rows(plain_state)\n add_round_key(plain_state, self._key_matrices[-1])\n\n return matrix2bytes(plain_state)",
"def DHencrypt(plaintext, symmetricKey, p, gen):\r\n \"Method was updated to use AES symetric decryption that was\"\r\n \"provided in the starter code as option of symetric encrytion using shared secret keys is generated.\"\r\n simplified_AES.keyExp(symmetricKey) # Generating round keys for AES.\r\n ciphertext = simplified_AES.encrypt(plaintext) # Running simplified AES.\r\n return ciphertext",
"def sendToClient(plaintext):\n signature = userKeys.signUsingPrivateKey(plaintext)\n encryptedText = userKeys.encrypt(plaintext, contactKey)\n s.send(encryptedText)\n time.sleep(1)\n s.send(signature)",
"def encrypt(self, plaintext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass",
"def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'",
"def encipher(self):\n ciphertext = \"\"\n for pt, key_char in zip(self.text, self.key):\n char_index = self.char_block.alphabet.index(pt)\n ciphertext += self.char_block.rows[key_char][char_index]\n print(ciphertext)",
"def encrypt_vigenere(plaintext: str, keyword: str) -> str:",
"def encrypt(event=None): # event is passed by binders.\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n f = open(myTmpDir + 'pt' + str(identity) + '.bin','wb')\n f.write(msg)\n f.close()\n\n os.popen(\"rsa.exe e \" + myTmpDir + \"pt\" + str(identity) + \".bin \"+ myTmpDir + \"locEnc\" + str(identity) + \".bin\")\n\n locEncFileName = myTmpDir + \"locEnc\" + str(identity) + \".bin\"\n with open(locEncFileName, \"rb\") as f:\n readFile = f.read()\n # Convert to hex representation\n digest = base64.encodestring(bytes(readFile))\n\n # TODO: overwirite\n outText.insert(tkinter.END, digest)",
"def encrypt(\r\n key: bytes,\r\n plain_text: str,\r\n) -> bytes:\r\n block_size = 16\r\n plain_text = _pad(plain_text, block_size)\r\n iv = os.urandom(block_size)\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n cipher_text = cipher.encrypt(plain_text.encode())\r\n return iv + cipher_text",
"def fernet_encript(key,message):\n\tf = Fernet(key)\n\treturn f.encrypt(message)",
"def encrypt(plaintext: str) -> Iterable:\n return simplesubstitution.encrypt(KEY, plaintext)",
"def ctr_encrypt(pt_bin_list, keys, rounds):\n msg = pt_bin_list\n nonce = generate_random_binary(len(pt_bin_list[0])-8) # Initialization Vector\n counter = range(0,len(msg))\n enc_result = \"\"\n\n with multiprocessing.Pool() as p:\n enc_result = p.starmap(ctr_process, zip(msg, repeat(nonce), counter, keys, repeat(rounds)))\n\n enc_result.insert(0,nonce+\"00000000\") # Store padded IV to the start of ciphertext\n return enc_result",
"def __encrypt_text_aes__(self, text, password):\n BLOCK_SIZE = 32\n PADDING_CHAR = b'^'\n iv = Random.new().read(16)\n # key must be 32 bytes for AES-256, so the password is hashed with md5 first\n cipher = AES.new(self.__hash_md5__(password), AES.MODE_CBC, iv)\n plaintext = text.encode('utf-8')\n # plaintext must be padded to be a multiple of BLOCK_SIZE\n plaintext_padded = plaintext + (BLOCK_SIZE - len(plaintext) % BLOCK_SIZE) * PADDING_CHAR\n ciphertext = cipher.encrypt(plaintext_padded)\n return (\n base64.b64encode(iv),\n base64.b64encode(ciphertext),\n PADDING_CHAR\n )",
"def perform_aes_algorithm(plaintext, key):\n if len(key) == 32:\n print('C.1 AES-128 (Nk=4, Nr=10)\\n')\n elif len(key) == 48:\n print('\\nC.2 AES-192 (Nk=6, Nr=12)\\n')\n else:\n print('\\nC.3 AES-256 (Nk=8, Nr=14)\\n')\n\n print('{:<19} {:}'.format('PLAINTEXT:', plaintext))\n print('{:<19} {:}\\n'.format('KEY:', key))\n\n print('CIPHER (ENCRYPT):')\n ciphertext = encrypt(plaintext, key, verbose=True)\n\n print('\\nINVERSE CIPHER (DECRYPT):')\n decrypt(ciphertext, key, verbose=True)",
"def ecb_encrypt(pt_bin_list, keys, rounds):\n enc_result = \"\"\n\n with multiprocessing.Pool() as p:\n enc_result = p.starmap(feistel_encrypt, zip(pt_bin_list, keys, repeat(rounds)))\n return enc_result"
] | [
"0.7056993",
"0.6997083",
"0.6910249",
"0.6826273",
"0.6785555",
"0.6736617",
"0.67288643",
"0.6718968",
"0.6695133",
"0.66698223",
"0.6668952",
"0.6629753",
"0.6595874",
"0.6590091",
"0.6589204",
"0.65545017",
"0.65491396",
"0.6546425",
"0.6530666",
"0.65245265",
"0.6524502",
"0.6516106",
"0.6504815",
"0.6497673",
"0.6486684",
"0.6475161",
"0.64397585",
"0.64349765",
"0.64039797",
"0.63989186"
] | 0.7380667 | 0 |
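The record above pairs the query "Encrypts plaintext using key schedule" with an AES-style round loop (initial ADD_ROUND_KEY, then per-round SUBSTITUTE_BYTES / SHIFT_ROWS / ADD_ROUND_KEY over word-grouped round keys). Below is a minimal, runnable sketch of that expand-the-schedule-then-apply-it pattern; it is a toy cipher, not AES and not the dataset's code, and every name in it is a hypothetical stand-in.

```python
# Toy illustration of the "expand key schedule, then apply it round by round"
# pattern seen in the record above. NOT AES; all names are hypothetical.

def expand_key(master_key: bytes, rounds: int) -> list:
    """Derive one 16-byte round key per round by rotating and perturbing the master key."""
    key = (master_key * 16)[:16]
    schedule = []
    for r in range(rounds + 1):
        rotated = key[r % 16:] + key[:r % 16]
        schedule.append(bytes((b + r) % 256 for b in rotated))
    return schedule

def add_round_key(state: bytes, round_key: bytes) -> bytes:
    return bytes(s ^ k for s, k in zip(state, round_key))

def toy_encrypt(plaintext: bytes, master_key: bytes, rounds: int = 10) -> bytes:
    assert len(plaintext) == 16, "toy cipher works on 16-byte blocks"
    schedule = expand_key(master_key, rounds)
    state = add_round_key(plaintext, schedule[0])  # initial whitening, like round 0 above
    for round_key in schedule[1:]:
        state = state[1:] + state[:1]              # stand-in for the substitute/shift steps
        state = add_round_key(state, round_key)
    return state

if __name__ == "__main__":
    print(toy_encrypt(b"sixteen byte msg", b"secret key").hex())
```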
Create nullity matrix and save the plot to ``matrix_nan.png`` in the "OUT_DATA" directory. | def create_matrix_nan():
index_missing = gate_plot.isna().sum().sort_values().index
sorted_by_missing = msno.nullity_sort(gate_plot[index_missing])
matrix_nan = msno.matrix(sorted_by_missing)
matrix_nan.set_ylabel("INDEX OF OBSERVATIONS", labelpad=0, fontsize=18)
matrix_nan.get_xticklabels()[19].set_fontweight("bold")
matrix_nan.figure.savefig(ppj("OUT_FIGURES", "matrix_nan.png"), bbox_inches="tight") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def null_plot(self, dataset):\n sns_heatmap_plot = sns.heatmap(\n dataset.isnull(), cmap=\"Blues\", yticklabels=False\n )\n sns_heatmap_plot.figure.savefig(config.NULL_CHECK_HEATMAP)",
"def create_heatmap_nan():\n index_category = pd.Index(new_labels)\n sorted_by_category = gate_plot[index_category]\n heatmap_nan = msno.heatmap(sorted_by_category, vmin=0, cmap=\"OrRd\")\n heatmap_nan.get_xticklabels()[16].set_fontweight(\"bold\")\n heatmap_nan.get_yticklabels()[16].set_fontweight(\"bold\")\n # Interesting fact:\n # When plotting heatmaps with seaborn (on which the \"missingno\" library\n # builds), the first and the last row is cut in halve, because of a bug\n # in the matplotlib regression between 3.1.0 and 3.1.1\n # We are correcting it this way:\n bottom, top = heatmap_nan.get_ylim()\n heatmap_nan.set_ylim(bottom + 0.5, top - 0.5)\n positions = np.array([1, 3, 5, 8, 10, 14, 16])\n labels = [\n \"BACKGROUND\",\n \"HOUSEHOLD\",\n \"FINANCE\",\n \"HEALTH\",\n \"EMPLOYMENT\",\n \"PERSONALITY\",\n ]\n heatmap_nan.hlines(positions, xmin=0, xmax=positions, lw=8, color=\"white\")\n for position, label in zip(positions, labels):\n heatmap_nan.text(position + 0.35, position + 0.35, label, fontsize=14)\n heatmap_nan.figure.savefig(\n ppj(\"OUT_FIGURES\", \"heatmap_nan.png\"), bbox_inches=\"tight\"\n )",
"def nullValueToNan(self) -> None:\n self.cpp.nullValueToNan()",
"def _autocheck_nan(self):\n # assert np.isnan(self.W).any() == False, \"W matrix should not contain NaN values.\"\n assert np.isnan(self.Win).any() == False, \"Win matrix should not contain NaN values.\"\n if self.Wfb is not None:\n assert np.isnan(self.Wfb).any() == False, \"Wfb matrix should not contain NaN values.\"",
"def plot_blank(self):\n self.figure_bmp.SetBitmap(self.controller.plot_blank())",
"def fix_nan(image, replace=0.):\n h = pyfits.open(image, mode='update')\n imgdata = h[0].data\n imgdata = np.where(np.isnan(imgdata), replace, imgdata)\n h[0].data = imgdata\n h.flush()\n h.close()",
"def nans(shape, dtype=float):\n a = np.empty(shape, dtype)\n a.fill(np.nan)\n return a",
"def plot_nans(df, dwelling_id):\n plt.clf()\n df = df.isnull()\n # Downsample to make all data visible\n df = df.resample('1T').sum() # Downsample to make small NaNs visible\n df = df.apply(lambda x: x > 0, 1) # Replace values >0 with 1\n\n # Reindex datetimes\n # https://stackoverflow.com/questions/41046630/set-time-formatting-on-a-datetime-index-when-plotting-pandas-series\n try:\n df.index = df.index.to_period('D')\n except:\n print('plot_nans could not set df.index.to_period')\n\n # Plot heatmap\n n = int(len(df)*0.1) # Choose amount of yticklabels to show\n\n try:\n fig = sns.heatmap(df, cmap='Reds', square=False, vmin=0, cbar=False, yticklabels=n*2, cbar_kws={})\n except TypeError:\n print('plot_nans ValueError')\n fig = sns.heatmap(df, cmap='Reds', square=False, vmin=0, cbar=False, cbar_kws={})\n\n # Set cbar ticks manually\n #cbar = fig.collections[0].colorbar\n #cbar.set_ticks([0, 1])\n #cbar.set_ticklabels(['Not NaN', 'NaN'])\n\n # Correct layout\n fig.invert_yaxis()\n fig.tick_params(axis='x', rotation=90)\n fig.tick_params(axis='y', rotation=0)\n fig.set(xlabel='Column [-]', ylabel='Index [-]')\n plt.title('Dwelling ID: '+dwelling_id)\n\n fig = fig.get_figure()\n fig.tight_layout()\n fig.show()\n print('Saving heatmap')\n fig.savefig('//datc//opschaler//nan_information//figures//' + dwelling_id + '.png', dpi=1200)\n\n return fig",
"def func_eda_df_missingno(df, prefix='', dir_png='../reports/figures/'):\n\n import missingno as msno\n import matplotlib.pyplot as plt\n\n # missing value matrix\n # - missing value indicated by white regions\n # - the bar on the right indicae each row's data completeness (number of valid values in each row)\n plt.figure(figsize=[14,10])\n msno.matrix(df)\n pngname = dir_png+'eda-missingno-heatmap-'+prefix+'.png'\n print('Missing value heatmap saved at {}'.format(pngname))\n #plt.tight_layout()\n plt.savefig(pngname)\n\n # missing value correlation\n plt.figure(figsize=[14,8])\n msno.heatmap(df) # Detailed housing characteristics\n pngname = dir_png+'eda-missingno-correlation-'+prefix+'.png'\n print('Missing value heatmap saved at {}'.format(pngname))\n plt.tight_layout()\n plt.savefig(pngname)\n\n return",
"def test_plot_empty_slice(affine_mni):\n img = Nifti1Image(np.zeros((20, 20, 20)), affine_mni)\n plot_img(img, display_mode=\"y\", threshold=1)\n plt.close()",
"def isnan(data):\n return _make.isnan(data)",
"def missing_values(self, layout={}, **kwargs):\n df = self._data.isna().astype(int)\n kwargs.update(\n {'zmin': 0, 'zmax': 1,\n 'colors': 'reds', 'ncolors': 9,\n 'xgap': 3, 'ygap': 3,\n 'showscale': False, }\n )\n\n layout = recursive_update(\n layout, updater={\n 'xaxis': {'showgrid': False, 'zeroline': False},\n 'yaxis': {'showgrid': False, 'zeroline': False},\n })\n return df.iplot.heatmap(layout=layout, **kwargs)",
"def write(fname, face, min_=0, max_=255):\n image = face.reshape(IMG_HEIGHT, IMG_WIDTH)\n res = plt.matshow(image, cmap='gray', vmin=min_, vmax=max_)\n res.axes.get_xaxis().set_visible(False)\n res.axes.get_yaxis().set_visible(False)\n plt.axis(\"off\")\n plt.savefig(fname, bbox_inches=\"tight\")",
"def test_plot_npy(self, script_runner: ScriptRunner, tmp_path: Path) -> None:\n outfile = tmp_path.joinpath(\"projection.png\")\n logfile = tmp_path.joinpath(\"plot.log\")\n result = script_runner.run(\n \"qaa\",\n \"plot\",\n \"-i\",\n PROJNP,\n \"-o\",\n outfile.as_posix(),\n \"-l\",\n logfile.as_posix(),\n \"--pca\",\n \"--verbose\",\n )\n assert result.success\n assert logfile.exists()\n assert outfile.exists()\n assert outfile.stat().st_size > 0",
"def plot_stability_matrix(self, file_name=None):\n size = len(self.seq) / 2.5\n plt.figure(figsize=(size, 2.5))\n plt.imshow(self.matrix,\n interpolation='none',\n cmap=plt.get_cmap('YlOrRd'))\n plt.yticks(range(4), ['A', 'C', 'G', 'U'], fontsize=12)\n plt.xticks(range(len(self.seq)), fontsize=12)\n if file_name is None:\n plt.show()\n else:\n plt.savefig(file_name,\n bbox_inches='tight',\n transparent=True,\n pad_inches=0)\n plt.close()",
"def missingno_matrix(self, df, fontsize, time_freq):\n\n df.index = pd.to_datetime(df[\"timestamp\"], errors='coerce')\n df = df.resample('D').mean()\n fig, ax = plt.subplots(figsize=(17,8))\n ax = msno.matrix(df, labels=True, fontsize=fontsize, freq=time_freq, ax=ax, sparkline=True, inline=True);\n st.pyplot(fig)",
"def testExpectedNaNOpOutputs(self):\n check_numerics_callback.enable_check_numerics()\n\n # Empty input tensor\n x = constant_op.constant(1, dtype=dtypes.float32, shape=[0, 1, 1, 1])\n scale = constant_op.constant([1], dtype=dtypes.float32)\n offset = constant_op.constant([1], dtype=dtypes.float32)\n\n # Calling fused_batch_norm with an empty input should output a NaN in the\n # latter four outputs without triggering the check_numerics callback\n batch_norm_res = gen_nn_ops._fused_batch_norm(\n x=x, scale=scale, offset=offset, mean=[], variance=[])\n\n _, batch_mean, batch_variance, _, _ = self.evaluate(batch_norm_res)\n\n self.assertTrue(np.isnan(batch_mean.squeeze()))\n self.assertTrue(np.isnan(batch_variance.squeeze()))",
"def assert_no_nans(x):\n assert not torch.isnan(x).any()",
"def _blankimage():\n img = TK.PhotoImage(width=1, height=1)\n img.blank()\n return img",
"def define_null(rastlist, NoData_Value, Quiet=False):\n\n rastlist = core.enf_rastlist(rastlist)\n\n # iterate through each file in the filelist and set nodata values\n for rastname in rastlist:\n\n arcpy.SetRasterProperties_management(rastname,data_type=\"#\",statistics=\"#\",\n stats_file=\"#\",nodata=\"1 \"+str(NoData_Value))\n \n print(\"Set nulls in {0}\".format(rastname)) \n return",
"def test_reduce_null_matrix_is_empty(self):\n original = pd.read_csv(NULL_FILENAME, index_col=0, header=0)\n full_reduced = entropy_reduce_position_matrix(\n original,\n 1,\n trivial_metric\n )\n self.assertEqual(full_reduced.shape[0], original.shape[0])\n self.assertEqual(full_reduced.shape[1], 0)",
"def remove_none_from_arrays(self):\r\n\r\n is_nan = numpy.isnan(self.y_values) # array of booleans, element is True if the corresponding element in\r\n # self.y_values is None\r\n\r\n self.x_values = self.x_values[numpy.logical_not(is_nan)]\r\n self.y_values = self.y_values[numpy.logical_not(is_nan)] # replace all None elements\r",
"def testPluginContainsNan(self):\n schema = self.dataset.makeMinimalSchema()\n task = lsst.meas.base.SingleFrameMeasurementTask(schema=schema, config=self.config)\n exposure, cat = self.dataset.realize(noise=100.0, schema=schema, randomSeed=2)\n source = cat[0]\n exposure.getMaskedImage().getImage().getArray()[int(source.getY()), int(source.getX())] = np.nan\n task.run(cat, exposure)\n self.assertTrue(source.get(self.algName + \"_flag\"))\n self.assertTrue(source.get(self.algName + \"_flag_containsNan\"))\n self.assertFalse(source.get(self.algName + \"_flag_edge\"))",
"def test_plot_save_figure(self):\n pname = os.path.join(\n self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03' + HEN_FILE_EXTENSION)\n hen.plot.main([pname, '--noplot', '--figname',\n os.path.join(self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03.png'),\n '-o', 'dummy.qdp'])",
"def writeMatrix(self):\n\t\tpass",
"def createBlankPlot(self):\n\n fig = plt.figure(figsize=(8,6),dpi=80)\n fig.set_facecolor('#ededed')\n \n # Format plot\n ax = plt.subplot(111)\n \n fig.canvas.draw()\n \n return fig, ax",
"def check_nan(df, show_plot=False):\n void = pd.DataFrame(np.sum(df.isna()), columns=['absolute'])\n void['percentage'] = round((void.absolute / df.shape[0]) * 100, 2)\n\n if show_plot:\n print('\\n\\n')\n plt.figure(figsize=(12, 5))\n plt.plot(void.index.values, void.percentage.values, 'ro')\n plt.xlabel('Columns indexes')\n plt.ylabel('% of missing values')\n plt.title('Percentage of missing values per feature')\n plt.xticks(rotation=45)\n return void.T",
"def test_nodata(self):\n\n \n filename = 'data/test_grid.asc'\n R = read_coverage(filename)\n \n nan = R.get_nodata_value()\n assert nan == -9999\n \n A = R.get_data(nan=False)\n assert numpy.min(A[:]) == -9999\n assert numpy.allclose(numpy.max(A[:]), 50.9879837036) \n \n A = R.get_data(nan=True)\n assert numpy.allclose(numpy.nanmin(A[:]), -50.60135540866)\n assert numpy.allclose(numpy.nanmax(A[:]), 50.9879837036)",
"def test_nan_check(self):\n values_with_nans = np.array([1, 2, 3, np.nan, np.nan])\n\n with LogCapture(\"puma\") as log:\n _ = hist_w_unc(values_with_nans, bins=4)\n log.check(\n (\n \"puma\",\n \"WARNING\",\n \"Histogram values contain 2 nan values!\",\n )\n )",
"def _plot_neutrality(gc12_lst: list, gc3_lst: list, organism_name: None | str = None, save_image: bool = False,\n folder_path: str = 'Report', gene_analysis: bool = True):\n N = len(gc3_lst)\n if max(gc12_lst) - min(gc12_lst) > max(gc3_lst) - min(gc3_lst):\n color = gc12_lst\n label = r'$GC_{12}$ Value'\n else:\n color = gc3_lst\n label = r\"$GC_3$ Value\"\n slope, intercept, r, p, se = linregress(gc3_lst, gc12_lst)\n fig = plt.figure(figsize=(9, 5.25))\n ax = fig.add_subplot()\n ax.set_aspect('equal', adjustable='box')\n plt.scatter(gc3_lst, gc12_lst, s=12, c=color, cmap='viridis', alpha=0.5, label='Observed Values', zorder=1)\n x_lim = max(gc3_lst) + min(gc3_lst)\n x = np.linspace(0.0, x_lim, 201)\n y = [slope * _x + intercept for _x in x]\n _label = f\"$y = {round(slope, 4)}x+{round(intercept, 4)}$\" if intercept >= 0 else f\"$y = {round(slope, 4)}x{round(intercept, 4)}$\"\n plt.plot(x, y, color='red', label=_label, zorder=2)\n plt.grid(True, linestyle=\":\")\n plt.legend()\n plt.xlabel('$GC_3$ Value')\n plt.ylabel('$GC_{12}$ Value')\n c_bar = plt.colorbar()\n c_bar.set_label(label)\n suptitle = r'Neutrality Plot' if organism_name is None else f\"Neutrality plot for {organism_name}\"\n plt.suptitle(suptitle, fontsize=16)\n title = f'Total genes: {N}, $R^2$ value: {round(r ** 2, 4)}' if gene_analysis else f'Total genome: {N}, $R^2$ value: {round(r ** 2, 4)}'\n plt.title(title, fontsize=14)\n if save_image:\n make_dir(folder_path)\n name = 'Neutrality_plot.png' if organism_name is None else f\"Neutrality_plot_{organism_name}.png\"\n file_name = join(folder_path, name)\n if is_file_writeable(file_name):\n plt.savefig(file_name, dpi=500)\n print(f'Saved file can be found as {abspath(file_name)}')\n plt.show()\n plt.close()"
] | [
"0.63708234",
"0.6074387",
"0.57683337",
"0.56718147",
"0.56265396",
"0.54766446",
"0.5422341",
"0.53246593",
"0.5289961",
"0.5241998",
"0.5187888",
"0.5112321",
"0.51097625",
"0.510898",
"0.5086293",
"0.50471574",
"0.5039643",
"0.5014016",
"0.50119525",
"0.5000857",
"0.49794474",
"0.49767306",
"0.49450153",
"0.4941439",
"0.4935554",
"0.49066967",
"0.49045992",
"0.490417",
"0.489904",
"0.4895923"
] | 0.80562246 | 0 |
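The record above builds a missingno nullity matrix with columns ordered by missingness and saves the figure to disk. A short sketch of the same workflow on a synthetic DataFrame follows; the data, column names, and output path are assumptions, while `msno.matrix` and the sort-by-NaN-count ordering mirror the record.

```python
import numpy as np
import pandas as pd
import missingno as msno

# Synthetic data with a different amount of missingness per column (assumed example).
rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(200, 5)), columns=list("abcde"))
for frac, col in zip([0.0, 0.1, 0.3, 0.5, 0.7], df.columns):
    df.loc[df.sample(frac=frac, random_state=1).index, col] = np.nan

# Order columns from least to most missing, as in the record, then plot and save.
ordered = df[df.isna().sum().sort_values().index]
ax = msno.matrix(ordered)
ax.set_ylabel("INDEX OF OBSERVATIONS")
ax.figure.savefig("matrix_nan.png", bbox_inches="tight")
```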
Run Imagemagick's identify command. If command outputs to stderr special "extraneous bytes" error message, then capture size of extra bytes and marker code. Return None if extraneous bytes are not found, otherwise values extracted from error message. | def find_extraneous_bytes_before_marker(filepath):
code, out, err = run_command(['identify', filepath])
err_str = err.decode('utf8')
ending = "extraneous bytes before marker"
if err_str.find(ending) < 0:
return None, None, None
m = re.search(r'Corrupt JPEG data: ([\d]+) extraneous bytes before marker (0x[\w]+)', err_str)
size = int(m.group(1))
marker = m.group(2)
return size, marker, err_str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def imageSizeNoCache(filename):\n\n if opts.no_magick:\n return (0, 0)\n\n fn = filename\n if opts.pil:\n\n try:\n im = PilImage.open(fn)\n s = im.size\n except IOError, e:\n raise SystemExit(\"Error: identifying file '%s'\" % fn + str(e))\n\n return s\n\n elif not opts.old_magick:\n\n cmd = getMagickProg('identify') + ' -format \"%w %h\" ' + \\\n '\"%s\"' % fn\n po = os.popen(cmd)\n output = po.read()\n try:\n (width, height) = map(lambda x: int(x), string.split(output))\n except ValueError:\n print >> sys.stderr, \\\n \"Error: parsing identify output on %s\" % fn\n return (0, 0)\n err = po.close()\n if err:\n print >> sys.stderr, \\\n \"Error: running identify program on %s\" % fn\n return (0, 0)\n return (width, height)\n\n else:\n # Old imagemagick doesn't have format tags\n cmd = getMagickProg('identify') + ' \"%s\"' % fn\n\n po = os.popen(cmd)\n output = po.read()\n err = po.close()\n if err:\n print >> sys.stderr, \\\n \"Error: running identify program on %s\" % fn\n return (0, 0)\n\n mre = re.compile(\"([0-9]+)x([0-9]+)\")\n mo = mre.match(string.split(output)[1])\n if not mo:\n mo = mre.match(string.split(output)[2])\n if mo:\n (width, height) = map(lambda x: int(x), mo.groups())\n return (width, height)\n print >> sys.stderr, \\\n \"Warning: could not identify size for image '%s'\" % filename\n return (0, 0)",
"def identify(cls, bytes):\n\t\traise NotImplementedError(\"identify must be implemented\")",
"def get_dimensions(image, classname):\n start, ext = os.path.splitext(image)\n if ext == '.yuv':\n bitdepth = \"8\"\n res_split = start.split('x')\n width_split = res_split[0].split('_')\n width = width_split[-1]\n height_split = res_split[-1].split('_')\n m = res_split[-1].find(\"bit\")\n if res_split[-1][m - 2] == \"_\":\n depth = res_split[-1][m - 1]\n else:\n depth = res_split[-1][m - 2:m]\n height = height_split[0]\n elif classname == \"classE_exr\":\n size = os.path.basename(image).split('_')[2]\n try:\n dimension_cmd = [\"identify\", '-size', size, '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n else:\n try:\n dimension_cmd = [\"identify\", '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n return width, height, depth",
"def _validate_image_info(ext, image_info=None, **kwargs):\n image_info = image_info or {}\n\n checksum_avail = False\n md5sum_avail = False\n os_hash_checksum_avail = False\n\n for field in ['id', 'urls']:\n if field not in image_info:\n msg = 'Image is missing \\'{}\\' field.'.format(field)\n raise errors.InvalidCommandParamsError(msg)\n\n if type(image_info['urls']) != list or not image_info['urls']:\n raise errors.InvalidCommandParamsError(\n 'Image \\'urls\\' must be a list with at least one element.')\n\n checksum = image_info.get('checksum')\n if checksum is not None:\n if (not isinstance(image_info['checksum'], str)\n or not image_info['checksum']):\n raise errors.InvalidCommandParamsError(\n 'Image \\'checksum\\' must be a non-empty string.')\n if _is_checksum_url(checksum) or len(checksum) > 32:\n # Checksum is a URL *or* a greater than 32 characters,\n # putting it into the realm of sha256 or sha512 and not\n # the MD5 algorithm.\n checksum_avail = True\n elif CONF.md5_enabled:\n md5sum_avail = True\n\n os_hash_algo = image_info.get('os_hash_algo')\n os_hash_value = image_info.get('os_hash_value')\n if os_hash_algo or os_hash_value:\n if (not isinstance(os_hash_algo, str)\n or not os_hash_algo):\n raise errors.InvalidCommandParamsError(\n 'Image \\'os_hash_algo\\' must be a non-empty string.')\n if (not isinstance(os_hash_value, str)\n or not os_hash_value):\n raise errors.InvalidCommandParamsError(\n 'Image \\'os_hash_value\\' must be a non-empty string.')\n os_hash_checksum_avail = True\n\n if not (checksum_avail or md5sum_avail or os_hash_checksum_avail):\n raise errors.InvalidCommandParamsError(\n 'Image checksum is not available, either the \\'checksum\\' field '\n 'or the \\'os_hash_algo\\' and \\'os_hash_value\\' fields pair must '\n 'be set for image verification.')",
"def _extract_image_short_id(scan_result: dict[str, Any]) -> str:\n\n if \"id\" not in scan_result:\n return \"sha256:unknown\"\n\n image_id: str = scan_result[\"id\"]\n\n if image_id.startswith(\"sha256:\"):\n return image_id[:17]\n return image_id[:10]",
"def check_images():\n saved_stdout, saved_stderr = sys.stdout, sys.stderr\n\n out, err = StringIO(), StringIO()\n try:\n sys.stdout, sys.stderr = out, err\n check_images_main()\n except SystemExit:\n pass\n finally:\n stdout, stderr = out.getvalue().strip(), err.getvalue().strip()\n sys.stdout, sys.stderr = saved_stdout, saved_stderr\n\n return stdout, stderr",
"def get_output_error(cmd, **kwargs):\n if not isinstance(cmd, list):\n cmd = [cmd]\n logging.debug(\"Running: %s\", ' '.join(map(quote, cmd)))\n try:\n result = Popen(cmd, stdout=PIPE, stderr=PIPE, **kwargs)\n except OSError as e:\n return -1, '', f'Failed to run {cmd!r}: {e!r}'\n so, se = result.communicate()\n # unicode:\n so = so.decode('utf8', 'replace')\n se = se.decode('utf8', 'replace')\n\n return result.returncode, so, se",
"def shellExecErrorCode(cmd):\n return subprocess.call(cmd, shell=True)",
"def format_error (result):\n if check_ok (result):\n return 'exiftool finished probably properly. (\"%s\")' % strip_nl(result)\n else: \n if result is None:\n return \"exiftool operation can't be evaluated: No result given\"\n else:\n return 'exiftool finished with error: \"%s\"' % strip_nl(result)",
"def image_not_exists(self):\n res = subprocess.run(\n \"{} inspect {}\".format(self.binary, self.vars['image']),\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n return res.returncode",
"def get_exif(path, key, numeric=False):\n args = ['exiftool', '-' + key, path]\n if numeric:\n args.insert(1, '-n')\n output = subprocess.check_output(args).strip()\n if ':' not in output:\n raise EXIFError(\"%s has no EXIF data for %s\" % (path, key))\n return output.split(':')[1].strip()",
"def qemu_img_info(path):\n if not os.path.exists(path):\n return QemuImgInfo()\n\n out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',\n 'qemu-img', 'info', path)\n return QemuImgInfo(out)",
"def getnumoflinesinblob(ext_blob):\n ext, blob_id = ext_blob\n try:\n return (ext, blob_id, int(getpipeoutput(['git cat-file blob %s' % blob_id, 'find /v /c \"\"']).split()[0]))\n except:\n return (ext, blob_id, 0)",
"def fast_get_image_size(raw_data):\n size = len(raw_data)\n data = raw_data[:25]\n input_io = io.BytesIO(data)\n if (size >= 10) and data[:6] in ('GIF87a', 'GIF89a'):\n # GIFs\n w, h = struct.unpack(\"<HH\", data[6:10])\n width = int(w)\n height = int(h)\n elif ((size >= 24) and data.startswith('\\211PNG\\r\\n\\032\\n')\n and (data[12:16] == 'IHDR')):\n # PNGs\n w, h = struct.unpack(\">LL\", data[16:24])\n width = int(w)\n height = int(h)\n elif (size >= 16) and data.startswith('\\211PNG\\r\\n\\032\\n'):\n # older PNGs?\n w, h = struct.unpack(\">LL\", data[8:16])\n width = int(w)\n height = int(h)\n elif (size >= 2) and data.startswith('\\377\\330'):\n # JPEG\n input_io.seek(0)\n input_io.read(2)\n b = input_io.read(1)\n try:\n w = ''\n h = ''\n while (b and ord(b) != 0xDA):\n while (ord(b) != 0xFF): b = input_io.read(1)\n while (ord(b) == 0xFF): b = input_io.read(1)\n if (ord(b) >= 0xC0 and ord(b) <= 0xC3):\n input_io.read(3)\n h, w = struct.unpack(\">HH\", input_io.read(4))\n break\n else:\n input_io.read(int(struct.unpack(\">H\", input_io.read(2))[0])-2)\n b = input_io.read(1)\n width = int(w)\n height = int(h)\n except Exception as e:\n #print 'get size error'\n return 0, 0\n else:\n # print \"Sorry, don't know how to get information from this file %s\" % file_path\n return 0, 0\n if width < 0 or height<0:\n return 0, 0\n else:\n return width, height",
"def identify(path):\n\n if not os.path.exists(path):\n return {}\n\n out, _ = processutils.execute(\n 'qemu-img info %s' % path, shell=True)\n\n data = {}\n for line in out.split('\\n'):\n line = line.lstrip().rstrip()\n elems = line.split(': ')\n if len(elems) > 1:\n key = elems[0]\n value = ': '.join(elems[1:])\n\n m = VALUE_WITH_BRACKETS_RE.match(value)\n if m:\n value = float(m.group(1))\n\n elif value.endswith('K'):\n value = float(value[:-1]) * 1024\n elif value.endswith('M'):\n value = float(value[:-1]) * 1024 * 1024\n elif value.endswith('G'):\n value = float(value[:-1]) * 1024 * 1024 * 1024\n elif value.endswith('T'):\n value = float(value[:-1]) * 1024 * 1024 * 1024 * 1024\n\n try:\n data[key] = float(value)\n except Exception:\n data[key] = value\n\n return data",
"def stderr(self: \"ShellOutput\") -> Artefact[bytes]:\n self.__check_len()\n return self.stderrs[0]",
"def test_ipython_robot_report_image(self):\n if PLATFORM == \"windows\":\n return\n\n self.activate_magic()\n\n with patch(\"jupyter_kernel_test.validate_message\", fake_validate):\n reply, outputs = self.execute_helper(code=MAGIC_IMAGE_TASK, timeout=60)\n assert reply[\"content\"][\"status\"] == \"ok\"\n assert any(\"image/png\" in output[\"content\"][\"data\"] for output in outputs)",
"def RunDetectCommand(vcs_type, command):\r\n try:\r\n out, returncode = RunShellWithReturnCode(command)\r\n if returncode == 0:\r\n return (vcs_type, out.strip())\r\n except OSError, (errcode, message):\r\n if errcode != errno.ENOENT: # command not found code\r\n raise",
"def _identify_fail(failure):\n logger.warning(failure.getErrorMessage())\n logger.warning(\"Failed to setup & obtain identity\")\n return",
"def _image_data(buff):\n code = buff.getvalue()\n m = _size(code)\n if m:\n size = int(m.group(1))\n else:\n raise Exception('Internal error: PPM header not found')\n return code[m.end():], size",
"def __try_command(cmd, description):\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT);\n return (True, out.decode(\"utf-8\")) # success\n except subprocess.CalledProcessError as e:\n print(\"Error while {:s}, return code is non-zero ({:d})\".format(description, e.returncode))\n print(\"Command: {:s}\".format(\" \".join(e.cmd)))\n if e.output:\n print(\"Output: {:s}\".format(e.output.decode(\"utf-8\").strip()))\n\n return (False, None) # error",
"def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)",
"def image_size():\n return eval(subprocess(\"print camera_image_size()\"))",
"def _extract_error():\n\n error_num = errno()\n\n try:\n error_string = os.strerror(error_num)\n except (ValueError):\n return str_cls(error_num)\n\n if isinstance(error_string, str_cls):\n return error_string\n\n return _try_decode(error_string)",
"def _du_using_subprocesses(self):\n size = 0\n size_process = Popen(['find', self.path, '-type', 'f', '-ls'], stdout=PIPE)\n size_result, _ = size_process.communicate()\n lines = size_result.split(\"\\n\")\n separator = re.compile(\"\\s+\")\n num = 0\n for line in lines:\n if len(line.strip()) == 0:\n continue\n file_info = separator.split(line)\n file_size = int(file_info[6])\n size += file_size\n num += 1\n return Du.Result(num, size)",
"def test_docker_exists_but_unknown_error_when_running_command(mock_tools):\n mock_tools.subprocess.check_output.side_effect = [\n VALID_DOCKER_VERSION,\n subprocess.CalledProcessError(\n returncode=1,\n cmd=\"docker info\",\n output=\"This command failed!\",\n ),\n ]\n\n with pytest.raises(\n BriefcaseCommandError,\n match=\"Check your Docker\\ninstallation, and try again\",\n ):\n Docker.verify(mock_tools)",
"def trycmd(*args, **kwargs):\n discard_warnings = kwargs.pop('discard_warnings', False)\n\n try:\n out, err = execute(*args, **kwargs)\n failed = False\n except exception.ProcessExecutionError as exn:\n out, err = '', str(exn)\n failed = True\n\n if not failed and discard_warnings and err:\n # Handle commands that output to stderr but otherwise succeed\n err = ''\n\n return out, err",
"def docker_inspect(args, identifier, always=False): # type: (EnvironmentConfig, str, bool) -> DockerInspect\n try:\n stdout = docker_command(args, ['container', 'inspect', identifier], capture=True, always=always)[0]\n except SubprocessError as ex:\n stdout = ex.stdout\n\n if args.explain and not always:\n items = []\n else:\n items = json.loads(stdout)\n\n if len(items) == 1:\n return DockerInspect(args, items[0])\n\n raise ContainerNotFoundError(identifier)",
"def testImageDiffLengthEnforced(self) -> None:\n with self.assertRaises(AssertionError):\n _ = data_types.Result('test', ('win', 'x86'), (1, 2, 3),\n 'build_id')",
"def get_exitcode_stdout_stderr(cmd):\n args = shlex.split(cmd)\n\n proc = Popen(args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n out = out.decode('utf-8')\n exitcode = proc.returncode\n #\n return exitcode, out, err"
] | [
"0.61020416",
"0.544879",
"0.5286424",
"0.51605546",
"0.51338214",
"0.5132784",
"0.5103628",
"0.509698",
"0.50425714",
"0.50197345",
"0.4971101",
"0.49556476",
"0.4923544",
"0.49070853",
"0.48903632",
"0.4869814",
"0.48540086",
"0.4850367",
"0.48499233",
"0.48085493",
"0.47453266",
"0.47418982",
"0.47083056",
"0.46855143",
"0.4657893",
"0.4657508",
"0.4653389",
"0.46446437",
"0.46428618",
"0.4626568"
] | 0.6984234 | 0 |
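The record above shells out to ImageMagick's `identify` and regex-parses the "extraneous bytes before marker" warning from stderr. A self-contained sketch of the same check is below, assuming ImageMagick is on PATH and using `subprocess.run` in place of the record's `run_command` helper; the regex is the one shown in the record.

```python
import re
import subprocess

EXTRANEOUS_RE = re.compile(
    r"Corrupt JPEG data: (\d+) extraneous bytes before marker (0x\w+)"
)

def find_extraneous_bytes(filepath: str):
    """Return (size, marker, stderr) when identify reports extraneous bytes, else (None, None, None)."""
    proc = subprocess.run(
        ["identify", filepath],
        capture_output=True,
        text=True,
    )
    match = EXTRANEOUS_RE.search(proc.stderr)
    if match is None:
        return None, None, None
    return int(match.group(1)), match.group(2), proc.stderr
```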
Last element from scopeStack is removed and currScope is updated | def deleteScope():
global currScope
scopeStack.pop()
currScope = scopeStack[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scope_pop(self) -> None:\n self.scope_stack.popleft()",
"def endScope():",
"def leaveScope(self, name):",
"def pop_instantiate_scope(self):\n self.instantiate_scope = self.instantiate_scope.get_parent()",
"def pop(self):\n self._variables = self._variable_stack.pop()",
"def scope_push(self) -> None:\n self.scope_stack.appendleft(defaultdict(lambda: 0))",
"def pop(self):\n if (self.child == None):\n return \n SymbolTable.currentContext = SymbolTable.currentContext.parent\n SymbolTable.currentContext.child = None",
"def pop(self):\n assert self.local_variables.parent is not None\n self.local_variables = self.local_variables.parent\n assert self.local_types.parent is not None\n self.local_types = self.local_types.parent",
"def remove(self):\n return self.stack_list.pop()",
"def clear(self) -> None:\n\n try:\n del self.registry[self.scopefunc()]\n except KeyError:\n pass",
"def remove_scope(self, ):\n if self.AttributeNames.SCOPE in self.attrs:\n del self.attrs[self.AttributeNames.SCOPE]\n return self",
"def popclear():\n stack = currentframe().f_back.f_locals.setdefault(SN, [])\n result = stack.pop()\n stack[:] = []\n return result",
"def exit_var_scope(self):\n # type: () -> Scope[expr.Var]\n\n return self.var_scopes.popleft()",
"def pop(self):\n old = self.stack.pop()\n if self.stack:\n self.current = self.stack.pop()\n else:\n self.current = None\n return old",
"def exit_type_param_scope(self):\n # type: () -> Scope[ty.TypeVar]\n\n return self.type_param_scopes.popleft()",
"def pop(self):\n stack = self.stack\n if len(stack)>1:\n stack.pop()\n self._setspaces()",
"def pop(self):\n self._stack.pop()",
"def __exit__(self, *_) -> None:\n self.__cursor.top.pop()",
"def pop_current_line(self):\n self.current_line.pop()",
"def remove_current():\n current.remove()",
"def pop_last(self):\n self.pop_item(-1)",
"def pop(self):\n self.restore(self.stack.pop())",
"def pop(state):\n return state.env.stack.pop()",
"def pop(self):",
"def pop(self):",
"def restore_context(self):\r\n self.current_context = self.context_stack.pop()",
"def pop(self):\n del self.args[self.type_stack.pop()]\n self.id_stack.pop()",
"def pop_focus(self):\n self._focus.pop()",
"def delete_last(self):\n self.deque.pop()",
"def discard(self,):\n self.stack.pop()"
] | [
"0.7608958",
"0.68642795",
"0.6764787",
"0.6377721",
"0.63775796",
"0.62262475",
"0.6219208",
"0.60805106",
"0.6016696",
"0.60080355",
"0.5974704",
"0.5926928",
"0.5896437",
"0.58770096",
"0.5875703",
"0.5872738",
"0.5854077",
"0.5823992",
"0.58158916",
"0.581577",
"0.5786769",
"0.5785812",
"0.5775543",
"0.5748899",
"0.5748899",
"0.5747647",
"0.569402",
"0.56678706",
"0.5647457",
"0.56347626"
] | 0.80145603 | 0 |
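The record above pops the top of a global scope stack so that the enclosing scope becomes the current one. The same idea as a small class instead of module-level globals; the class and method names are hypothetical.

```python
class ScopeStack:
    """Minimal scope tracker: the current scope is always the top of the stack."""

    def __init__(self, root_scope: str = "global"):
        self._stack = [root_scope]

    @property
    def current(self) -> str:
        return self._stack[-1]

    def push(self, name: str) -> None:
        self._stack.append(name)

    def pop(self) -> str:
        if len(self._stack) == 1:
            raise RuntimeError("cannot delete the root scope")
        return self._stack.pop()  # self.current now refers to the enclosing scope

scopes = ScopeStack()
scopes.push("function:main")
scopes.pop()
assert scopes.current == "global"
```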
generates new temporary variable | def newTemp():
global varSeq
toRet = 'var'+str(varSeq)
varSeq += 1
scopeDict[currScope].insert(toRet,"temporary")
return toRet | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_temp_variable_name():\n counter = 0\n while True:\n counter += 1\n yield f\"t_{counter}\"",
"def get_temp_var() -> str:\n if len(DEALLOCATED_TEMP_VARS) > 1:\n var = DEALLOCATED_TEMP_VARS.pop()\n ALLOCATED_TEMP_VARS.append(var)\n\n return var\n\n # Create a t<0~> variable name to be used\n else:\n i = 0\n while True:\n var = f't{i}'\n if var not in ALLOCATED_TEMP_VARS:\n ALLOCATED_TEMP_VARS.append(var)\n return var\n\n i += 1",
"def make_variable(self, name = None):\r\n return self.Variable(self, name = name)",
"def temp_var_or_literal(self, name, var, init):\n if var[0]:\n # Literal\n return var[1]\n temp = self.make_temp_var(name)\n init.append('%s = %s' % (temp, var[1]))\n return temp",
"def make_variable(self, name=None):\r\n return self.Variable(self, name=name)",
"def make_variable(self, name=None):\r\n return self.Variable(self, name=name)",
"def gentemp(root=\"obj-\"):\n return GenTemp(root)",
"def __new_var__(self, var_type=None, name=None) -> str:\n if name is None:\n from time import time\n name = str(int(time()*1e7))\n self.__numVar += 1\n self.__variables.append((name, var_type))\n if var_type == List:\n return name + \" = []\"\n return name",
"def _get_reuse_value(self):\n return self.__reuse_value",
"def _build_pop_temp(self):\n value = self.value\n return dedent(\n f\"\"\"\n @{value}\n D=A // D = i\n @5\n D=D+A // addr = 5 + i\n @SP\n M=M-1\n A=M\n D=D+M // addr = addr + RAM[SP]\n A=D-M // A = addr - RAM[SP] \n M=D-A // RAM[A] = addr - A\n \"\"\"\n )",
"def varcopy(self, vars):",
"def make_internal(self, temper_lpost: bool):",
"def GenTemp(root=\"obj-\"):\n global _GT_NAMES_\n if root in _GT_NAMES_.keys():\n _GT_NAMES_[root]+=1\n else:\n _GT_NAMES_[root]=0\n return \"%s%d\" % (root, _GT_NAMES_[root])",
"def memory(n):\r\n def f(g):\r\n nonlocal n\r\n n = g(n)\r\n return n\r\n return f",
"def retrieve_temp(self):\n\n i = CodeGenerator.RESERVED_REGISTER_LOCATION_1\n j = CodeGenerator.RESERVED_REGISTER_LOCATION_2\n t = CodeGenerator.RESERVED_REGISTER_LOCATION_3\n fp = CodeGenerator.RESERVED_REGISTER_LOCATION_4\n self.add_pc(1)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(self.top_sp, \"@\"), _m(fp)\n self.add_pc(7)\n self.pb[self.pc - 7] = \"ADD\", _m(self.top_sp), _m(4, \"#\"), _m(j)\n self.pb[self.pc - 6] = \"ASSIGN\", _m(CodeGenerator.INIT_MEMORY_VALUE + 4, \"#\"), _m(i)\n self.pb[self.pc - 5] = \"ADD\", _m(i), _m(CodeGenerator.INIT_MEMORY_VALUE + 4, \"#\"), _m(i)\n self.pb[self.pc - 4] = \"ASSIGN\", _m(i), _m(self.top_sp, \"@\")\n self.pb[self.pc - 3] = \"ADD\", _m(self.top_sp), _m(4, \"#\"), _m(self.top_sp)\n self.pb[self.pc - 2] = \"LT\", _m(CodeGenerator.INIT_MEMORY_VALUE + CodeGenerator.REGISTER_SIZE, \"#\"), _m(i), _m(\n t)\n self.pb[self.pc - 1] = \"JPF\", _m(t), _m(self.pc - 5)\n self.add_pc(1)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(fp), _m(self.top_sp, \"@\")",
"def generate_variable_names():\n while True:\n name = uuid.uuid4()\n yield f\"_{name.hex}\"",
"def dup():\n stack = currentframe().f_back.f_locals.setdefault(SN, [])\n value = stack[-1]\n stack.append(value)\n return value",
"def ptr_to_fresh(c : Contract, ty : LLVMType, name : Optional[str] = None) -> Tuple[FreshVar, SetupVal]:\n var = c.fresh_var(ty, name)\n ptr = c.alloc(ty, points_to = var)\n return (var, ptr)",
"def var(*args, **kwargs):\n return Variable(*args, **kwargs)",
"def let(self, var, val):\n\n self.d['__vstemp'] = val\n if var.endswith('+'):\n rvar = var.rstrip('+')\n # .. obj = eval(rvar,self.d)\n exec(\"%s.append(__vstemp)\" % rvar, self.d)\n else:\n exec(var + \" = __vstemp\", self.d)\n del self.d['__vstemp']",
"def test_case4(X):\n ## homework:start\n X_new = \n ## homework:end\n return X_new",
"def make_free_in(self, other):\n var = self.var\n newvar = var.prime()\n while other != other.substitute({var: newvar}):\n var, newvar = newvar, newvar.prime()\n return _coconut_tail_call(self.change_var, var)",
"def test_case3(X):\n ## homework:start\n X_new = \n ## homework:end\n return X_new",
"def test_case2(X):\n ## homework:start\n X_new = \n ## homework:end\n return X_new",
"def mk_var(self, name, type_):\n # type: (str, ty.Type) -> expr.Var\n\n var = expr.Var(name, type_)\n self.var_scopes[0].appendleft((name, var))\n return var",
"def _make_identical(self, name):\n if not name in self.all_variables:\n return name\n i = 2\n while '%s%s' % (name, i) in self.all_variables:\n i += 1\n return '%s%s' % (name, i)",
"def reset_temp(self):\n i = CodeGenerator.RESERVED_REGISTER_LOCATION_1\n t = CodeGenerator.RESERVED_REGISTER_LOCATION_2\n fp = CodeGenerator.RESERVED_REGISTER_LOCATION_3\n self.add_pc(1)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(self.top_sp, \"@\"), _m(fp)\n self.add_pc(6)\n self.pb[self.pc - 6] = \"ASSIGN\", _m(CodeGenerator.INIT_MEMORY_VALUE, \"#\"), _m(i)\n self.pb[self.pc - 5] = \"ADD\", _m(i), _m(4, \"#\"), _m(i)\n self.pb[self.pc - 4] = \"ADD\", _m(self.top_sp), _m(4, \"#\"), _m(self.top_sp)\n self.pb[self.pc - 3] = \"ASSIGN\", _m(i), _m(self.top_sp, \"@\")\n self.pb[self.pc - 2] = \"LT\", _m(CodeGenerator.INIT_MEMORY_VALUE + CodeGenerator.REGISTER_SIZE, \"#\"), _m(i), _m(\n t)\n self.pb[self.pc - 1] = \"JPF\", _m(t), _m(self.pc - 5)\n self.add_pc(1)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(fp), _m(self.top_sp, \"@\")\n # self.pb[]",
"def gpu_safe_new(x, tag=''):\r\n if hasattr(x, 'name') and x.name is not None:\r\n nw_name = x.name + tag\r\n else:\r\n nw_name = None\r\n if isinstance(x, theano.Constant):\r\n return x.clone()\r\n\r\n nw_x = x.type()\r\n nw_x.name = nw_name\r\n return nw_x",
"def gpu_safe_new(x, tag=''):\r\n if hasattr(x, 'name') and x.name is not None:\r\n nw_name = x.name + tag\r\n else:\r\n nw_name = None\r\n if isinstance(x, theano.Constant):\r\n return x.clone()\r\n\r\n nw_x = x.type()\r\n nw_x.name = nw_name\r\n return nw_x",
"def Variable(name):\n placeholder_node = placeholder_op()\n placeholder_node.name = name\n return placeholder_node"
] | [
"0.665959",
"0.6396607",
"0.61197865",
"0.6099496",
"0.60078543",
"0.60078543",
"0.59730935",
"0.5972153",
"0.5922348",
"0.5857888",
"0.58486015",
"0.57957274",
"0.57602435",
"0.5705927",
"0.5658735",
"0.5635153",
"0.5590427",
"0.5547302",
"0.5530237",
"0.5522898",
"0.55076873",
"0.54982066",
"0.54891866",
"0.5478572",
"0.54655665",
"0.54615986",
"0.54563314",
"0.5452519",
"0.5452519",
"0.5445344"
] | 0.7708905 | 0 |
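The record above hands out fresh names `var0`, `var1`, ... and registers each one in the current scope. A compact counter-based sketch of that generator, without the scope registration and with hypothetical names.

```python
import itertools

class TempAllocator:
    """Hands out fresh temporary names: var0, var1, var2, ..."""

    def __init__(self, prefix: str = "var"):
        self._prefix = prefix
        self._counter = itertools.count()

    def new_temp(self) -> str:
        return f"{self._prefix}{next(self._counter)}"

temps = TempAllocator()
assert temps.new_temp() == "var0"
assert temps.new_temp() == "var1"
```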
Logs ban events not made through the bot. | async def on_member_ban(self, guild, target):
entry = await fetch_recent_audit_log_entry(
self.bot, guild, target=target, action=discord.AuditLogAction.ban, retry=3
)
if entry.user == self.bot.user:
return
action = Ban(
target=target,
user=entry.user,
reason=entry.reason,
guild_id=guild.id,
created_at=entry.created_at,
)
self.bot.dispatch("action_perform", action) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def logger_bans(self, ctx, *, channel: ChannelSetting):\n await queries.update_setting(\n ctx,\n \"logging_settings\",\n \"ban_log_channel_id\",\n channel.id if channel is not None else None,\n )\n if channel is None:\n await util.send_success(ctx, \"Bans logging **disabled**\")\n else:\n await util.send_success(ctx, f\"Bans will now be logged to {channel.mention}\")",
"async def on_member_ban(self, guild: Guild, user: Union[Member, User], *args):\n\n if not self._is_tracked(guild, EventPriority.ban):\n return\n\n # Event is sometimes called with 3 arguments\n # Capture occurrence\n await self.errorlog.send(Exception(f\"Additional arguments sent to `on_member_ban`: {args}\"))\n\n em = self.em_base(\n user,\n f\"User {user.mention} ({user.name}) was banned\",\n EventColors.ban.value\n )\n\n # Attempt to retrieve unban reason and mod that unbanned from Audit Log\n found, errored, mod, reason = await self._get_last_audit_action(guild, AuditLogAction.ban, user)\n\n # Audit log action found\n # Add details\n if found and not errored:\n em.add_field(\n name=\"Banned By\",\n value=f\"{mod.mention}\\n({mod.name}#{mod.discriminator})\"\n )\n em.add_field(\n name=\"Reason\",\n value=reason if reason is not None else \"No reason given\"\n )\n\n # Cannot access audit log or HTTP error prevented access\n elif errored and not found:\n em.add_field(\n name=\"Banned By\",\n value=\"Unknown\\nAudit Log inaccessible\"\n )\n em.add_field(\n name=\"Reason\",\n value=\"Irretrievable\\nAudit Log inaccessible\"\n )\n\n # No audit log entry found for ban\n else:\n em.add_field(\n name=\"Banned By\",\n value=\"Unknown\\nAudit Log missing data\"\n )\n em.add_field(\n name=\"Reason\",\n value=\"Irretrievable\\nAudit Log missing data or no reason given\"\n )\n\n # If banned user was a member of the server, capture roles\n if isinstance(user, Member):\n roles = \"\\n\".join(\n [f\"{role.mention} ({role.name})\" for role\n in sorted(user.roles, reverse=True) if role.name != \"@everyone\"]\n )\n em.add_field(\n name=\"Roles\",\n value=roles if roles else \"User had no roles\"\n )\n\n await self.log_event(em, guild, priority=EventPriority.ban)",
"async def on_member_unban(self, guild: Guild, user: User, *args):\n\n if not self._is_tracked(guild, EventPriority.unban):\n return\n\n # Event is sometimes called with 3 arguments\n # Capture occurrence\n await self.errorlog.send(Exception(f\"Additional arguments sent to `on_member_unban`: {args}\"))\n\n em = self.em_base(\n user,\n f\"User {user.mention} ({user.name}) was unbanned\",\n EventColors.unban.value\n )\n\n # Attempt to retrieve unban reason and mod that unbanned from Audit Log\n found, errored, mod, reason = await self._get_last_audit_action(guild, AuditLogAction.unban, user)\n\n # Audit log action found\n # Add details\n if found and not errored:\n em.add_field(\n name=\"Unbanned By\",\n value=f\"{mod.mention}\\n({mod.name}#{mod.discriminator})\"\n )\n em.add_field(\n name=\"Reason\",\n value=reason if reason is not None else \"No reason given\"\n )\n\n # Cannot access audit log or HTTP error prevented access\n elif errored and not found:\n em.add_field(\n name=\"Unbanned By\",\n value=\"Unknown\\nAudit Log inaccessible\"\n )\n em.add_field(\n name=\"Reason\",\n value=\"Irretrievable\\nAudit Log inaccessible\"\n )\n\n # No audit log entry found for ban\n else:\n em.add_field(\n name=\"Unbanned By\",\n value=\"Unknown\\nAudit Log missing data\"\n )\n em.add_field(\n name=\"Reason\",\n value=\"Irretrievable\\nAudit Log missing data or no reason given\"\n )\n\n await self.log_event(em, guild, priority=EventPriority.unban)",
"def anti_bot(self, message):\n msg_list = self.ts.get_human_readable_message(message).lower().split(' ')\n bot_creation_date = self._get_creation_date(msg_list[1])\n viewers = self.ts.fetch_chatters_from_API()['viewers']\n mod_list = self.ts.get_mods()\n with codecs.open('whitelist.json', 'r', 'utf-8') as f:\n whitelist = json.load(f)\n for viewer in viewers:\n if self._get_creation_date(viewer) == bot_creation_date and viewer not in whitelist:\n self.ts.send_message('/ban {}'.format(viewer))\n mod_str = ', '.join(mod_list)\n self._add_to_whisper_queue(viewer, 'We\\'re currently experiencing a bot attack. If you\\'re a human and were accidentally banned, please whisper a mod: {}'.format(mod_str))",
"async def on_member_ban(self, guild: discord.Guild, member: discord.Member) -> None:\n\n # retrieve logging information\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n PartialLoggingAction,\n 'SELECT CHANNEL_ID, BITS FROM LOGGING WHERE GUILD_ID=?',\n (guild.id,))\n ):\n await log_to_channel(\n self.bot,\n LoggingActions.USER_BANNED,\n logging_info[0].bits,\n logging_info[0].channel_id,\n f'**{str(member)}** was banned from the guild.'\n )",
"async def on_member_unban(self, guild: discord.Guild, member: discord.Member) -> None:\n\n # retrieve logging information\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n PartialLoggingAction,\n 'SELECT CHANNEL_ID, BITS FROM LOGGING WHERE GUILD_ID=?',\n (guild.id,))\n ):\n await log_to_channel(\n self.bot,\n LoggingActions.USER_UNBANNED,\n logging_info[0].bits,\n logging_info[0].channel_id,\n f'**{str(member)}** was unbanned from the guild.'\n )",
"def ban(sock, user):\r\n chat(sock, \"/ban {}\".format(user))",
"def logbids(msg, t=None, obj=None):\n logging.root.log(msg, level=BIDS, t=t, obj=obj)",
"def __onBanNotifyHandler(self):\n LOG_DEBUG('GameSessionController:__onBanNotifyHandler')\n banTime = time.strftime('%H:%M', time.gmtime(time.time() + self.PLAY_TIME_LEFT_NOTIFY))\n self.__lastBanMsg = (self.isPlayTimeBlock, banTime)\n self.onTimeTillBan(*self.__lastBanMsg)\n self.__banCallback = BigWorld.callback(self.DAY_DURATION, self.__onBanNotifyHandler)",
"async def logban(self, ctx, member: BannedMember, *,\n reason: ActionReason = None):\n if self.bot.server_settings[ctx.guild.id]['modlog_enabled']:\n try:\n confirm = await helpers.custom_confirm(\n ctx,\n f'```\\nUser: {member.user}\\nReason: {reason}\\n```'\n )\n if not confirm:\n return\n resp_mod = ctx.author\n ban_reason = reason if reason else member.reason\n local_embed = embeds.BanEmbed(\n member.user, resp_mod, ban_reason)\n mod_logs = await self.bot.pg_utils.get_modlogs(ctx.guild.id)\n for channel_id in mod_logs:\n try:\n await self.bot.pg_utils.insert_modaction(\n ctx.guild.id,\n resp_mod.id,\n member.user.id,\n ban_reason,\n enums.Action.BAN\n )\n except Exception as e:\n self.bot.logger.warning(f'Error storing modaction: {e}') # noqa\n await (self.bot.get_channel(channel_id)).send(\n embed=local_embed)\n except Exception as e:\n self.bot.logger.warning(f'Issue posting to mod log: {e}')\n else:\n await ctx.send(f'No modlog channels detected', delete_after=3)",
"def on_b(self):\r\n self.log()",
"def test_logs_user_banned(self):\n user = make_user()\n headers = make_authentication_headers_for_user(user)\n\n other_user = make_user()\n community = make_community(creator=other_user, type='P')\n community_name = community.name\n\n user.join_community_with_name(community_name)\n other_user.add_moderator_with_username_to_community_with_name(username=user.username,\n community_name=community.name)\n\n user_to_ban = make_user()\n\n url = self._get_url(community_name=community.name)\n self.client.post(url, {\n 'username': user_to_ban.username\n }, **headers)\n\n self.assertTrue(community.logs.filter(action_type='B',\n source_user=user,\n target_user=user_to_ban).exists())",
"async def test_ban(self, ctx):\n try:\n pass\n except Exception as e:\n await zb.bot_errors(ctx,sp.format(e))",
"def event_log(self):\n pass",
"async def ban(self, ctx, user: discord.Member, reason=\"Banned from guild by Talos\"):\n await user.ban(reason=reason)\n await self.bot.mod_log(ctx, \"ban\", user, reason)\n await ctx.send(f\"User {user} banned\")",
"def test_logs_user_unbanned(self):\n user = make_user()\n headers = make_authentication_headers_for_user(user)\n\n other_user = make_user()\n community = make_community(creator=other_user, type='P')\n community_name = community.name\n\n user.join_community_with_name(community_name)\n other_user.add_moderator_with_username_to_community_with_name(username=user.username,\n community_name=community.name)\n\n user_to_unban = make_user()\n\n other_user.ban_user_with_username_from_community_with_name(username=user_to_unban.username,\n community_name=community_name)\n\n url = self._get_url(community_name=community.name)\n self.client.post(url, {\n 'username': user_to_unban.username\n }, **headers)\n\n self.assertTrue(community.logs.filter(action_type='U',\n source_user=user,\n target_user=user_to_unban).exists())",
"def ban(sock, chan, user):\n chat(sock, \".ban {}\\r\\n\".format(user))\n console.info(\"banned user {} from channel {}\".format(user, chan))",
"async def hackban(self, ctx, user_id: int, *, reason: str = None):\r\n author = ctx.message.author\r\n server = ctx.message.guild\r\n channel = ctx.message.channel\r\n action = \"Ban\"\r\n if str(server.id) not in self._time:\r\n self._time[str(server.id)] = {}\r\n dataIO.save_json(self._time_file, self._time)\r\n if \"bantime\" not in self._time[str(server.id)]:\r\n self._time[str(server.id)][\"bantime\"] = 0\r\n dataIO.save_json(self._time_file, self._time)\r\n try:\r\n user = await self.bot.get_user_info(user_id)\r\n except discord.errors.NotFound:\r\n await ctx.send(\"The user was not found, check if the ID specified is correct :no_entry:\")\r\n return\r\n except discord.errors.HTTPException:\r\n await ctx.send(\"The ID specified does not exist :no_entry:\")\r\n return\r\n ban_list = await server.bans()\r\n can_ban = channel.permissions_for(ctx.me).ban_members\r\n if user in server.members:\r\n await ctx.send(\"Use the ban command to ban people in the server :no_entry:\")\r\n return\r\n if not can_ban:\r\n await ctx.send(\"I need the `BAN_MEMBERS` permission :no_entry:\")\r\n return\r\n if user == self.bot.user:\r\n await ctx.send(\"I'm not going to ban myself ¯\\_(ツ)_/¯\")\r\n return\r\n if author == user:\r\n await ctx.send(\"Why would you want to ban yourself, just leave.\")\r\n return\r\n if user in [x.user for x in ban_list]:\r\n await ctx.send(\"That user is already banned :no_entry:\")\r\n return\r\n try:\r\n await self.bot.http.ban(user_id, server.id, reason=\"Ban made by {}\".format(author))\r\n self._time[str(server.id)][\"bantime\"] = datetime.datetime.utcnow().timestamp()\r\n dataIO.save_json(self._time_file, self._time)\r\n except:\r\n await ctx.send(\"I'm not able to ban that user :no_entry:\")\r\n return\r\n await ctx.send(f\"**{user}** has been banned by ID {self.bot.get_emoji(470063310386233344)}\")\r\n try:\r\n await self._log(author, server, action, reason, user)\r\n except:\r\n pass",
"def ban(self, nickname, duration, unit_time, reason):\n cmd = '{}ban \"{}\" {} {} \"{}\"'.format(self.console, Commands.aquote(nickname), duration,\n unit_time, Commands.aquote(reason))\n self.write_command(cmd)",
"def log_broadcast(bcast):\n logger.info(\"Broadcast received: {bcast}\")",
"async def user_banned_button(self, payload: discord.RawReactionActionEvent) -> None:\n\n self.bits = flip_action_bits(LoggingActions.USER_BANNED, self.bits)\n await self.update_embed()",
"def ban_command(server, output):\n for target in output.message.split()[1:]:\n if target in server.ops:\n server.tell(output.name, 'Operators cannot be banned')\n continue\n server.banip(target)\n server.ban(target)\n return",
"async def banAll(ctx):\r\n await ctx.message.delete()\r\n for member in ctx.guild.members:\r\n try:\r\n await member.ban()\r\n except Exception as e:\r\n print(\r\n f\"{Fore.RED}[-]banAll => {Fore.RESET}Failed to ban {member}\\n{e}\\n\"\r\n )",
"def forbidden(transactionId):\n _log.info(f\"{transactionId.hex} END FORBIDDEN\")",
"def notice(self):\n if(self.data[1][0:18:] == \"*** You are banned\"):\n username = SOCKET_TO_USERID[self.target]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(username)\n network = self.source[1]\n BANHANDLER.add_ban(10080, user_pseudonym, network, self.data[0], 1)\n self.message = self.message + \"\\r\\n :orcbot!@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You've been banned from this server\"\n\n self.send()",
"async def unban(self, ctx, user_id: int, *, reason: str = None):\r\n author = ctx.message.author\r\n server = ctx.message.guild\r\n channel = ctx.message.channel\r\n action = \"Unban\"\r\n if str(server.id) not in self._time:\r\n self._time[str(server.id)] = {}\r\n dataIO.save_json(self._time_file, self._time)\r\n if \"unbantime\" not in self._time[str(server.id)]:\r\n self._time[str(server.id)][\"unbantime\"] = 0\r\n dataIO.save_json(self._time_file, self._time)\r\n try:\r\n user = await self.bot.get_user_info(user_id)\r\n except discord.errors.NotFound:\r\n await ctx.send(\"The user was not found :no_entry:\")\r\n return\r\n except discord.errors.HTTPException:\r\n await ctx.send(\"The ID specified does not exist :no_entry:\")\r\n return\r\n can_ban = channel.permissions_for(ctx.me).ban_members\r\n if not can_ban:\r\n await ctx.send(\"I need the `BAN_MEMBERS` permission :no_entry:\")\r\n return\r\n ban_list = await server.bans()\r\n invite = await channel.create_invite(max_age=86400, max_uses=1)\r\n s = discord.Embed(title=\"You have been unbanned from {}\".format(server.name),\r\n description=\"Feel free to join back whenever.\", colour=000000,\r\n timestamp=__import__('datetime').datetime.utcnow())\r\n s.set_thumbnail(url=server.icon_url)\r\n s.add_field(name=\"Moderator\", value=\"{} ({})\".format(author, str(author.id)), inline=False)\r\n s.add_field(name=\"Invite\", value=\"{} (This will expire in 1 week)\".format(str(invite)))\r\n if user == author:\r\n await ctx.send(\"You can't unban yourself :no_entry:\")\r\n return\r\n if user == self.bot.user:\r\n await ctx.send(\"I'm not even banned ¯\\_(ツ)_/¯\")\r\n return\r\n i = 0\r\n n = 0\r\n if user in [x.user for x in ban_list]:\r\n pass\r\n else:\r\n await ctx.send(\"That user is not banned :no_entry:\")\r\n return\r\n try:\r\n await server.unban(user, reason=\"Unban made by {}\".format(author))\r\n self._time[str(server.id)][\"unbantime\"] = datetime.datetime.utcnow().timestamp()\r\n dataIO.save_json(self._time_file, self._time)\r\n except discord.errors.Forbidden:\r\n await ctx.send(\"I need the **Ban Members** permission to unban :no_entry:\")\r\n return\r\n await ctx.send(\"**{}** has been unbanned :white_check_mark:\".format(user))\r\n try:\r\n await self._log(author, server, action, reason, user)\r\n except:\r\n pass\r\n try:\r\n await user.send(embed=s)\r\n except:\r\n pass",
"def log_event(event):\r\n tracker.send(event)",
"async def ban(self, ctx, target: discord.Member, reason=None):\n await target.ban(reason=reason)\n await ctx.send(f'\\N{OK HAND SIGN} {target} banned')",
"async def banish(self, ctx : commands.Context, member: discord.Member, *, reason: str = None):\n if await checks.check_priv(ctx, member):\n return\n try:\n await member.ban(reason=default.responsible(ctx.author, reason))\n embed = discord.Embed(\n color = 0x2F3136\n )\n embed.set_footer(text=f\"Command invoked by {ctx.author}\")\n embed.set_author(name=f\"✅ {member.name} has been banned from the server\", icon_url=member.avatar_url)\n await ctx.send(embed=embed)\n await member.send(f\"You've been banned from **{ctx.guild.name}** for **{reason}** by **{ctx.author}**\")\n\n log_channel = self.bot.get_channel(self.logs(ctx.guild.id))\n if log_channel:\n embed = discord.Embed(\n title=\"Ban 📝\",\n description=f\"**User banned:** `{member}`\\n**Moderator:** `{ctx.author}`\\n**Reason:** `{reason}`\"\n )\n await log_channel.send(embed=embed)\n\n except Exception as e:\n await ctx.send(e)",
"def ban(self, mask, target, args):\n self.bot.send('MODE %s +b %s' % (as_channel(args['<channel>']), args['<nick>']))"
] | [
"0.66274333",
"0.6289445",
"0.6211584",
"0.6128353",
"0.5976262",
"0.5881616",
"0.5765396",
"0.5755743",
"0.5754436",
"0.57243574",
"0.56665677",
"0.5616894",
"0.55863464",
"0.5518944",
"0.54894495",
"0.548391",
"0.54765815",
"0.5444986",
"0.5386784",
"0.53838974",
"0.53649265",
"0.5353021",
"0.53175026",
"0.52869856",
"0.5281037",
"0.52635753",
"0.5259709",
"0.5250781",
"0.524605",
"0.52032924"
] | 0.6299255 | 1 |
Sets up the Muted role's permissions. You must have the Administrator permission to use this. | async def setup(self, ctx):
role = discord.utils.get(ctx.guild.roles, name="Muted")
if role is None:
return await ctx.send("Please create a role named Muted first.")
for channel in ctx.guild.channels:
if isinstance(channel, CategoryChannel) or not channel.permissions_synced:
await channel.set_permissions(
role, send_messages=False, add_reactions=False, speak=False, stream=False
)
await ctx.send("Set up permissions for the Muted role.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init() -> None:\n appbuilder.add_permissions(update_perms=True)\n security_manager.sync_role_definitions()",
"def setup(bot):\n bot.add_cog(RoleManager(bot))",
"async def _set_roles(self, ctx: Context):\n\n guild: discord.Guild = ctx.guild\n\n host = await guild.create_role(\n name=\"Host\", colour=discord.Color(0xFFBF37),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).host_id.set(host.id)\n await ctx.author.add_roles(host)\n\n player = await guild.create_role(\n name=\"Player\", colour=discord.Color(0x37BFFF),\n hoist=True, mentionable=True\n )\n await self.config.guild(guild).player_id.set(player.id)\n\n repl = await guild.create_role(\n name=\"Replacement\", colour=discord.Color(0x86FF40)\n )\n await self.config.guild(guild).repl_id.set(repl.id)\n\n spec = await guild.create_role(\n name=\"Spectator\", colour=discord.Color(0xD837FF)\n )\n await self.config.guild(guild).spec_id.set(spec.id)\n\n dead = await guild.create_role(\n name=\"Dead\", colour=discord.Color(0xDC5757)\n )\n await self.config.guild(guild).dead_id.set(dead.id)\n\n txt = _(\n \"Host: {}\"\n \"\\nPlayer: {}\"\n \"\\nSpectator: {}\"\n \"\\nDead: {}\"\n \"\\nReplacement: {}\"\n ).format(\n host.mention,\n player.mention,\n spec.mention,\n dead.mention,\n repl.mention\n )\n\n embed = discord.Embed(\n color=0x37BFFF, title=\"Created Roles!\", description=txt\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Set up required roles!\")",
"def setupPermissions( self, p ):\n mp = p.manage_permission\n for entry in Config.PortalPermissions:\n apply( mp, entry )",
"def setup_test_role(self):\n self.test_role = rand_name('role')\n resp, self.role = self.client.create_role(self.test_role)\n self.roles.append(self.role)",
"def setup_roles_and_persona(self):\n logging.info('Setting up roles, orders, persona.')\n self.end_onboarding_state()\n self.broadcast_apprentice_persona('') # clear onboarding persona\n starting_role = self.assign_roles()\n self.send_wizard_persona_emphasize_message()\n self.selected_persona = self.apprentice_choose_persona()\n self.broadcast_apprentice_persona(self.selected_persona)\n self.send_time_length_info()\n self.send_starter_instruction(starting_role)",
"def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()",
"def give_permissions(self):\n self._activate()\n self.configure(state=\"enabled\")",
"def sync_roles(self) -> None:\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()",
"def setup(bot):\n bot.add_cog(ReactionRoles(bot))",
"def set_permissions(self, role):\n if role == User.ROLE_ADMIN:\n for perm in permissions.admin_permissions():\n self.user_permissions.add(perm)\n elif role == User.ROLE_MANAGER:\n for perm in permissions.manager_permissions():\n self.user_permissions.add(perm)\n elif role == User.ROLE_SUB_MANAGER:\n for perm in permissions.sub_manager_permissions():\n self.user_permissions.add(perm)\n else:\n for perm in permissions.user_permissions():\n self.user_permissions.add(perm)",
"async def roles(self, ctx):\n\n pass",
"def sync_role_definitions(self) -> None:\n\n logger.info(\"Syncing role definition\")\n\n self.create_custom_permissions()\n\n # Creating default roles\n self.set_role(\"Admin\", self._is_admin_pvm)\n self.set_role(\"Alpha\", self._is_alpha_pvm)\n self.set_role(\"Gamma\", self._is_gamma_pvm)\n self.set_role(\"granter\", self._is_granter_pvm)\n self.set_role(\"sql_lab\", self._is_sql_lab_pvm)\n\n # Configure public role\n if current_app.config[\"PUBLIC_ROLE_LIKE\"]:\n self.copy_role(\n current_app.config[\"PUBLIC_ROLE_LIKE\"],\n self.auth_role_public,\n merge=True,\n )\n\n self.create_missing_perms()\n\n # commit role and view menu updates\n self.get_session.commit()\n self.clean_perms()",
"async def roletools(self, ctx: Context) -> None:",
"def setup(self):\n # TODO: refactor database cleanup\n with gus.config.get_db_conn().cursor() as c:\n c.execute(\"TRUNCATE TABLE chef_roles, chef_roles_xref_projects CASCADE\")\n self.role_name = 'www'\n self.role_id = chef_role.create(self.role_name, True)",
"def role(name,\n description,\n privileges,\n base_roles):\n script_name = 'setup_role'\n script_data = nexus_groovy.setup_role\n\n ret = {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': '\"{0}\" script run for role: {1}'.format(script_name, name)}\n\n script_args = {'id': name,\n 'name': name,\n 'description': description,\n 'privileges': privileges,\n 'roles': base_roles}\n\n results = _script_processor(script_name, script_data, script_args, ret)\n\n return results",
"def grant_role(self, role, principal_ids):",
"async def setoperator(self, ctx, role_id: int, perms: int):\n s = db.session()\n role = s.query(db.AdminRole).filter(db.AdminRole.role_id == role_id).first()\n if role:\n if perms == 0:\n s.delete(role)\n else:\n role.perms = perms\n else:\n s.add(db.AdminRole(role_id=role_id, perms=perms))\n s.commit()\n s.close()\n await ctx.send(\"Role set\")",
"def test_add_role(self):\n pass",
"async def applysetup(self, ctx: commands.Context):\n pred = MessagePredicate.yes_or_no(ctx)\n role = MessagePredicate.valid_role(ctx)\n\n applicant = get(ctx.guild.roles, name=\"Staff Applicant\")\n channel = get(ctx.guild.text_channels, name=\"applications\")\n\n await ctx.send(\n \"This will create required channel and role. Do you wish to continue? (yes/no)\"\n )\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if not pred.result:\n return await ctx.send(\"Setup cancelled.\")\n if not applicant:\n try:\n applicant = await ctx.guild.create_role(\n name=\"Staff Applicant\", reason=\"Application cog setup\"\n )\n except discord.Forbidden:\n return await ctx.send(\n \"Uh oh. Looks like I don't have permissions to manage roles.\"\n )\n if not channel:\n await ctx.send(\n \"Do you want everyone to see the applications channel? (yes/no)\"\n )\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result:\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(\n send_messages=False\n ),\n ctx.guild.me: discord.PermissionOverwrite(send_messages=True),\n }\n else:\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n ),\n ctx.guild.me: discord.PermissionOverwrite(read_messages=True),\n }\n try:\n channel = await ctx.guild.create_text_channel(\n \"applications\",\n overwrites=overwrites,\n reason=\"Application cog setup\",\n )\n except discord.Forbidden:\n return await ctx.send(\n \"Uh oh. Looks like I don't have permissions to manage channels.\"\n )\n await ctx.send(f\"What role can accept or reject applicants?\")\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=role)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n accepter = role.result\n await self.config.guild(ctx.guild).applicant_id.set(applicant.id)\n await self.config.guild(ctx.guild).channel_id.set(channel.id)\n await self.config.guild(ctx.guild).accepter_id.set(accepter.id)\n await ctx.send(\n \"You have finished the setup! Please, move your new channel to the category you want it in.\"\n )",
"async def applysetup(self, ctx: commands.Context):\n pred = MessagePredicate.yes_or_no(ctx)\n role = MessagePredicate.valid_role(ctx)\n\n applicant = get(ctx.guild.roles, name=\"Staff Applicant\")\n channel = get(ctx.guild.text_channels, name=\"staff-applications\")\n\n await ctx.send(\n \"This will create required channel and role. Do you wish to continue? (yes/no)\"\n )\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if not pred.result:\n return await ctx.send(\"Setup cancelled.\")\n if not applicant:\n try:\n applicant = await ctx.guild.create_role(\n name=\"Staff Applicant\", reason=\"Application cog setup\"\n )\n except discord.Forbidden:\n return await ctx.send(\n \"Uh oh. Looks like I don't have permissions to manage roles.\"\n )\n if not channel:\n await ctx.send(\"Do you want everyone to see the applications channel? (yes/no)\")\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result:\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(send_messages=False),\n ctx.guild.me: discord.PermissionOverwrite(send_messages=True),\n }\n else:\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),\n ctx.guild.me: discord.PermissionOverwrite(read_messages=True),\n }\n try:\n channel = await ctx.guild.create_text_channel(\n \"staff-applications\",\n overwrites=overwrites,\n reason=\"Application cog setup\",\n )\n except discord.Forbidden:\n return await ctx.send(\n \"Uh oh. Looks like I don't have permissions to manage channels.\"\n )\n await ctx.send(f\"What role can accept or reject applicants?\")\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=role)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n accepter = role.result\n await self.config.guild(ctx.guild).applicant_id.set(applicant.id)\n await self.config.guild(ctx.guild).channel_id.set(channel.id)\n await self.config.guild(ctx.guild).accepter_id.set(accepter.id)\n await ctx.send(\n \"You have finished the setup! Please, move your new channel to the category you want it in.\"\n )",
"def create_permission(permission, event):\n setDefaultRoles(permission.title, ('Manager',))",
"def test04_role_perms(self):\n print_ln('test_role_perms')\n \n try:\n rList = review.find_roles(Role(name='py-role*'))\n for rle in rList: \n print_ln(\"Perm Roles name=\" + rle.name)\n pList = review.role_perms(rle)\n for perm in pList:\n print_ln(\"Assigned perm obj name=\" + perm.obj_name + ', op name=' + perm.op_name + ', obj id=' + perm.obj_id, 1)\n except Exception as e:\n self.fail('test_role_perms failed, exception=' + e.msg)",
"def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)",
"def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)",
"def init():\n click.secho(\"[+] Initialize permissions\", fg=\"cyan\")\n init_permissions()\n click.secho(\"[+] Initialize permissions successfully\", fg=\"green\")",
"def update_admin_permission(self) -> None:\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()",
"def can_set_role(userid, role, group):",
"async def setup(bot: Bot) -> None:\n if len(ASSIGNABLE_ROLES) > ITEMS_PER_ROW * 5: # Discord limits views to 5 rows of buttons.\n log.error(\"Too many roles for 5 rows, not loading the Subscribe cog.\")\n else:\n await bot.add_cog(Subscribe(bot))",
"def setup(self):\n self.log.debug('upm - in upm setup()')\n # Add resource setup code here"
] | [
"0.70091057",
"0.6839477",
"0.66361266",
"0.6617161",
"0.6447892",
"0.6341974",
"0.62621003",
"0.6230001",
"0.6170113",
"0.61161715",
"0.61063576",
"0.60315776",
"0.602137",
"0.59753394",
"0.59370977",
"0.59244543",
"0.5920234",
"0.5868772",
"0.58590287",
"0.58578885",
"0.5850786",
"0.5850073",
"0.58468544",
"0.58419967",
"0.58419967",
"0.58354443",
"0.58293235",
"0.5822548",
"0.5803228",
"0.57876474"
] | 0.78424436 | 0 |
Unmutes a member in trading channels. You must have the Kick Members permission to use this. | async def tradingunmute(self, ctx, target: discord.Member, *, reason=None):
action = TradingUnmute(
target=target,
user=ctx.author,
reason=reason,
guild_id=ctx.guild.id,
)
await action.execute(ctx)
await action.notify()
await ctx.send(f"Unmuted **{target}** in trading channels.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def unmute(self, ctx, user: Redeemed):\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unmute yourself!\")\n return \n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\"Muted\"))\n await ctx.send(f\"{user.mention} has been unmuted\")",
"async def unmute(self, ctx, member: discord.Member):\n for channel in ctx.guild.text_channels:\n permissions = channel.permissions_for(member)\n\n if permissions.read_messages:\n # This removes the PermissionOverwrite on the channel, it\n # does not grant send_messages=True\n await channel.set_permissions(member, overwrite=None)",
"async def unmute(self, ctx: Context, members: commands.Greedy[discord.Member], *, reason: str = None):\n\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n member_display = []\n\n for member in members:\n if role not in member.roles:\n await ctx.send(f\"guild member `{member.display_name}` is already unmuted\")\n\n else:\n\n if await self.hiearchy_check(ctx, member):\n continue\n\n member_display.append(str(member))\n await member.remove_roles(role, reason=reason)\n\n member_display = \", \".join(member_display)\n\n if not member_display:\n member_display = \"no one\"\n\n await ctx.send(f\"> {ctx.author.name} unmuted {member_display}\")",
"def unban_member(self, *args, **kwargs):\n return self.bot.unban_chat_member(self.id, *args, **kwargs)",
"async def unmute(self, ctx,\n\t\ttarget: discord.Member\n\t):\n\n\t\tself.check_perms(ctx.author, target)\n\n\t\thandler = await Handler.new(self.bot, ctx.guild)\n\t\tawait handler.unmute(ctx.author, target)\n\n\t\tawait ctx.success(f\"{target} (`{target.id}`) has been unmuted.\")",
"def unmute(self, nick, chan, arg):\n if not arg:\n \tbot.msg(chan, get_doc())\n self.state.unmute(arg)\n self.msg(chan, \"%s: You are now allowed to use this bot\" % (arg))",
"async def unmute(self, ctx, target: discord.Member, *, reason=None):\n\n action = Unmute(\n target=target,\n user=ctx.author,\n reason=reason,\n guild_id=ctx.guild.id,\n )\n await action.execute(ctx)\n await action.notify()\n await ctx.send(f\"Unmuted **{target}**.\")",
"async def voice_unmute(self, ctx, member: discord.Member, *, reason: typing.Optional[str]):\n if member.voice and member.voice.mute:\n await member.edit(mute=False, reason=reason[:512])\n await ctx.send(f\"User {member.mention} successfully unmuted from voice\")\n return\n if member.voice and not member.voice.mute:\n await ctx.send(\"User is not muted\")\n return\n self.to_unmute.append(member.id)\n await self.add_to_unmutes(member.id)\n await ctx.send(f\"User {member.mention} added to users that will be unmuted\")",
"def unassign_members(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"unassign_members\"), kwargs)",
"async def unshush(self, ctx):\n author = ctx.message.author\n channel = author.voice.channel\n members = channel.members\n for member in members:\n user = ctx.guild.get_member(member.id)\n await user.edit(mute=False, deafen=False)\n\n embed = await embeds.generate_embed(ctx, author, members,\n description=\":white_check_mark: Successfully unshushed the following users:\",\n title=channel.name)\n await ctx.send(embed=embed)",
"async def unmute(self, ctx, user: discord.Member, *, reason: str = None):\r\n server = ctx.message.guild\r\n channel = ctx.message.channel\r\n author = ctx.message.author\r\n action = \"Unmute\"\r\n role = discord.utils.get(server.roles, name=\"Muted - Sensei\")\r\n if not role:\r\n await ctx.send(\"No-one is muted in this server :no_entry:\")\r\n return\r\n if role not in user.roles:\r\n await ctx.send(\"**{}** is not muted :no_entry:\".format(user))\r\n return\r\n try:\r\n await user.remove_roles(role)\r\n except:\r\n await ctx.send(\"I cannot remove the mute role from the user :no_entry:\")\r\n return\r\n await ctx.send(f\"**{user}** has been unmuted {self.bot.get_emoji(470063310386233344)}\")\r\n try:\r\n await self._log(author, server, action, reason, user)\r\n except:\r\n pass\r\n self.d[str(server.id)][str(user.id)][\"toggle\"] = False\r\n self.d[str(server.id)][str(user.id)][\"time\"] = None\r\n self.d[str(server.id)][str(user.id)][\"amount\"] = None\r\n dataIO.save_json(self.file, self.d)\r\n try:\r\n s = discord.Embed(title=\"You have been unmuted early in {}\".format(server.name), colour=000000,\r\n timestamp=datetime.datetime.utcnow())\r\n s.add_field(name=\"Moderator\", value=\"{} ({})\".format(author, str(author.id)))\r\n await user.send(embed=s)\r\n except:\r\n pass",
"async def _unpunish(self, member, reason=None):\n role = await self.get_role(member.server)\n if role:\n # Has to be done first to prevent triggering on_member_update listener\n self._unpunish_data(member)\n await self.bot.remove_roles(member, role)\n\n msg = 'Your punishment in %s has ended.' % member.server.name\n if reason:\n msg += \"\\nReason was: %s\" % reason",
"async def unplonk(ctx, user: typing.Union[discord.Member, discord.User]):\n await bot.plonk.delete(user.id)\n await r(ctx, f'Unplonked **{user}**')",
"async def unlock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n if (channel.category.name == CATEGORY_SO or channel.category.name == CATEGORY_GENERAL):\n await ctx.send(\"Synced permissions with channel category.\")\n return await channel.edit(sync_permissions=True)\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name != CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True, read_messages=True)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bRole = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(aRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Unlocked the channel to Member access. Please check if permissions need to be synced.\")",
"async def unban(self, ctx, *, member): # don't convert to discord.Member as it isn't a server member, just a string\n banned_users = await ctx.guild.bans() # pulls ban list\n member_name, member_discriminator = member.split('#') # split the member name from the numerical discriminator\n for ban_entry in banned_users:\n user = ban_entry.user\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await ctx.guild.unban(user)\n await ctx.send(f'Unbanned {user.name}#{user.discriminator}')\n return",
"def unmute(guild, channel):\n\tlogger.info('Unmuting channel {}::{}...', guild.name, channel.name)\n\tif str(guild.id) in Settings.muted_channels:\n\t\tif str(channel.id) in Settings.muted_channels[str(guild.id)]:\n\t\t\tSettings.muted_channels[str(guild.id)].remove(str(channel.id))",
"async def unban(ctx, *, member):\n banned_users = await ctx.guild.bans()\n member_name, member_discriminator = member.split(\"#\")\n\n for ban_entry in banned_users:\n user = ban_entry.user\n\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await ctx.guild.unban(user)\n await ctx.send(f\"Unbanned {user.mention}\")\n return",
"async def on_member_remove(member):\r\n pass",
"def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)",
"def mutate(self, info, user_id):\n del info\n assert self is None, \"Root `self` expected to be `None`!\"\n\n OnChatMessageSent.unsubscribe(group=f\"user_{user_id}\")\n\n return KickOutUser(success=True)",
"async def unmute(self, ctx, user: discord.Member = None):\n try:\n if not user:\n return await ctx.send(f\"> **<@{ctx.author.id}>, Please specify a user to unmute.**\")\n if user.id == ctx.author.id:\n return await ctx.send(f\"> **<@{ctx.author.id}>, You cannot unmute yourself.**\")\n mute_role = await self.get_mute_role(ctx)\n muted = await self.check_if_muted(user.id, mute_role)\n if not mute_role:\n return await ctx.send(\n \">**This user was not muted by me as the mute role could not be found. In order for me to create a \"\n \"custom mute role, I need to mute someone first.**\")\n if muted:\n await user.remove_roles(mute_role,\n reason=f\"UnMuting User - Requested by {ctx.author.display_name} ({user.id})\")\n return await ctx.send(f\"> **<@{user.id}> has been unmuted.**\")\n else:\n return await ctx.send(f\"> **<@{user.id}> is not muted.**\")\n except Exception as e:\n log.console(e)\n return await ctx.send(f\"> **I am missing permissions to unmute {user.display_name}. {e}**\")",
"async def unban(self, ctx, member: BannedMember, *, reason=None):\n if member is None:\n return await ctx.error(\"Could not find user to unban.\")\n\n try:\n await ctx.guild.unban(member.user, reason=reason)\n except:\n await ctx.error('Could not unban member.')\n\n em = discord.Embed(title=f'Banned: {member}', color=self.color, description=f'Reason: {reason}')\n await ctx.send(embed=em)",
"async def remove_mute(id: int) -> None:\n\n guild = BOT_GLOBAL.get_guild(BOT_GLOBAL.settings.guild_id)\n if guild is not None:\n mute_role = BOT_GLOBAL.settings.guild().role_mute\n mute_role = guild.get_role(mute_role)\n if mute_role is not None:\n user = guild.get_member(id)\n if user is not None:\n await user.remove_roles(mute_role)\n case = Case(\n _id=BOT_GLOBAL.settings.guild().case_id,\n _type=\"UNMUTE\",\n mod_id=BOT_GLOBAL.user.id,\n mod_tag=str(BOT_GLOBAL.user),\n reason=\"Temporary mute expired.\",\n )\n await BOT_GLOBAL.settings.inc_caseid()\n await BOT_GLOBAL.settings.add_case(user.id, case)\n\n u = await BOT_GLOBAL.settings.user(id=user.id)\n u.is_muted = False\n u.save()\n\n log = await prepare_unmute_log(BOT_GLOBAL.user, user, case)\n\n log.remove_author()\n log.set_thumbnail(url=user.avatar_url)\n\n public_chan = guild.get_channel(\n BOT_GLOBAL.settings.guild().channel_public)\n \n dmed = True\n try:\n await user.send(embed=log)\n except Exception:\n dmed = False\n \n await public_chan.send(user.mention if not dmed else \"\", embed=log)\n\n else:\n case = Case(\n _id=BOT_GLOBAL.settings.guild().case_id,\n _type=\"UNMUTE\",\n mod_id=BOT_GLOBAL.user.id,\n mod_tag=str(BOT_GLOBAL.user),\n reason=\"Temporary mute expired.\",\n )\n await BOT_GLOBAL.settings.inc_caseid()\n await BOT_GLOBAL.settings.add_case(id, case)\n\n u = await BOT_GLOBAL.settings.user(id=id)\n u.is_muted = False\n u.save()",
"async def clrreact(ctx, msg: discord.Message, *args: discord.Member):\n users = args\n if (not users):\n await msg.clear_reactions()\n await ctx.send(\"Cleared all reactions on message.\")\n else:\n for u in users:\n for r in msg.reactions:\n await r.remove(u)\n await ctx.send(f\"Cleared reactions on message from {len(users)} user(s).\")",
"def auto_unmute():\n muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n # put user IDs of people you want to remain muted here\n users_keep_muted = set([])\n \n # mute all \n for user_id in muted:\n if user_id not in users_keep_muted:\n t.mutes.users.destroy(user_id=user_id)\n print(\"unmuted %d\" % (user_id))",
"async def mute(self, ctx):\n author = ctx.message.author\n channel = author.voice.channel\n members = channel.members\n for member in members:\n user = ctx.guild.get_member(member.id)\n await user.edit(mute=True)\n\n embed = await embeds.generate_embed(ctx, author, members,\n description=\":white_check_mark: Successfully muted the following users:\",\n title=channel.name)\n await ctx.send(embed=embed)",
"async def remove_modaction(self, ctx, member: GeneralMember, index: int=None): # noqa\n if member is None or index is None:\n await ctx.send(\n \"You need to supply the correct parameters <member, index (from 1)>, try again.\", # noqa\n delete_after=5)\n return\n try:\n status = await self.bot.pg_utils.delete_single_modaction(\n ctx.guild.id,\n member.id,\n index,\n self.bot.logger\n )\n if '0' in status:\n await ctx.send(\n embed=embeds.CommandErrorEmbed(\n 'User has not recieved any modactions.'),\n delete_after=3)\n return\n local_embed = embeds.ModRmEmbed(member)\n await ctx.send(embed=local_embed)\n except Exception as e:\n await ctx.send(embed=embeds.InternalErrorEmbed())\n self.bot.logger.warning(f'Error removing modaction for user: {e}')",
"async def mute(self, ctx, member: discord.Member, time='15m'):\n guild_permissions = member.guild_permissions\n wait_time = parse_time(time).total_seconds()\n # Because sometimes members have nicknames with markdown\n escaped_name = escape_markdown(member.display_name)\n\n if guild_permissions.kick_members:\n # do not mute someone who has permissions to kick members\n await ctx.send(f'Cannot mute {escaped_name} due to roles.')\n\n elif member.bot:\n # do not mute bots\n await ctx.send(f'Cannot mute {escaped_name} (is a bot).')\n\n else:\n overwrite = discord.PermissionOverwrite(\n add_reactions=False,\n send_messages=False,\n )\n\n log_str = (f'{ctx.author.display_name} has muted '\n f'member {member} (<@{member.id}>) for {time}.')\n logger.info(log_str)\n\n for channel in ctx.guild.text_channels:\n permissions = channel.permissions_for(member)\n\n if permissions.read_messages:\n await channel.set_permissions(member, overwrite=overwrite)\n\n await asyncio.sleep(wait_time)\n await ctx.invoke(self.unmute, member)",
"async def punish(\n self, ctx: commands.Context, member: discord.Member, punishment: Punishment\n ) -> None:",
"async def demote(self, ctx, *, member = None):\r\n if not await self._can_run(ctx): return \r\n em = discord.Embed(color = 0XFF8C00, description = \"> Menurunkan jabatan role xp kepada member ke role xp dibawahnya\\n> \\n\"\r\n \"> **Panduan**\\n\"\r\n \"> `{}demote [member]`\"\r\n .format(ctx.prefix))\r\n em.set_footer(text = \"Saat mengetik command, tanda [] tidak usah digunakan.\\n{}\".format(ctx.author), icon_url = f\"{ctx.author.avatar_url}\")\r\n\r\n if member == None:\r\n return await ctx.send(embed=em)\r\n\r\n if type(member) is str:\r\n memberName = member\r\n member = DisplayName.memberForName(memberName, ctx.message.guild)\r\n if not member:\r\n msg = '┐( ̄ヘ ̄;)┌\\nAku tidak dapat menemukan *{}* dalam server...'.format(memberName)\r\n # Check for suppress\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n\r\n # Get user's xp\r\n xp = int(self.settings.getUserStat(member, ctx.guild, \"XP\"))\r\n\r\n # Get the role list\r\n promoArray = self.getSortedRoles(ctx.guild)\r\n currentRole = self.getCurrentRoleIndex(member, ctx.guild)\r\n nextRole = currentRole - 1\r\n if nextRole == -1:\r\n # We're removing the user from all roles\r\n neededXp = int(promoArray[0]['XP'])-xp-1\r\n self.settings.incrementStat(member, ctx.guild, \"XP\", neededXp)\r\n remRoles = []\r\n for i in range(0, len(promoArray)):\r\n remRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)\r\n if remRole:\r\n if remRole in member.roles:\r\n remRoles.append(remRole)\r\n # await member.remove_roles(*remRoles)\r\n # Use role manager instead\r\n self.settings.role.rem_roles(member, remRoles)\r\n msg = 'sejumlah *{} xp* telah dikurangi dari *{}* dan role dia telah diturunkan dari system xp!'.format(neededXp*-1, DisplayName.name(member))\r\n self.bot.dispatch(\"xp\", member, ctx.author, neededXp)\r\n elif nextRole < -1:\r\n msg = '┐( ̄ヘ ̄;)┌\\nTidak ada role xp yang lebih rendah untuk menurunkan role milik *{}*.'.format(DisplayName.name(member))\r\n else:\r\n newRole = DisplayName.roleForID(promoArray[nextRole]['ID'], ctx.guild)\r\n neededXp = int(promoArray[nextRole]['XP'])-xp\r\n self.settings.incrementStat(member, ctx.guild, \"XP\", neededXp)\r\n # Start at the currentRole and remove that and all roles above\r\n remRoles = []\r\n for i in range(currentRole, len(promoArray)):\r\n remRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)\r\n if remRole:\r\n if remRole in member.roles:\r\n remRoles.append(remRole)\r\n # await member.remove_roles(*remRoles)\r\n # Use role manager instead\r\n self.settings.role.rem_roles(member, remRoles)\r\n if not newRole:\r\n # Promotion role doesn't exist\r\n msg = '┐( ̄ヘ ̄;)┌\\nSepertinya **{}** sudah tidak ada dalam server. 
namun sejumlah *{:,} xp* milik *{}* akan tetap dikurangi\\n tapi aku tidak dapat menurunkan jabatan role xp, pertimbangkan lagi untuk merevisi role xp dalam server mu.'.format(promoArray[nextRole]['Name'], neededXp*-1, DisplayName.name(member))\r\n else:\r\n msg = 'sejumlah *{:,} xp* milik *{}* telah dikurangi dan jabatan role xp telah diturunkan ke **{}**!'.format(neededXp*-1, DisplayName.name(member), newRole.name)\r\n self.bot.dispatch(\"xp\", member, ctx.author, neededXp)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)"
] | [
"0.69515646",
"0.69347554",
"0.69219375",
"0.68286806",
"0.67809457",
"0.6697323",
"0.66659373",
"0.65882045",
"0.6502169",
"0.6473313",
"0.6384512",
"0.6376163",
"0.6283388",
"0.60818565",
"0.6047106",
"0.60347235",
"0.59701824",
"0.5945254",
"0.5924875",
"0.59149057",
"0.5914142",
"0.59059274",
"0.590194",
"0.588424",
"0.58821976",
"0.58776695",
"0.58637667",
"0.586152",
"0.5844665",
"0.58402073"
] | 0.7148507 | 0 |
Views a member's punishment history. You must have the Kick Members permission to use this. | async def history(self, ctx, *, target: Union[discord.Member, FetchUserConverter]):
query = {"target_id": target.id, "guild_id": ctx.guild.id}
count = await self.bot.mongo.db.action.count_documents(query)
async def get_actions():
async for x in self.bot.mongo.db.action.find(query).sort("created_at", -1):
yield Action.build_from_mongo(self.bot, x)
def format_item(i, x):
name = f"{x._id}. {x.emoji} {x.past_tense.title()} by {x.user}"
reason = x.reason or "No reason provided"
lines = [
f"– **Reason:** {reason}",
f"– at {discord.utils.format_dt(x.created_at)} ({discord.utils.format_dt(x.created_at, 'R')})",
]
if x.duration is not None:
lines.insert(1, f"– **Duration:** {time.human_timedelta(x.duration)}")
return {"name": name, "value": "\n".join(lines), "inline": False}
pages = ViewMenuPages(
source=AsyncEmbedFieldsPageSource(
get_actions(),
title=f"Punishment History • {target}",
format_item=format_item,
count=count,
)
)
try:
await pages.start(ctx)
except IndexError:
await ctx.send("No punishment history found.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def punish(\n self, ctx: commands.Context, member: discord.Member, punishment: Punishment\n ) -> None:",
"def entry_member(request,member_id):\n return EntryView.__index(request,member_id)",
"async def on_member_remove(self, member: Member):\n\n if not self._is_tracked(member.guild, EventPriority.leave):\n return\n\n # Stop if ban. Will be handled in on_member_ban\n found, *_ = await self._get_last_audit_action(member.guild, AuditLogAction.ban, member)\n if found:\n return\n\n # Attempt to retrieve kic reason and mod that kicked from Audit Log\n found, errored, mod, reason = await self._get_last_audit_action(member.guild, AuditLogAction.kick, member)\n\n # Kick found in audit log\n if found and not errored:\n leave_type = EventPriority.kick\n\n em = self.em_base(\n member,\n f\"User {member.mention} ({member.name}) was kicked\",\n EventColors.kick.value\n )\n\n em.add_field(\n name=\"Kicked By\",\n value=f\"{mod.mention}\\n({mod.name}#{mod.discriminator})\"\n )\n\n em.add_field(\n name=\"Reason\",\n value=reason if reason else \"No reason given\"\n )\n\n # Cannot access audit log or HTTP error prevented access\n elif errored and not found:\n print(\"errored and not found\")\n leave_type = EventPriority.kick\n em = self.em_base(\n member,\n f\"User {member.name} may have been kicked\",\n EventColors.kick.value\n )\n em.description = f\"{em.description}\\n\\nAudit Log inaccessible\\n\" \\\n f\"Unable to determine if member remove was kick or leave\"\n em.add_field(\n name=\"Kicked By\",\n value=\"Unknown\\nAudit Log inaccessible\"\n )\n em.add_field(\n name=\"Reason\",\n value=\"Irretrievable\\nAudit Log inaccessible\"\n )\n\n # Successfully accessed audit log and found no kick\n # Presume voluntary leave\n else:\n leave_type = EventPriority.leave\n\n em = self.em_base(\n member,\n f\"User {member.mention} ({member.name}) left\",\n EventColors.kick.value\n )\n\n roles = \"\\n\".join(\n [f\"{role.mention} ({role.name})\" for role in\n sorted(member.roles, reverse=True) if role.name != \"@everyone\"]\n )\n\n em.add_field(\n name=\"Roles\",\n value=roles if roles else \"User had no roles\"\n )\n\n await self.log_event(em, member.guild, priority=leave_type)",
"def view_member(request, member_id):\n member = get_object_or_404(Member, pk=member_id)\n context = {'member': member }\n return render_to_response('member_view.html',\n context,\n context_instance=RequestContext(request))",
"async def on_member_unban(self, guild: discord.Guild, member: discord.Member) -> None:\n\n # retrieve logging information\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n PartialLoggingAction,\n 'SELECT CHANNEL_ID, BITS FROM LOGGING WHERE GUILD_ID=?',\n (guild.id,))\n ):\n await log_to_channel(\n self.bot,\n LoggingActions.USER_UNBANNED,\n logging_info[0].bits,\n logging_info[0].channel_id,\n f'**{str(member)}** was unbanned from the guild.'\n )",
"async def torment(self, ctx, *, member = None, times : int = None):\r\n\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\t\tmessage = ctx.message\r\n\r\n\t\t# Only allow owner to change server stats\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\tusage = 'Usage: `{}torment [role/member] [times]`'.format(ctx.prefix)\r\n\r\n\t\tisRole = False\r\n\r\n\t\tif member == None:\r\n\t\t\tawait ctx.channel.send(usage)\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\t# Check for formatting issues\r\n\t\tif times == None:\r\n\t\t\t# Either xp wasn't set - or it's the last section\r\n\t\t\tif type(member) is str:\r\n\t\t\t\t# It' a string - the hope continues\r\n\t\t\t\troleCheck = DisplayName.checkRoleForInt(member, server)\r\n\t\t\t\tif roleCheck and roleCheck[\"Role\"]:\r\n\t\t\t\t\tisRole = True\r\n\t\t\t\t\tmember = roleCheck[\"Role\"]\r\n\t\t\t\t\ttimes = roleCheck[\"Int\"]\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Role is invalid - check for member instead\r\n\t\t\t\t\tnameCheck = DisplayName.checkNameForInt(member, server)\r\n\t\t\t\t\tif not nameCheck:\r\n\t\t\t\t\t\tawait ctx.channel.send(usage)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tif not nameCheck[\"Member\"]:\r\n\t\t\t\t\t\tmsg = 'I couldn\\'t find that user or role on the server.'.format(member)\r\n\t\t\t\t\t\tawait ctx.channel.send(msg)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tmember = nameCheck[\"Member\"]\r\n\t\t\t\t\ttimes = nameCheck[\"Int\"]\r\n\t\t\t\t\t\r\n\t\t# Set the torment flag\r\n\t\tself.toTorment = True\r\n\r\n\t\tif times == None:\r\n\t\t\t# Still no times - roll back to default\r\n\t\t\ttimes = 25\r\n\t\t\t\r\n\t\tif times > 100:\r\n\t\t\ttimes = 100\r\n\t\t\t\r\n\t\tif times == 0:\r\n\t\t\tawait ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')\r\n\t\t\treturn\r\n\t\t\r\n\t\tif times < 0:\r\n\t\t\tawait ctx.channel.send('I just uh... *un-tormented* them. Yeah.')\r\n\t\t\treturn\r\n\t\t\r\n\t\t# Delete original torment message\r\n\t\tawait message.delete()\r\n\r\n\t\tfor i in range(0, times):\r\n\t\t\t# Do this over time\r\n\t\t\ttry:\r\n\t\t\t\tif member.name == \"@everyone\" and type(member) is discord.Role:\r\n\t\t\t\t\tawait channel.send(\"{}\".format(member.name),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\telse:\r\n\t\t\t\t\tawait channel.send('{}'.format(member.mention),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\texcept Exception:\r\n\t\t\t\tpass\r\n\t\t\tfor j in range(0, self.waitBetween):\r\n\t\t\t\t# Wait for 1 second, then check if we should cancel - then wait some more\r\n\t\t\t\tawait asyncio.sleep(1)\r\n\t\t\t\tif not self.toTorment:\r\n\t\t\t\t\treturn",
"async def replist_command(self, ctx):\n rep_model = (\n await ReputationPoints.filter(guild_id=ctx.guild.id)\n .order_by(\"-points\")\n .limit(10)\n )\n leaderboard = \"\\n\".join(\n [\n f\"**{i+1}.** {model.member_name} - {model.points}\"\n for (i, model) in enumerate(rep_model)\n ]\n )\n # print(leaderboard)\n embed = Embed(\n description=leaderboard if len(rep_model) else \"No data found\",\n color=Color.blurple(),\n timestamp=datetime.utcnow(),\n )\n embed.set_footer(text=f\"Requested by {ctx.author.name}\")\n embed.set_author(\n name=f\"{ctx.guild.name} Reputation Leaderboard\", icon_url=ctx.guild.icon_url\n )\n await ctx.send(embed=embed)",
"async def reputation_command(self, ctx, member: Member):\n await self.reputation_handler(ctx, member)",
"async def info_user(self, ctx, member: Optional[discord.Member]):\n member1 = member or ctx.author\n embed = discord.Embed(title=\"Member Information\",\n color=discord.Color.blurple(),\n timestamp=datetime.utcnow())\n\n embed.add_field(name=\"ID\", value=f\"{member1.id}\", inline=False)\n embed.add_field(\n name=\"Name\", value=f\"{member1.name}#{member1.discriminator}\")\n embed.add_field(name=\"Top role\", value=f\"{member1.top_role.mention}\")\n embed.add_field(name=\"status\",\n value=f\"{str(member1.activity.type).split('.') if member1.activity else 'N/A'} {member1.activity.name if member1.activity else ''}\")\n embed.add_field(\n name=\"created at\", value=f\"{member1.created_at.strftime('%d/%m/%y %H:%M:%S')}\")\n embed.add_field(\n name=\"Joined at\", value=f\"{member1.joined_at.strftime('%d/%m/%y %H:%M:%S')}\")\n embed.add_field(name=\"Boosted?\", value=f\"{member1.premium_since}\")\n\n await ctx.reply(embed=embed)",
"async def user(self, ctx, member: discord.Member = None):\r\n rank = 1\r\n if member is None:\r\n member = ctx.author\r\n x = member.name\r\n for (k, v) in sorted(player.items(), key=lambda x: expose(x[1]), reverse=True):\r\n if k == x:\r\n start = '```md' + u\"\\u000A\" + '-'*68 + u\"\\u000A\" + u\"\\u000A\" + 'User Info: '\r\n middle = u\"\\u000A\" + u\"\\u000A\" + '[rank][#' + str(rank) + '] [rating]['\r\n end = str(round(expose(v), 2)) + '] [skill][' + str(round(v.mu, 2)) + '] [uncertainty]['\r\n await ctx.send(start + k + middle + end + str(round(v.sigma, 2)) + ']' + u\"\\u000A\" + u\"\\u000A\" + '-'*68 + u\"\\u000A\" + '```')\r\n if rank == 1:\r\n await ctx.send(k + ' is the G O A T 🐐')\r\n break\r\n rank += 1",
"async def dotabuff(self, ctx, *, member: discord.Member=None):\n if member is None:\n member = ctx.message.author\n\n steam_ids = self.bot.steam_info.get(member.id)\n\n if steam_ids is None:\n await self.bot.say(\"{0.name} has not linked their Steam account to MT5ABot.\".format(member))\n return\n\n msg = \"__Dotabuff page(s) for {0.name}:__\\n\\n\".format(member)\n try:\n response = self.steam_api.get_player_summaries(steam_ids)['response']\n except:\n await self.bot.say(\"The Steam Web API is down. Please try again later.\")\n # Response isn't in a guaranteed order.\n for steam_id in steam_ids:\n for player in response['players']:\n if player['steamid'] == steam_id:\n dota_id = steamapi.ID.steam_to_dota(steam_id)\n msg += \"{0} - <https://dotabuff.com/players/{1}>\\n\".format(player['personaname'], dota_id)\n await self.bot.say(msg)",
"async def unplonk(ctx, user: typing.Union[discord.Member, discord.User]):\n await bot.plonk.delete(user.id)\n await r(ctx, f'Unplonked **{user}**')",
"async def plagueProfile(self, ctx, *, member: FuzzyHuman = None):\n member = member or ctx.author\n data = await self.config.user(member).all()\n userRole = data[\"gameRole\"]\n userState = data[\"gameState\"]\n\n title = f\"Plague Profile\"\n description = (\n f\"Role: {userRole}\\nState: {userState}\\nNotifications: {data['notifications']}\"\n )\n color = await ctx.embed_color()\n if userRole == \"Doctor\":\n thumbnail = \"https://contestimg.wish.com/api/webimage/5b556e7ba225161706d6857a-large.jpg?cache_buster=e79a94ce3e105025c5655d67b3d5e1bd\"\n elif userRole == \"Plaguebearer\":\n thumbnail = \"https://vignette.wikia.nocookie.net/warhammer40k/images/c/c2/Plaguebearer1.png/revision/latest/scale-to-width-down/340?cb=20170829232116\"\n elif userState == \"infected\":\n thumbnail = (\n \"https://cdn.pixabay.com/photo/2020/04/29/07/54/coronavirus-5107715_960_720.png\"\n )\n else:\n thumbnail = \"https://static.thenounproject.com/png/2090399-200.png\"\n\n embed = discord.Embed(title=title, colour=color, description=description)\n embed.set_thumbnail(url=thumbnail)\n embed.set_author(name=member, icon_url=member.avatar_url)\n await ctx.send(embed=embed)",
"async def demote(self, ctx, *, member = None):\r\n if not await self._can_run(ctx): return \r\n em = discord.Embed(color = 0XFF8C00, description = \"> Menurunkan jabatan role xp kepada member ke role xp dibawahnya\\n> \\n\"\r\n \"> **Panduan**\\n\"\r\n \"> `{}demote [member]`\"\r\n .format(ctx.prefix))\r\n em.set_footer(text = \"Saat mengetik command, tanda [] tidak usah digunakan.\\n{}\".format(ctx.author), icon_url = f\"{ctx.author.avatar_url}\")\r\n\r\n if member == None:\r\n return await ctx.send(embed=em)\r\n\r\n if type(member) is str:\r\n memberName = member\r\n member = DisplayName.memberForName(memberName, ctx.message.guild)\r\n if not member:\r\n msg = '┐( ̄ヘ ̄;)┌\\nAku tidak dapat menemukan *{}* dalam server...'.format(memberName)\r\n # Check for suppress\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)\r\n\r\n # Get user's xp\r\n xp = int(self.settings.getUserStat(member, ctx.guild, \"XP\"))\r\n\r\n # Get the role list\r\n promoArray = self.getSortedRoles(ctx.guild)\r\n currentRole = self.getCurrentRoleIndex(member, ctx.guild)\r\n nextRole = currentRole - 1\r\n if nextRole == -1:\r\n # We're removing the user from all roles\r\n neededXp = int(promoArray[0]['XP'])-xp-1\r\n self.settings.incrementStat(member, ctx.guild, \"XP\", neededXp)\r\n remRoles = []\r\n for i in range(0, len(promoArray)):\r\n remRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)\r\n if remRole:\r\n if remRole in member.roles:\r\n remRoles.append(remRole)\r\n # await member.remove_roles(*remRoles)\r\n # Use role manager instead\r\n self.settings.role.rem_roles(member, remRoles)\r\n msg = 'sejumlah *{} xp* telah dikurangi dari *{}* dan role dia telah diturunkan dari system xp!'.format(neededXp*-1, DisplayName.name(member))\r\n self.bot.dispatch(\"xp\", member, ctx.author, neededXp)\r\n elif nextRole < -1:\r\n msg = '┐( ̄ヘ ̄;)┌\\nTidak ada role xp yang lebih rendah untuk menurunkan role milik *{}*.'.format(DisplayName.name(member))\r\n else:\r\n newRole = DisplayName.roleForID(promoArray[nextRole]['ID'], ctx.guild)\r\n neededXp = int(promoArray[nextRole]['XP'])-xp\r\n self.settings.incrementStat(member, ctx.guild, \"XP\", neededXp)\r\n # Start at the currentRole and remove that and all roles above\r\n remRoles = []\r\n for i in range(currentRole, len(promoArray)):\r\n remRole = DisplayName.roleForID(promoArray[i]['ID'], ctx.guild)\r\n if remRole:\r\n if remRole in member.roles:\r\n remRoles.append(remRole)\r\n # await member.remove_roles(*remRoles)\r\n # Use role manager instead\r\n self.settings.role.rem_roles(member, remRoles)\r\n if not newRole:\r\n # Promotion role doesn't exist\r\n msg = '┐( ̄ヘ ̄;)┌\\nSepertinya **{}** sudah tidak ada dalam server. 
namun sejumlah *{:,} xp* milik *{}* akan tetap dikurangi\\n tapi aku tidak dapat menurunkan jabatan role xp, pertimbangkan lagi untuk merevisi role xp dalam server mu.'.format(promoArray[nextRole]['Name'], neededXp*-1, DisplayName.name(member))\r\n else:\r\n msg = 'sejumlah *{:,} xp* milik *{}* telah dikurangi dan jabatan role xp telah diturunkan ke **{}**!'.format(neededXp*-1, DisplayName.name(member), newRole.name)\r\n self.bot.dispatch(\"xp\", member, ctx.author, neededXp)\r\n msgDone = Utils.suppressed(ctx,msg)\r\n em = discord.Embed(color = 0XFF8C00, description = msgDone)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n return await ctx.send(embed = em)",
"async def _unpunish(self, member, reason=None):\n role = await self.get_role(member.server)\n if role:\n # Has to be done first to prevent triggering on_member_update listener\n self._unpunish_data(member)\n await self.bot.remove_roles(member, role)\n\n msg = 'Your punishment in %s has ended.' % member.server.name\n if reason:\n msg += \"\\nReason was: %s\" % reason",
"async def servertorment(self, ctx, *, member = None, times : int = None):\r\n\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\t\tmessage = ctx.message\r\n\r\n\t\t# Only allow owner\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\tusage = 'Usage: `{}torment [role/member] [times]`'.format(ctx.prefix)\r\n\r\n\t\tisRole = False\r\n\r\n\t\tif member == None:\r\n\t\t\tawait ctx.channel.send(usage)\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\t# Check for formatting issues\r\n\t\tif times == None:\r\n\t\t\t# Either xp wasn't set - or it's the last section\r\n\t\t\tif type(member) is str:\r\n\t\t\t\t# It' a string - the hope continues\r\n\t\t\t\troleCheck = DisplayName.checkRoleForInt(member, server)\r\n\t\t\t\tif roleCheck and roleCheck[\"Role\"]:\r\n\t\t\t\t\tisRole = True\r\n\t\t\t\t\tmember = roleCheck[\"Role\"]\r\n\t\t\t\t\ttimes = roleCheck[\"Int\"]\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Role is invalid - check for member instead\r\n\t\t\t\t\tnameCheck = DisplayName.checkNameForInt(member, server)\r\n\t\t\t\t\tif not nameCheck:\r\n\t\t\t\t\t\tawait ctx.channel.send(usage)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tif not nameCheck[\"Member\"]:\r\n\t\t\t\t\t\tmsg = 'I couldn\\'t find that user or role on the server.'.format(member)\r\n\t\t\t\t\t\tawait ctx.channel.send(msg)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tmember = nameCheck[\"Member\"]\r\n\t\t\t\t\ttimes = nameCheck[\"Int\"]\r\n\t\t\t\t\t\r\n\t\t# Set the torment flag\r\n\t\tself.toTorment = True\r\n\r\n\t\tif times == None:\r\n\t\t\t# Still no times - roll back to default\r\n\t\t\ttimes = 25\r\n\t\t\t\r\n\t\tif times > 100:\r\n\t\t\ttimes = 100\r\n\t\t\t\r\n\t\tif times == 0:\r\n\t\t\tawait ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')\r\n\t\t\treturn\r\n\t\t\r\n\t\tif times < 0:\r\n\t\t\tawait ctx.channel.send('I just uh... *un-tormented* them. Yeah.')\r\n\t\t\treturn\r\n\r\n\t\t# Delete original torment message\r\n\t\tawait message.delete()\r\n\t\t\r\n\t\tfor i in range(0, times):\r\n\t\t\t# Do this over time\r\n\t\t\tfor channel in server.channels:\r\n\t\t\t\t# Get user's permissions\r\n\t\t\t\tif type(member) is discord.Role or channel.permissions_for(member).read_messages and type(channel) is discord.TextChannel:\r\n\t\t\t\t\t# Only ping where they can read\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tif member.name == \"@everyone\" and type(member) is discord.Role:\r\n\t\t\t\t\t\t\tawait channel.send(\"{}\".format(member.name),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tawait channel.send('{}'.format(member.mention),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\t\texcept Exception:\r\n\t\t\t\t\t\tpass\r\n\t\t\tfor j in range(0, self.waitBetween):\r\n\t\t\t\t# Wait for 1 second, then check if we should cancel - then wait some more\r\n\t\t\t\tawait asyncio.sleep(1)\r\n\t\t\t\tif not self.toTorment:\r\n\t\t\t\t\treturn",
"async def logban(self, ctx, member: BannedMember, *,\n reason: ActionReason = None):\n if self.bot.server_settings[ctx.guild.id]['modlog_enabled']:\n try:\n confirm = await helpers.custom_confirm(\n ctx,\n f'```\\nUser: {member.user}\\nReason: {reason}\\n```'\n )\n if not confirm:\n return\n resp_mod = ctx.author\n ban_reason = reason if reason else member.reason\n local_embed = embeds.BanEmbed(\n member.user, resp_mod, ban_reason)\n mod_logs = await self.bot.pg_utils.get_modlogs(ctx.guild.id)\n for channel_id in mod_logs:\n try:\n await self.bot.pg_utils.insert_modaction(\n ctx.guild.id,\n resp_mod.id,\n member.user.id,\n ban_reason,\n enums.Action.BAN\n )\n except Exception as e:\n self.bot.logger.warning(f'Error storing modaction: {e}') # noqa\n await (self.bot.get_channel(channel_id)).send(\n embed=local_embed)\n except Exception as e:\n self.bot.logger.warning(f'Issue posting to mod log: {e}')\n else:\n await ctx.send(f'No modlog channels detected', delete_after=3)",
"async def stealthtorment(self, ctx, *, member = None, times : int = None):\r\n\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\t\tmessage = ctx.message\r\n\r\n\t\t# Only allow owner\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\tusage = 'Usage: `{}torment [role/member] [times]`'.format(ctx.prefix)\r\n\r\n\t\tisRole = False\r\n\r\n\t\tif member == None:\r\n\t\t\tawait ctx.channel.send(usage)\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\t# Check for formatting issues\r\n\t\tif times == None:\r\n\t\t\t# Either xp wasn't set - or it's the last section\r\n\t\t\tif type(member) is str:\r\n\t\t\t\t# It' a string - the hope continues\r\n\t\t\t\troleCheck = DisplayName.checkRoleForInt(member, server)\r\n\t\t\t\tif roleCheck and roleCheck[\"Role\"]:\r\n\t\t\t\t\tisRole = True\r\n\t\t\t\t\tmember = roleCheck[\"Role\"]\r\n\t\t\t\t\ttimes = roleCheck[\"Int\"]\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Role is invalid - check for member instead\r\n\t\t\t\t\tnameCheck = DisplayName.checkNameForInt(member, server)\r\n\t\t\t\t\tif not nameCheck:\r\n\t\t\t\t\t\tawait ctx.channel.send(usage)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tif not nameCheck[\"Member\"]:\r\n\t\t\t\t\t\tmsg = 'I couldn\\'t find that user or role on the server.'.format(member)\r\n\t\t\t\t\t\tawait ctx.channel.send(msg)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tmember = nameCheck[\"Member\"]\r\n\t\t\t\t\ttimes = nameCheck[\"Int\"]\r\n\t\t\t\t\t\r\n\t\t# Set the torment flag\r\n\t\tself.toTorment = True\r\n\r\n\t\tif times == None:\r\n\t\t\t# Still no times - roll back to default\r\n\t\t\ttimes = 25\r\n\t\t\t\r\n\t\tif times > 100:\r\n\t\t\ttimes = 100\r\n\t\t\t\r\n\t\tif times == 0:\r\n\t\t\tawait ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')\r\n\t\t\treturn\r\n\t\t\r\n\t\tif times < 0:\r\n\t\t\tawait ctx.channel.send('I just uh... *un-tormented* them. Yeah.')\r\n\t\t\treturn\r\n\r\n\t\t# Delete original torment message\r\n\t\tawait message.delete()\r\n\t\t\r\n\t\tfor i in range(0, times):\r\n\t\t\t# Do this over time\r\n\t\t\ttry:\r\n\t\t\t\tif member.name == \"@everyone\" and type(member) is discord.Role:\r\n\t\t\t\t\ttmessage = await ctx.channel.send(\"{}\".format(member.name),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\telse:\r\n\t\t\t\t\ttmessage = await ctx.channel.send('{}'.format(member.mention),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\tawait tmessage.delete()\r\n\t\t\texcept Exception:\r\n\t\t\t\tpass\r\n\t\t\tfor j in range(0, self.waitBetween):\r\n\t\t\t\t# Wait for 1 second, then check if we should cancel - then wait some more\r\n\t\t\t\tawait asyncio.sleep(1)\r\n\t\t\t\tif not self.toTorment:\r\n\t\t\t\t\treturn",
"async def _cmdf_pmview(self, substr, msg, privilege_level):\n buf = \"**Here's a copy of the PM greeting message template:**\\n\"\n buf += self._pm_msg_template\n await self._client.send_msg(msg, buf)\n return",
"def get_absolute_url(self):\n return ('member_detail', [self.pk])",
"async def on_member_unban(self, guild: Guild, user: User, *args):\n\n if not self._is_tracked(guild, EventPriority.unban):\n return\n\n # Event is sometimes called with 3 arguments\n # Capture occurrence\n await self.errorlog.send(Exception(f\"Additional arguments sent to `on_member_unban`: {args}\"))\n\n em = self.em_base(\n user,\n f\"User {user.mention} ({user.name}) was unbanned\",\n EventColors.unban.value\n )\n\n # Attempt to retrieve unban reason and mod that unbanned from Audit Log\n found, errored, mod, reason = await self._get_last_audit_action(guild, AuditLogAction.unban, user)\n\n # Audit log action found\n # Add details\n if found and not errored:\n em.add_field(\n name=\"Unbanned By\",\n value=f\"{mod.mention}\\n({mod.name}#{mod.discriminator})\"\n )\n em.add_field(\n name=\"Reason\",\n value=reason if reason is not None else \"No reason given\"\n )\n\n # Cannot access audit log or HTTP error prevented access\n elif errored and not found:\n em.add_field(\n name=\"Unbanned By\",\n value=\"Unknown\\nAudit Log inaccessible\"\n )\n em.add_field(\n name=\"Reason\",\n value=\"Irretrievable\\nAudit Log inaccessible\"\n )\n\n # No audit log entry found for ban\n else:\n em.add_field(\n name=\"Unbanned By\",\n value=\"Unknown\\nAudit Log missing data\"\n )\n em.add_field(\n name=\"Reason\",\n value=\"Irretrievable\\nAudit Log missing data or no reason given\"\n )\n\n await self.log_event(em, guild, priority=EventPriority.unban)",
"def __index(request,member_id=-1):\n assert isinstance(request, HttpRequest)\n try:\n member_id = int(member_id) #数値化(数値に対しても問題なく使える)\n except :\n member_id = -1 #例外が出た場合はデフォルトの-1を入れておく\n if member_id!=-1:\n try:\n member = Member.objects.get(pk=member_id)\n except Member.DoesNotExist:\n raise Http404(\"Member does not exist\")\n\n login = request.user and request.user.is_authenticated()\n login_user_id = -1 if not login else request.user.pk\n entry_list = EntryView.get_entry_list('-posted_at',member_id, login_user_id)\n page_no = request.GET.get('page')\n page = _get_page(entry_list, page_no, ENTRY_LIST_PAGE_IN_COUNT )\n auth_form = AuthenticationForm(None, request.POST or None)\n title = ('会員' if member_id==-1 else (member.name +'さん')) + 'のブログ' #タイトル文字列の変更\n return render(\n request,\n 'app/entry_list.html',\n {\n 'title':title, #'会員のブログ',\n 'year':datetime.now().year,\n 'articles':Article.objects.order_by('-released_at')[:5],\n 'blogs':EntryView.get_entry_list('-posted_at',-1, login_user_id)[:5],\n 'entry_list':page.object_list,\n 'auth_form':auth_form,\n 'current_user':request.user,\n 'page' : page,\n 'current_page':request.path #'entry_list'\n }\n )",
"async def on_member_ban(self, guild, target):\n\n entry = await fetch_recent_audit_log_entry(\n self.bot, guild, target=target, action=discord.AuditLogAction.ban, retry=3\n )\n if entry.user == self.bot.user:\n return\n\n action = Ban(\n target=target,\n user=entry.user,\n reason=entry.reason,\n guild_id=guild.id,\n created_at=entry.created_at,\n )\n self.bot.dispatch(\"action_perform\", action)",
"def mentor_list_view(request):\n # TODO: this view\n pass",
"def member_info(self, attempt=1):\n\n response = self.postman.request('member_list', page=attempt)\n\n if (response.status_code == requests.codes.ok):\n if (len(response.json()) != 0):\n self.members += len(response.json())\n\n self.member_info(attempt=attempt + 1)",
"def mentee_list_view(request):\n # TODO: this view\n pass",
"async def warnings(self, ctx):\n server = ctx.message.server\n server_id = server.id\n if not (server_id in self.warnlist2 and self.warnlist2[server_id]):\n await self.bot.say(\"No users are currently punished.\")\n return\n\n def getmname(mid):\n member = discord.utils.get(server.members, id=mid)\n if member:\n if member.nick:\n return '%s (%s)' % (member.nick, member)\n else:\n return str(member)\n else:\n return '(member not present, id #%d)'\n\n headers = ['Member', 'Warning Number', 'Moderator', 'Reason']\n table = []\n disp_table = []\n now = time.time()\n for member_id, data in self.warnlist2[server_id].items():\n\n #if not member_id.isdigit():\n #continue\n print (\"704\")\n member_name = getmname(data['User'])\n warnnum = data['Warning Number']\n punisher_name = getmname(data['Mod'])\n reason = data['Reason']\n table.append((member_name, warnnum, punisher_name, reason))\n\n #for _, name, warnum, mod, reason in sorted(table, key=lambda x: x[0]):\n disp_table.append((member_name, warnnum, punisher_name, reason))\n\n for page in pagify(tabulate(disp_table, headers)):\n await self.bot.say(box(page))",
"async def history(message, client, extra_args):\n\n if not extra_args or not (user_id := utils.from_mention(extra_args[0])):\n user_id = message.author.id\n\n @database.query\n def get_transactions(conn):\n cursor = conn.cursor()\n cursor.execute(\n \"SELECT * FROM funnypts WHERE awarder = ? OR awardee = ? ORDER BY date DESC\", (user_id, user_id))\n transactions = cursor.fetchall()\n cursor.close()\n conn.close()\n return transactions\n\n if not (transactions := get_transactions()):\n await message.channel.send(\"THIS USER HAS NO HISTORY, THEY SHOULD THOUGH\")\n return\n\n @utils.paginated_embeds\n def populate(embed, entry, entry_number):\n awarder = client.get_user(entry[0]).name.split(\"#\", 1)[0]\n awardee = client.get_user(entry[1]).name.split(\"#\", 1)[0]\n transaction = \"GIVEN TO\" if entry[3] > 0 else \"TAKEN FROM\"\n date = entry[4].split(\" \", 1)[0]\n reason = \"\\\"{0}\\\"\".format(entry[2])\n\n embed.add_field(\n name=\"{0} — {2} — {1} • {3}\".format(awarder, awardee, transaction, date), value=reason, inline=False)\n\n title = f\"{client.get_user(user_id).name}'s FUNNYPOINT HISTORY\"\n embeds = populate(title, transactions, page_length=5)\n await utils.sauce_pages(embeds, message, client)",
"async def history(self, ctx, user_id: str):\n\n session = self.bot.helpers.get_db_session()\n try:\n self.bot.log.info(\n f\"CMD {ctx.command} called by {ctx.message.author} ({ctx.message.author.id})\"\n )\n guild = ctx.message.guild\n user = await self.bot.helpers.get_member_or_user(user_id, guild)\n if not user:\n return await ctx.send(\n f\"Unable to find the requested user. Please make sure the user ID or @ mention is valid.\"\n )\n\n (\n embed_result_entries,\n footer_text,\n ) = await self.bot.helpers.get_action_history(session, user, guild)\n\n p = FieldPages(ctx, per_page=8, entries=embed_result_entries,)\n p.embed.color = 0xFF8C00\n p.embed.set_author(\n name=f\"Member: {user} ({user.id})\", icon_url=user.avatar_url\n )\n p.embed.set_footer(text=footer_text)\n await p.paginate()\n except discord.HTTPException as err:\n self.bot.log.exception(\n f\"Discord HTTP Error responding to {ctx.command} request via Msg ID {ctx.message.id}. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n except DBAPIError as err:\n self.bot.log.exception(\n f\"Error logging note to database. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n session.rollback()\n except Exception as err:\n self.bot.log.exception(\n f\"Error responding to {ctx.command} via Msg ID {ctx.message.id}. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n finally:\n session.close()",
"def user_history(username):\n follow_form = FollowForm()\n unfollow_form = UnfollowForm()\n return render_template('user_history.html',\n username=username,\n follow_form=follow_form,\n unfollow_form=unfollow_form)"
] | [
"0.5961462",
"0.53335446",
"0.5297916",
"0.5279688",
"0.5277867",
"0.5154507",
"0.5105471",
"0.5094938",
"0.50347066",
"0.49980646",
"0.4994079",
"0.49669912",
"0.4960998",
"0.4959691",
"0.49547735",
"0.48937735",
"0.4889033",
"0.48623428",
"0.484538",
"0.48425722",
"0.48137978",
"0.48101902",
"0.48012796",
"0.47970212",
"0.4788882",
"0.47862154",
"0.47771662",
"0.47356766",
"0.4724",
"0.47187898"
] | 0.5907632 | 1 |
Denormalize to uint8 [0...255] tensor from a float32 [0...1] tensor. | def denormalize(float32_frame):
if (not isinstance(float32_frame, tf.Tensor) or
float32_frame.dtype != tf.float32):
raise ValueError(f"Invalid input: {float32_frame}")
return tf.image.convert_image_dtype(float32_frame, tf.uint8, saturate=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def float32_to_uint8(inputs):\n return np.uint8(np.clip(np.round(inputs * 255), 0, 255))",
"def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im",
"def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im",
"def denormalize(batch_img: np.ndarray) -> np.ndarray:\n return np.uint8((batch_img + 1) * 127.5)",
"def bfloat16_to_float32(tensor):\n if tensor.dtype == tf.bfloat16:\n return tf.cast(tensor, dtype=tf.float32)\n else:\n return tensor",
"def tensor_to_image(x):\n\n # scale back from [-1,1] to [0,255] \n x = x.add(1).mul_(255).div_(2) # x = ((x + 1)*255 / (2))\n \n if x.is_cuda:\n x = x.cpu()\n\n x = x.data.numpy().astype(np.uint8) # convert to numpy\n\n return x",
"def to_tensor(x, **kwargs):\n return x.transpose(2, 0, 1).astype('float32')",
"def transpose_2d_int8_tensor(tensor: onnx_proto.TensorProto):\n if not isinstance(tensor, onnx_proto.TensorProto):\n raise ValueError(\"Expected input type is an ONNX TensorProto but got %s\" % type(tensor))\n\n if len(tensor.dims) != 2 or tensor.data_type != onnx_proto.TensorProto.INT8:\n raise ValueError(\"Only INT8 2-D tensors can be transposed\")\n\n if tensor.raw_data:\n int32_data = numpy.reshape(numpy.frombuffer(tensor.raw_data, dtype=\"int8\"), tensor.dims)\n int32_transposed_data = numpy.transpose(int32_data, [1, 0])\n tensor.raw_data = int32_transposed_data.tobytes()\n\n else:\n raise ValueError(\"only raw buffer supported\")\n\n return tensor",
"def make8UC(mat):\n mat_256 = mat[:,:]# *255\n mat_256.round()\n mat_8UC = np.uint8(mat_256)\n \n return mat_8UC",
"def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img",
"def rescale_to_uint16(arr):\n UINT16MAX = 65535\n if isinstance(arr, np.ndarray):\n arr = np.maximum(arr, 0.0)\n arr = arr / np.max(arr) * UINT16MAX\n arr = np.minimum(arr, UINT16MAX)\n return arr.astype(np.uint32)\n else:\n import tensorflow as tf\n if isinstance(arr, tf.Tensor):\n raise NotImplementedError\n else:\n raise TypeError(\n \"Requires np.yndarray or tf.Tensor, got {}.\".format(type(arr)))",
"def to_data(x):\n if torch.cuda.is_available():\n x = x.cpu()\n x = x.data.numpy()\n x = ((x +1)*255 / (2)).astype(np.uint8) # rescale to 0-255\n return x",
"def normalize_image(img_arr_uint):\n return img_arr_uint.astype(np.float64) * ONE_BYTE_SCALE",
"def unscale(image):\n return tf.cast(tf.math.multiply(image, 255), tf.uint8)",
"def to_uint(tensor_0to1, target_type='uint8'):\n if isinstance(tensor_0to1, tf.Tensor):\n target_type = tf.as_dtype(target_type)\n tensor_0to1 = _clip_0to1_warn(tensor_0to1)\n tensor_uint = tf.cast(tensor_0to1 * target_type.max, target_type)\n else:\n tensor_0to1 = _clip_0to1_warn(tensor_0to1)\n tensor_uint = (np.iinfo(target_type).max * tensor_0to1).astype(\n target_type)\n return tensor_uint",
"def to_float32(n):\n return np.cast[\"float32\"](n)",
"def to_uint8(f):\n from numpy import array, clip, uint8\n\n img = array(clip(f,0,255),uint8)\n return img",
"def test_f8_xf16_roundtrip(in_dtype, out_dtype):\n check_type_supported(out_dtype)\n\n @triton.jit\n def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):\n offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n mask = offsets < n_elements\n input = tl.load(input_ptr + offsets, mask=mask)\n output = input\n tl.store(output_ptr + offsets, output, mask=mask)\n\n f8_tensor = torch.tensor(range(-128, 128), dtype=torch.int8, device='cuda')\n # f32_to_f8 doesn't handle nan, so we make sure f8_tensor doesn't contain any nan\n all_exp_ones = (f8_tensor & 0b01111100) == 128 - 2**in_dtype.fp_mantissa_width\n f8_tensor[all_exp_ones] = 0\n f8 = triton.reinterpret(f8_tensor, in_dtype)\n n_elements = f8_tensor.numel()\n xf16 = torch.empty_like(f8_tensor, dtype=out_dtype)\n grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)\n copy_kernel[grid](f8, xf16, n_elements, BLOCK_SIZE=1024)\n\n # exponent_mask = 0b01111100 for float8e5\n # exponent_mask = 0b01111000 for float8e4\n exponent_mask = 0b01111111 ^ ((1 << in_dtype.fp_mantissa_width) - 1)\n normal = torch.logical_and((f8_tensor & exponent_mask) != 0, (f8_tensor & exponent_mask) != exponent_mask)\n ref16 = convert_float_to_float32(f8_tensor, in_dtype)\n # WARN: currently only normal float8s are handled\n assert torch.all(xf16[normal] == ref16[normal])\n\n f8_output_tensor = torch.empty_like(xf16, dtype=torch.int8)\n f8_output = triton.reinterpret(f8_output_tensor, in_dtype)\n copy_kernel[grid](xf16, f8_output, n_elements, BLOCK_SIZE=1024)\n\n assert torch.all(f8_tensor == f8_output_tensor)",
"def convert_image(tensor):\n image = tensor.to('cpu').clone().detach()\n image = image.numpy().squeeze()\n image = image.transpose(1, 2, 0)\n \"\"\" Un-normalize \"\"\"\n image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))\n return image.clip(0, 1)",
"def canonicalize(data):\n\n if isinstance(data, np.ndarray):\n if data.dtype == np.uint8:\n data = data.astype(np.float32) / 255.0\n data = torch.from_numpy(np.ascontiguousarray(data))\n\n elif isinstance(data, torch.Tensor):\n if data.dtype == torch.uint8:\n data = data.float() / 255.0\n\n else:\n raise NotImplementedError()\n\n return data",
"def to_float32(elem):\n return elem.astype(np.float32)",
"def convert_image_np(inp):\n inp = inp.numpy().transpose((1, 2, 0))\n inp = (inp*255).astype(np.uint8)\n return inp",
"def float_to_int_32(x):\n return np.float32(x).view(np.int32)",
"def convert_image_to_tensor(image):\n # image = image.astype(np.float32)\n return transform(image)\n # return transform(image)",
"def tensor_to_im(tensor):\n return tensor.reshape(-1, *tensor.shape[2:])[:, None, :, :]",
"def convert_to_fp32(tensor):\n\n def _convert_to_fp32(tensor):\n return tensor.float()\n\n def _is_fp16_bf16_tensor(tensor):\n return hasattr(tensor, \"dtype\") and tensor.dtype in (torch.float16, torch.bfloat16)\n\n return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)",
"def test_quantize_conv_transpose_u8u8(self):\n\n np.random.seed(1)\n model_fp32_path = \"conv_transpose_fp32.onnx\"\n self.construct_model(model_fp32_path)\n data_reader = self.input_feeds(1, {\"input\": [1, 1, 7, 7]})\n\n self.static_quant_test_qdq(\n model_fp32_path,\n data_reader,\n activation_type=QuantType.QUInt8,\n weight_type=QuantType.QUInt8,\n )",
"def normalize(x, dtype='float32'):\n # x/=255.0 raises a TypeError\n # x = x/255.0\n \n # Converting to float32 and normalizing (float32 saves memory)\n x = x.astype(dtype) / 255\n return x",
"def data_convert2float32 (self, data):\r\n data = data.astype(np.float32)\r\n\r\n return data",
"def clip_and_convert_tensor(tensor):\n img = tgm.utils.tensor_to_image(255.0 * tensor) # convert tensor to numpy\n img_cliped = np.clip(img, 0, 255) # clip and reorder the channels\n img = img_cliped.astype('uint8') # convert to uint\n return img"
] | [
"0.72965324",
"0.67540747",
"0.67540747",
"0.6749495",
"0.6719711",
"0.66579753",
"0.6494371",
"0.64181966",
"0.6409996",
"0.63891613",
"0.6387406",
"0.6382331",
"0.6354447",
"0.63429034",
"0.6335926",
"0.6300085",
"0.6289246",
"0.6245077",
"0.6231531",
"0.6220535",
"0.6185468",
"0.6114732",
"0.60651296",
"0.6057128",
"0.5989649",
"0.5974006",
"0.5952037",
"0.5922504",
"0.59174216",
"0.58895147"
] | 0.72193897 | 1 |
Obtain a new Frame batch by applying `fn` on each element. | def apply(self, fn):
return Frame(fn(self.rgb)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def batchify(fn, chunk):\n if chunk is None:\n return fn\n\n def ret(inputs, styles, alpha, feature):\n results = []\n for i in range(0, inputs.shape[0], chunk):\n input_chunk = inputs[i:i + chunk]\n style_chunk = styles[i:i + chunk]\n alpha_chunk = alpha[i:i + chunk] if alpha is not None else None\n feature_chunk = feature[i:i + chunk] if feature is not None else None\n results.append(fn(input_chunk, style_chunk, alpha_chunk, feature_chunk))\n return torch.cat(results, 0)\n return ret",
"def reduce(cls, frames,\n reduce_fn):\n rgb = reduce_fn([f.rgb for f in frames])\n return Frame(rgb)",
"def batchify(fn, chunk, world_fn = lambda x:x, gather_func = None):\n if chunk is None:\n return fn\n def ret(inputs, training = False, world_fn=world_fn):\n embedded = inputs[0]\n attention_poses = inputs[1]\n intrinsic = inputs[2]\n images_features = inputs[3]\n pts = inputs[4]\n\n ret_list = [fn([embedded[i:i+chunk], gather_func( world_fn(pts[i:i+chunk]), attention_poses, intrinsic, images_features),pts[i:i+chunk] ]\n , training=training) for i in range(0, int(embedded.shape[0]), chunk)]\n #necessary to cache computed results from coarse model\n if fn.coarse:\n return tf.concat([pred[0] for pred in ret_list], 0), tf.concat([pred[1] for pred in ret_list], 0)\n else:\n return tf.concat([pred[0] for pred in ret_list], 0), None\n return ret",
"def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x",
"def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n pass",
"def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n pass",
"def apply(df, f):\n return [f(row) for row in df]",
"def apply_(self, function):\n self.sequences = [function(seq) for seq in self.sequences]\n return self",
"def apply(self, fn, column_label):\n return [fn(v) for v in self[column_label]]",
"def batchify_cache(fn, chunk):\n if chunk is None:\n return fn\n\n def ret(inputs, training = False):\n\n ret_list = [fn(inputs[i:i+chunk], training=training) for i in range(0, int(inputs.shape[0]), chunk)]\n\n return tf.concat([ret for ret in ret_list], 0)\n return ret",
"def apply_fn(model: ModuleDef, variables: ModelVarDict, batch: DataSetDict) -> Array:\n output = model.apply(variables, batch[\"image\"], train=False, mutable=False)\n return output",
"def batch_transform(func):\n\n @functools.wraps(func)\n def create_window(*args, **kwargs):\n # passes the user defined function to BatchTransform which it\n # will call instead of self.get_value()\n return BatchTransform(*args, func=func, **kwargs)\n\n return create_window",
"def map(self, f_list: List[Callable[[np.ndarray], int]], axis: int = 0, chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:\n\t\tif hasattr(f_list, '__call__'):\n\t\t\traise ValueError(\"f_list must be a list of functions, not a function itself\")\n\n\t\tresult = []\n\t\tif axis == 0:\n\t\t\trows_per_chunk = chunksize\n\t\t\tfor i in range(len(f_list)):\n\t\t\t\tresult.append(np.zeros(self.shape[0]))\n\t\t\tix = 0\n\t\t\twhile ix < self.shape[0]:\n\t\t\t\trows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)\n\t\t\t\tif selection is not None:\n\t\t\t\t\tchunk = self[ix:ix + rows_per_chunk, :][:, selection]\n\t\t\t\telse:\n\t\t\t\t\tchunk = self[ix:ix + rows_per_chunk, :]\n\t\t\t\tfor i in range(len(f_list)):\n\t\t\t\t\tresult[i][ix:ix + rows_per_chunk] = np.apply_along_axis(f_list[i], 1, chunk)\n\t\t\t\tix = ix + rows_per_chunk\n\t\telif axis == 1:\n\t\t\tcols_per_chunk = chunksize\n\t\t\tfor i in range(len(f_list)):\n\t\t\t\tresult.append(np.zeros(self.shape[1]))\n\t\t\tix = 0\n\t\t\twhile ix < self.shape[1]:\n\t\t\t\tcols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)\n\t\t\t\tif selection is not None:\n\t\t\t\t\tchunk = self[:, ix:ix + cols_per_chunk][selection, :]\n\t\t\t\telse:\n\t\t\t\t\tchunk = self[:, ix:ix + cols_per_chunk]\n\t\t\t\tfor i in range(len(f_list)):\n\t\t\t\t\tresult[i][ix:ix + cols_per_chunk] = np.apply_along_axis(f_list[i], 0, chunk)\n\t\t\t\tix = ix + cols_per_chunk\n\t\treturn result",
"def from_function(cls, rows, columns, function):\n return cls([[function(x, y) for y in range(columns)]\n for x in range(rows)])",
"def _get_batch_fn(dataset):\n def get_batch(idx):\n x_bat = dataset['input'][idx]\n y_bat = dataset['label'][idx]\n x_bat, y_bat = preprocess(x_bat, y_bat)\n\n return x_bat, y_bat\n\n return get_batch",
"def apply(filter_fn, img):\n width, height = img.size\n newimg = Image.new(\"RGB\", (width, height))\n for j in range(1, height - 1):\n for i in range(1, width - 1):\n newimg.putpixel((i, j), filter_fn(img, i, j))\n return newimg",
"def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])",
"def run_func_in_windows(img: np.ndarray, func, columns: int = 4, rows: int = 4):\n h, w, _ = img.shape\n\n w_step = int(w / columns)\n h_step = int(h / rows)\n\n for c in range(0, columns):\n for r in range(0, rows):\n func(img[r * h_step : (r + 1) * h_step, c * w_step : (c + 1) * w_step])\n return img",
"def map(self, func, *args, **kwargs):\n\n if (self.n_workers == 1) or (self.scheduler == 'ray'):\n executor_pool = _executor_dummy\n ranger = range\n else:\n executor_pool = self.executor\n ranger = trange\n\n if self.scheduler == 'ray':\n\n if self.padding:\n raise SyntaxError('Ray cannot be used with array padding.')\n\n import ray\n\n if isinstance(self.data, rio.io.DatasetReader):\n data_id = self.data.name\n else:\n data_id = ray.put(self.data)\n\n results = []\n\n with executor_pool(self.n_workers) as executor:\n\n # Iterate over the windows in chunks\n for wchunk in ranger(0, self.n_windows, self.n_chunks):\n\n if self.padding:\n\n window_slice = self.windows[\n wchunk : wchunk + self.n_chunks\n ]\n\n # Read the padded window\n if len(self.data.shape) == 2:\n data_gen = (\n (\n self.data[\n w[1].row_off : w[1].row_off + w[1].height,\n w[1].col_off : w[1].col_off + w[1].width,\n ],\n widx + wchunk,\n *args,\n )\n for widx, w in enumerate(window_slice)\n )\n elif len(self.data.shape) == 3:\n data_gen = (\n (\n self.data[\n :,\n w[1].row_off : w[1].row_off + w[1].height,\n w[1].col_off : w[1].col_off + w[1].width,\n ],\n widx + wchunk,\n *args,\n )\n for widx, w in enumerate(window_slice)\n )\n else:\n data_gen = (\n (\n self.data[\n :,\n :,\n w[1].row_off : w[1].row_off + w[1].height,\n w[1].col_off : w[1].col_off + w[1].width,\n ],\n widx + wchunk,\n *args,\n )\n for widx, w in enumerate(window_slice)\n )\n\n else:\n\n window_slice = self.slices[wchunk : wchunk + self.n_chunks]\n\n if self.scheduler == 'ray':\n data_gen = (\n (data_id, slice_, widx + wchunk, *args)\n for widx, slice_ in enumerate(window_slice)\n )\n else:\n data_gen = (\n (self.data[slice_], widx + wchunk, *args)\n for widx, slice_ in enumerate(window_slice)\n )\n\n if (self.n_workers == 1) and (self.scheduler != 'ray'):\n\n for result in map(func, data_gen):\n results.append(result)\n\n else:\n\n if self.scheduler == 'mpool':\n\n for result in executor.imap(func, data_gen, **kwargs):\n results.append(result)\n\n elif self.scheduler == 'ray':\n\n if isinstance(func, ray.util.actor_pool.ActorPool):\n\n for result in tqdm(\n func.map(\n lambda a, v: a.exec_task.remote(*v),\n data_gen,\n ),\n total=len(window_slice),\n ):\n results.append(result)\n\n else:\n\n if isinstance(func, ray.actor.ActorHandle):\n futures = [\n func.exec_task.remote(*dargs)\n for dargs in data_gen\n ]\n else:\n futures = [\n func.remote(*dargs) for dargs in data_gen\n ]\n\n if self.get_ray:\n\n with tqdm(total=len(futures)) as pbar:\n\n results_ = []\n while len(futures):\n\n done_id, futures = ray.wait(futures)\n results_.append(ray.get(done_id[0]))\n\n pbar.update(1)\n\n results += results_\n\n else:\n results += futures\n\n else:\n\n for result in executor.map(func, data_gen):\n results.append(result)\n\n if self.scheduler == 'ray':\n del data_id\n\n return results",
"def input_fn_builder(features, seq_length):\n\n all_label_ids = []\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n\n for feature in features:\n all_label_ids.append(feature.label_ids)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices(\n {\n \"label_ids\": tf.constant(\n all_label_ids, shape=[num_examples], dtype=tf.int32\n ),\n \"input_ids\": tf.constant(\n all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"input_mask\": tf.constant(\n all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"segment_ids\": tf.constant(\n all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n }\n )\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn",
"def process(dataset, f):\n logger.info('processing dataset ({0})'.format(len(dataset.samples)))\n for sample in dataset.samples:\n sample.proc = f(sample.image)",
"def process_in_chunks(function, *args, batch_size, out=None, **kwargs):\n total_size = args[0].shape[0]\n first_output = function(*[x[0: batch_size] for x in args])\n output_shape = (total_size,) + tuple(first_output.shape[1:])\n if out is None:\n out = torch.zeros(*output_shape, dtype=first_output.dtype, device=first_output.device,\n layout=first_output.layout, **kwargs)\n\n out[0: batch_size] = first_output\n for i in range(batch_size, total_size, batch_size):\n batch_ix = slice(i, min(i + batch_size, total_size))\n out[batch_ix] = function(*[x[batch_ix] for x in args])\n return out",
"def minibatched_call(fn, mbsize, *args, **kwargs):\n tensor_list, _ = tree_util.tree_flatten((args, kwargs))\n batchsize = tensor_list[0].shape[0]\n mbs = [\n fn(*tree_slice(args, inds), **tree_slice(kwargs, inds))\n for inds in th.arange(batchsize).split(mbsize)\n ]\n return tree_cat(mbs, dim=0)",
"def each(self, func):\n\n for i in self._:\n func(i)\n return self",
"def process_frame_seq(data, index, functions):\n results_list = []\n failed = False\n for function_id in functions:\n function = function_mapper[function_id]\n result = function(data.slice, functions[function_id])\n results_list.append(result)\n if not result.res:\n failed = True\n\n results = ct.Results(index, failed, results_list)\n return results",
"def input_fn_builder(features, seq_length):\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(all_input_ids, \n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn",
"def multichannel(fcn):\n return lambda args: (fcn(*args), )",
"def map(self, function):\n return FunctionalWrapper(map(function, self.data))",
"def batch(self, batch_size=None):\n if self.gen_batches:\n assert batch_size is None, 'Cannot enforce a batch size if `func()` returns batches!'\n batch = self._queue.dequeue()\n for name, pl in self.placeholders.items():\n shape = pl.get_shape()\n if shape.ndims is not None:\n batch[name].set_shape(shape.as_list())\n\n else:\n batch = self._queue.dequeue_many(batch_size)\n\n return Struct.make(batch)",
"def apply(L, f):\n\n result = []\n for i in L:\n result.append(f(i))\n\n return result"
] | [
"0.61101085",
"0.61065644",
"0.59294605",
"0.5812814",
"0.57043624",
"0.57043624",
"0.5694395",
"0.56770957",
"0.56308436",
"0.5596898",
"0.5539138",
"0.5528681",
"0.53403354",
"0.5315339",
"0.5265743",
"0.52645123",
"0.5256261",
"0.5194063",
"0.51897436",
"0.51605433",
"0.51461285",
"0.5138528",
"0.51203126",
"0.5107973",
"0.51001287",
"0.5097295",
"0.5095107",
"0.5057869",
"0.5054282",
"0.5042397"
] | 0.7081017 | 0 |
Obtain a new Frame batch by applying `reduce_fn` on each element. | def reduce(cls, frames,
reduce_fn):
rgb = reduce_fn([f.rgb for f in frames])
return Frame(rgb) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply(self, fn):\n return Frame(fn(self.rgb))",
"def reduce(self, function):\n return reduce(function, self.data)",
"def batchify(fn, chunk, world_fn = lambda x:x, gather_func = None):\n if chunk is None:\n return fn\n def ret(inputs, training = False, world_fn=world_fn):\n embedded = inputs[0]\n attention_poses = inputs[1]\n intrinsic = inputs[2]\n images_features = inputs[3]\n pts = inputs[4]\n\n ret_list = [fn([embedded[i:i+chunk], gather_func( world_fn(pts[i:i+chunk]), attention_poses, intrinsic, images_features),pts[i:i+chunk] ]\n , training=training) for i in range(0, int(embedded.shape[0]), chunk)]\n #necessary to cache computed results from coarse model\n if fn.coarse:\n return tf.concat([pred[0] for pred in ret_list], 0), tf.concat([pred[1] for pred in ret_list], 0)\n else:\n return tf.concat([pred[0] for pred in ret_list], 0), None\n return ret",
"def apply_and_reduce_nb(a, apply_func_nb, apply_args, reduce_func_nb, reduce_args):\n out = np.full(a.shape[1], np.nan, dtype=np.float_)\n\n for col in range(a.shape[1]):\n mapped = apply_func_nb(col, a[:, col], *apply_args)\n out[col] = reduce_func_nb(col, mapped, *reduce_args)\n return out",
"def reduce_nb(a, reduce_func_nb, *args):\n out = np.full(a.shape[1], np.nan, dtype=np.float_)\n\n for col in range(a.shape[1]):\n out[col] = reduce_func_nb(col, a[:, col], *args)\n return out",
"def do_reduce(iterable, fn, initial=None):\n if initial is not None:\n return reduce(GlobalFns(fn), iterable, initial)\n else:\n return reduce(GlobalFns(fn), iterable)",
"def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg",
"def unbatch():\n\n def _apply_fn(dataset):\n return dataset.unbatch()\n\n return _apply_fn",
"def batchify(fn, chunk):\n if chunk is None:\n return fn\n\n def ret(inputs, styles, alpha, feature):\n results = []\n for i in range(0, inputs.shape[0], chunk):\n input_chunk = inputs[i:i + chunk]\n style_chunk = styles[i:i + chunk]\n alpha_chunk = alpha[i:i + chunk] if alpha is not None else None\n feature_chunk = feature[i:i + chunk] if feature is not None else None\n results.append(fn(input_chunk, style_chunk, alpha_chunk, feature_chunk))\n return torch.cat(results, 0)\n return ret",
"def batchify_cache(fn, chunk):\n if chunk is None:\n return fn\n\n def ret(inputs, training = False):\n\n ret_list = [fn(inputs[i:i+chunk], training=training) for i in range(0, int(inputs.shape[0]), chunk)]\n\n return tf.concat([ret for ret in ret_list], 0)\n return ret",
"def apply(df, f):\n return [f(row) for row in df]",
"def pool(inputs, init, reduce_fn, window_shape, strides, padding):\n num_batch_dims = inputs.ndim - (len(window_shape) + 1)\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(\n strides\n ), f\"len({window_shape}) must equal len({strides})\"\n strides = (1,) * num_batch_dims + strides + (1,)\n dims = (1,) * num_batch_dims + window_shape + (1,)\n\n is_single_input = False\n if num_batch_dims == 0:\n # add singleton batch dimension because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n strides = (1,) + strides\n dims = (1,) + dims\n is_single_input = True\n\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert len(padding) == len(window_shape), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\"\n )\n assert all(\n [len(x) == 2 for x in padding]\n ), f\"each entry in padding {padding} must be length 2\"\n padding = ((0, 0),) + padding + ((0, 0),)\n y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n if is_single_input:\n y = jnp.squeeze(y, axis=0)\n return y",
"def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x",
"def apply_over_rows(func, data, **kwargs):\n\n axis = 1\n cpu_cnt = multiprocessing.cpu_count()\n \n chunks = [(func, axis, split_data, kwargs) for split_data in np.array_split(data, cpu_cnt) if split_data.size > 0]\n\n pool = multiprocessing.Pool()\n map_results = pool.starmap(np_apply_along_axis, chunks)\n \n pool.close()\n pool.join()\n\n return np.concatenate(map_results)",
"def reduce(self, func):\n execute = ExecutorReduce(func)\n self._funcs.append(execute)\n return self",
"def map(self, f_list: List[Callable[[np.ndarray], int]], axis: int = 0, chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:\n\t\tif hasattr(f_list, '__call__'):\n\t\t\traise ValueError(\"f_list must be a list of functions, not a function itself\")\n\n\t\tresult = []\n\t\tif axis == 0:\n\t\t\trows_per_chunk = chunksize\n\t\t\tfor i in range(len(f_list)):\n\t\t\t\tresult.append(np.zeros(self.shape[0]))\n\t\t\tix = 0\n\t\t\twhile ix < self.shape[0]:\n\t\t\t\trows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)\n\t\t\t\tif selection is not None:\n\t\t\t\t\tchunk = self[ix:ix + rows_per_chunk, :][:, selection]\n\t\t\t\telse:\n\t\t\t\t\tchunk = self[ix:ix + rows_per_chunk, :]\n\t\t\t\tfor i in range(len(f_list)):\n\t\t\t\t\tresult[i][ix:ix + rows_per_chunk] = np.apply_along_axis(f_list[i], 1, chunk)\n\t\t\t\tix = ix + rows_per_chunk\n\t\telif axis == 1:\n\t\t\tcols_per_chunk = chunksize\n\t\t\tfor i in range(len(f_list)):\n\t\t\t\tresult.append(np.zeros(self.shape[1]))\n\t\t\tix = 0\n\t\t\twhile ix < self.shape[1]:\n\t\t\t\tcols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)\n\t\t\t\tif selection is not None:\n\t\t\t\t\tchunk = self[:, ix:ix + cols_per_chunk][selection, :]\n\t\t\t\telse:\n\t\t\t\t\tchunk = self[:, ix:ix + cols_per_chunk]\n\t\t\t\tfor i in range(len(f_list)):\n\t\t\t\t\tresult[i][ix:ix + cols_per_chunk] = np.apply_along_axis(f_list[i], 0, chunk)\n\t\t\t\tix = ix + cols_per_chunk\n\t\treturn result",
"def reduce_grouped_nb(a, group_lens, reduce_func_nb, *args):\n out = np.empty(len(group_lens), dtype=np.float_)\n from_col = 0\n for group in range(len(group_lens)):\n to_col = from_col + group_lens[group]\n out[group] = reduce_func_nb(group, a[:, from_col:to_col], *args)\n from_col = to_col\n return out",
"def batch_transform(func):\n\n @functools.wraps(func)\n def create_window(*args, **kwargs):\n # passes the user defined function to BatchTransform which it\n # will call instead of self.get_value()\n return BatchTransform(*args, func=func, **kwargs)\n\n return create_window",
"def valuesReducer(aggregation_fn):\n return partial(reduceWith, aggregation_fn)",
"def foldr(fn,\r\n sequences,\r\n outputs_info,\r\n non_sequences=None,\r\n mode=None,\r\n name=None):\r\n return reduce(fn=fn,\r\n sequences=sequences,\r\n outputs_info=outputs_info,\r\n non_sequences=non_sequences,\r\n go_backwards=True,\r\n mode=mode,\r\n name=name)",
"def featurize_batch(\n self, input_record_list: Sequence[InputRecord]\n ) -> Sequence[OutputRecord]:\n return [self.featurize(record) for record in input_record_list]",
"def batch(byte_array, funcs):\n result = []\n length = bytes_to_int(byte_array[0:4])\n item_size = bytes_to_int(byte_array[4:8])\n for i in range(0, length):\n chunk = byte_array[8+i*item_size:8+(i+1)*item_size]\n for f in funcs:\n f(chunk)\n return result",
"def apply_fn(model: ModuleDef, variables: ModelVarDict, batch: DataSetDict) -> Array:\n output = model.apply(variables, batch[\"image\"], train=False, mutable=False)\n return output",
"def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n pass",
"def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n pass",
"def foldl(fn,\r\n sequences,\r\n outputs_info,\r\n non_sequences=None,\r\n mode=None,\r\n name=None):\r\n return reduce(fn=fn,\r\n sequences=sequences,\r\n outputs_info=outputs_info,\r\n non_sequences=non_sequences,\r\n go_backwards=False,\r\n mode=mode,\r\n name=name)",
"def _fold_rec(cls, f, agg, next):\n\n if next is None:\n return agg\n (val, next) = next\n return cls._fold_rec(f, f(val, agg), next)",
"def map(self, func, *args, **kwargs):\n\n if (self.n_workers == 1) or (self.scheduler == 'ray'):\n executor_pool = _executor_dummy\n ranger = range\n else:\n executor_pool = self.executor\n ranger = trange\n\n if self.scheduler == 'ray':\n\n if self.padding:\n raise SyntaxError('Ray cannot be used with array padding.')\n\n import ray\n\n if isinstance(self.data, rio.io.DatasetReader):\n data_id = self.data.name\n else:\n data_id = ray.put(self.data)\n\n results = []\n\n with executor_pool(self.n_workers) as executor:\n\n # Iterate over the windows in chunks\n for wchunk in ranger(0, self.n_windows, self.n_chunks):\n\n if self.padding:\n\n window_slice = self.windows[\n wchunk : wchunk + self.n_chunks\n ]\n\n # Read the padded window\n if len(self.data.shape) == 2:\n data_gen = (\n (\n self.data[\n w[1].row_off : w[1].row_off + w[1].height,\n w[1].col_off : w[1].col_off + w[1].width,\n ],\n widx + wchunk,\n *args,\n )\n for widx, w in enumerate(window_slice)\n )\n elif len(self.data.shape) == 3:\n data_gen = (\n (\n self.data[\n :,\n w[1].row_off : w[1].row_off + w[1].height,\n w[1].col_off : w[1].col_off + w[1].width,\n ],\n widx + wchunk,\n *args,\n )\n for widx, w in enumerate(window_slice)\n )\n else:\n data_gen = (\n (\n self.data[\n :,\n :,\n w[1].row_off : w[1].row_off + w[1].height,\n w[1].col_off : w[1].col_off + w[1].width,\n ],\n widx + wchunk,\n *args,\n )\n for widx, w in enumerate(window_slice)\n )\n\n else:\n\n window_slice = self.slices[wchunk : wchunk + self.n_chunks]\n\n if self.scheduler == 'ray':\n data_gen = (\n (data_id, slice_, widx + wchunk, *args)\n for widx, slice_ in enumerate(window_slice)\n )\n else:\n data_gen = (\n (self.data[slice_], widx + wchunk, *args)\n for widx, slice_ in enumerate(window_slice)\n )\n\n if (self.n_workers == 1) and (self.scheduler != 'ray'):\n\n for result in map(func, data_gen):\n results.append(result)\n\n else:\n\n if self.scheduler == 'mpool':\n\n for result in executor.imap(func, data_gen, **kwargs):\n results.append(result)\n\n elif self.scheduler == 'ray':\n\n if isinstance(func, ray.util.actor_pool.ActorPool):\n\n for result in tqdm(\n func.map(\n lambda a, v: a.exec_task.remote(*v),\n data_gen,\n ),\n total=len(window_slice),\n ):\n results.append(result)\n\n else:\n\n if isinstance(func, ray.actor.ActorHandle):\n futures = [\n func.exec_task.remote(*dargs)\n for dargs in data_gen\n ]\n else:\n futures = [\n func.remote(*dargs) for dargs in data_gen\n ]\n\n if self.get_ray:\n\n with tqdm(total=len(futures)) as pbar:\n\n results_ = []\n while len(futures):\n\n done_id, futures = ray.wait(futures)\n results_.append(ray.get(done_id[0]))\n\n pbar.update(1)\n\n results += results_\n\n else:\n results += futures\n\n else:\n\n for result in executor.map(func, data_gen):\n results.append(result)\n\n if self.scheduler == 'ray':\n del data_id\n\n return results",
"def call(self, x):\n self._check_shape(x.shape)\n return tf.concat(tf.unstack(x, axis=self._axis), axis=0)",
"def collect_fn_local(batch):\r\n max_detection = max(list(map(lambda x: len(x[4]), batch)))\r\n for i in range(len(batch)):\r\n batch[i] = list(batch[i]) # because the element in the batch is a tuple\r\n dummy = torch.zeros((1,128,64), dtype=batch[i][4][0].dtype)\r\n temp = batch[i][4]\r\n # make the detection to the same length in order to stack the\r\n while temp.size(0) < max_detection:\r\n # while len(temp) < max_detection:\r\n temp = torch.cat((temp, dummy))\r\n # temp.append(dummy)\r\n batch[i][4] = temp\r\n \r\n return default_collate(batch)"
] | [
"0.5898605",
"0.5626834",
"0.5527074",
"0.54001063",
"0.5352686",
"0.52958137",
"0.5288031",
"0.52771777",
"0.5268288",
"0.524487",
"0.52035064",
"0.51752454",
"0.51169777",
"0.50755507",
"0.5073291",
"0.5058255",
"0.5054055",
"0.50492036",
"0.5028246",
"0.49981123",
"0.49937952",
"0.49905738",
"0.49411577",
"0.49310967",
"0.49310967",
"0.49272883",
"0.4922252",
"0.49103743",
"0.48898682",
"0.48892134"
] | 0.71606076 | 0 |
Create an instance from a uint8 rgb. | def make(cls, rgb_uint8):
if rgb_uint8.dtype != tf.uint8:
raise ValueError("Need uint8!")
rgb = normalize_for_rgb(rgb_uint8)
instance = cls(rgb)
instance.validate_shape()
return instance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, rgb):\n \n ## The following are this class's attributes\n self.r = rgb[0]\n self.g = rgb[1]\n self.b = rgb[2]",
"def __init__(self, rgb):\n \n ## The following are this class's attributes\n self.r = rgb[0]\n self.g = rgb[1]\n self.b = rgb[2]",
"def _from_rgb(self, rgb):\r\n return \"#%02x%02x%02x\" % rgb",
"def from_rgb(self, rgb):\n return \"#%02x%02x%02x\" % rgb",
"def new_from_rgb_hex(cls, hex_str):\r\n\r\n colorstring = hex_str.strip()\r\n if colorstring[0] == '#':\r\n colorstring = colorstring[1:]\r\n if len(colorstring) != 6:\r\n raise ValueError(\"input #%s is not in #RRGGBB format\" % colorstring)\r\n r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]\r\n r, g, b = [int(n, 16) / 255.0 for n in (r, g, b)]\r\n return cls(r, g, b)",
"def _from_rgb(rgb):\n return \"#%02x%02x%02x\" % rgb",
"def from_string(cls, text_color):\n\n a = 255\n try:\n r, g, b, a = text_color.replace('rgb(', '').replace(')', '').split(',')\n except ValueError:\n r, g, b = text_color.replace('rgb(', '').replace(')', '').split(',')\n\n return cls(int(r), int(g), int(b), int(a))",
"def from_color(cls, color):\n\n color = ('rgb(%d, %d, %d, %d)' % color.getRgb())\n return cls.from_string(color)",
"def frombytes(mode, size, data, decoder_name=\"raw\", *args):\r\n\r\n _check_size(size)\r\n \r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n\r\n if decoder_name == \"raw\" and args == ():\r\n args = mode\r\n\r\n im = new(mode, size)\r\n im.frombytes(mode, size, data, decoder_name, args)\r\n return im",
"def __init__(self, *rgb):\n self.alpha = 255\n if len(rgb) == 1:\n\t #Accept a string in the hext fromat made by color_rgb func.\n\t if isinstance(rgb[0],str):\n self.rgb = rgb_color(rgb[0])\n\t else:\n self.rgb=rgb[0]\n elif len(rgb) == 3:\n self.rgb = rgb\n elif len(rgb) == 4:\n self.rgb = rgb[:-1]\n self.alpha = rgb[-1]\n else:\n raise AttributeError, \"invalid arguments to Color(); needs at least 3 integers: red, green, blue (transparency optional)\"\n self.rgb = map(lambda v: int(max(min(v,255),0)), self.rgb)",
"def fromRGB(self, *col):\n self.HEX = RGBtoHEX(*self._parse_input(cRGB.ColRGB, *col))\n self.update(self.HEX)\n return self",
"def pack_argb8(pixel):\n\n r, g, b, a = pixel\n if a == 0:\n value = 0\n else:\n value = (a & 0xc0) | ((r & 0xc0) >> 2) | ((g & 0xc0) >> 4) | ((b & 0xc0) >> 6)\n return value",
"def __init__(self, scale, rgb):\n\n super(RGBColor, self).__init__(scale)\n self.rgb = rgb",
"def from_bytes(data):\n\tstream = Gio.MemoryInputStream.new_from_bytes(GLib.Bytes.new(data))\n\tpixbuf = GdkPixbuf.Pixbuf.new_from_stream(stream)\n\treturn pixbuf",
"def unpack_argb8(value):\n a = ((value >> 6) & 0x03) * 0x55\n r = ((value >> 4) & 0x03) * 0x55\n g = ((value >> 2) & 0x03) * 0x55\n b = ((value) & 0x03) * 0x55\n return (r, g, b, a)",
"def create_from_array(cls, array: np.ndarray) -> \"TensorImage\":\n if array.dtype != np.uint8:\n raise ValueError(\"Expect numpy array with dtype=uint8.\")\n\n image_data = image_utils.ImageData(np.squeeze(array))\n return cls(image_data)",
"def from_binary(cls, packed_data):\n\n # num + 1 because of crc16\n num_values = cls.num_properties() + 1\n unpacked = struct.unpack('H'*num_values, packed_data)\n crc = [unpacked[-1]]\n\n start = unpacked[0]\n peep = pressure_to_cm_h2o(unpacked[1])\n freq = unpacked[2]\n ratio = unpacked[3]\n pressure = pressure_to_cm_h2o(unpacked[4]) - peep\n oxygen = unpacked[5]\n return cls(start, peep, freq, ratio, pressure, oxygen)",
"def parse(cls, data: bytes) -> Pixmap:\n assert len(data) >= 14\n return cls(*(unpack(\">3I3B\", data[:15]) + (data[15:],)))",
"def __init__(self, *args):\n _itkRGBAPixelPython.itkRGBAPixelUC_swiginit(self,_itkRGBAPixelPython.new_itkRGBAPixelUC(*args))",
"def create_unique_color_uchar(tag, hue_step=0.41):\n r, g, b = create_unique_color_float(tag, hue_step)\n return int(255*r), int(255*g), int(255*b)",
"def fromInts(r, g, b):\n return IColor(r/255.,g/255.,b/255.)",
"def frombytes(self, mode, size, data, decoder_name=\"raw\", *args):\r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n # default format\r\n if decoder_name == \"raw\" and args == ():\r\n args = self.mode\r\n\r\n # unpack data\r\n channels, depth = self._get_channels_and_depth(mode)\r\n self._instance = np.fromstring(data, dtype=depth)\r\n try:\r\n self._instance = self._instance.reshape((size[1], size[0], channels))\r\n except:\r\n raise ValueError(\"not enough image data\")\r\n try:\r\n self._instance = self._instance.astype(depth)\r\n if channels == 3:\r\n self._instance = cv2.cvtColor(self._instance, cv2.COLOR_BGR2RGB)\r\n elif channels == 4:\r\n self._instance = cv2.cvtColor(self._instance, cv2.COLOR_BGRA2RGBA)\r\n except:\r\n raise ValueError(\"cannot decode image data\")",
"def test_fromarray_rgb_fail():\n arr = numpy.zeros((20, 10, 3), dtype='float')\n\n parameters = {'data': [arr]}\n\n images.fromarray(parameters).convert('RGB')",
"def decodeRGB(self, rgb):\n r, g, b = rgb\n sid = r*65536 + g*256 + b\n if sid < 16777215: # 2**24 - 1\n return sid # it's a valid sid",
"def decodeRGB(self, rgb):\n r, g, b = rgb\n sid = r*65536 + g*256 + b\n if sid < 16777215: # 2**24 - 1\n return sid # it's a valid sid",
"def __init__(self, r, g, b):\n if r < 0 or r > 255: raise ValueError(\"r value is out of range: %d\"%r)\n if g < 0 or g > 255: raise ValueError(\"g value is out of range: %d\"%g)\n if b < 0 or b > 255: raise ValueError(\"b value is out of range: %d\"%b)\n \n self.r, self.g, self.b = r, g, b",
"def from_rgb(r, g, b) -> str:\n return \"#{0:02x}{1:02x}{2:02x}\".format(r, g, b)",
"def fromhls(self, h, l, s):\n\n r, g, b = hls_to_rgb(h, l, s)\n self.r = round_int(r * 255.0) & 0xFF\n self.g = round_int(g * 255.0) & 0xFF\n self.b = round_int(b * 255.0) & 0xFF",
"def create_blank(w, h, rgb_color=(0, 0, 0)):\n image = np.zeros((h, w), np.uint8)\n color = tuple(reversed(rgb_color))\n image[:] = 0\n return image",
"def create_blank(w, h, rgb_color=(0, 0, 0)):\n image = np.zeros((h, w), np.uint8)\n color = tuple(reversed(rgb_color))\n image[:] = 0\n return image"
] | [
"0.66057116",
"0.66057116",
"0.61717176",
"0.59638083",
"0.5919522",
"0.5902202",
"0.58918345",
"0.58142704",
"0.57922655",
"0.576885",
"0.57604164",
"0.5719106",
"0.57088804",
"0.56110096",
"0.56023633",
"0.55925435",
"0.55275285",
"0.55231756",
"0.5508837",
"0.5498434",
"0.54728836",
"0.5467981",
"0.5467693",
"0.5368364",
"0.5368364",
"0.5338444",
"0.53339136",
"0.5333874",
"0.53245574",
"0.53245574"
] | 0.81517035 | 0 |
Raise ValueError if we have invalid shapes. | def validate_shape(self):
if len(self._first_rgb.shape) != 5:
raise ValueError(f"Invalid shape: {self._first_rgb.shape}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _shape_check(self, X, y):\n if not len(y.shape) > 1:\n raise RuntimeError(\"The shape of y is incorrect.\")\n if y.shape != X.shape[:-1]:\n raise RuntimeError(\"X and y must have the same number of \" +\n \"samples and microstructure shape.\")\n if X.shape[-1] != 3:\n raise RuntimeError(\"X must have 3 continuous local states \" +\n \"(euler angles)\")",
"def test_invalid_input_shape(self):\r\n seed_rng()\r\n verbose = 0\r\n random = True\r\n print_ = False\r\n ones = False\r\n if ones:\r\n random = False\r\n\r\n global mode_with_gpu\r\n mode_with_gpu_orig = mode_with_gpu\r\n try:\r\n if theano.config.mode in ['DebugMode', 'DEBUG_MODE']:\r\n mode_with_gpu = theano.compile.mode.get_mode(\r\n 'FAST_RUN').including('gpu')\r\n for mode in ['valid', 'full']:\r\n for shapes in [((3, 2, 8, 8), (4, 2, 5, 5), (8, 8)),\r\n ((3, 2, 8, 8), (4, 2, 5, 5), (5, 8)),\r\n #((3, 2, 8, 8), (4, 2, 5, 5), (8, 5)),\r\n # We use only the number of columns.\r\n ]:\r\n\r\n self.assertRaises(ValueError, _params_allgood,\r\n shapes[0], shapes[1],\r\n verbose=verbose, random=random,\r\n mode=mode,\r\n print_=print_, ones=ones,\r\n compile_kshp=shapes[2])\r\n finally:\r\n mode_with_gpu = mode_with_gpu_orig",
"def _check_shape(self, obj, expected_shape):\n if self.shape != expected_shape:\n raise ValueError(\n '%s expects internal signal %s to be %s, but it is %s' % (\n obj, self.name, Shaped(expected_shape).describe_shape(),\n self.describe_shape()))",
"def test_invalid_input_shape(self):\r\n seed_rng()\r\n verbose = 0\r\n random = True\r\n print_ = False\r\n ones = False\r\n if ones:\r\n random = False\r\n\r\n global theano_mode\r\n theano_mode_orig = theano_mode\r\n try:\r\n if theano.config.mode in ['DebugMode', 'DEBUG_MODE']:\r\n theano_mode = theano.compile.mode.get_mode(\r\n 'FAST_RUN').including('gpu')\r\n for mode in ['valid', 'full']:\r\n for shapes in [((3, 2, 8, 8), (4, 2, 5, 5), (8, 8)),\r\n ((3, 2, 8, 8), (4, 2, 5, 5), (5, 8)),\r\n #((3, 2, 8, 8), (4, 2, 5, 5), (8, 5)),\r\n # We use only the number of columns.\r\n ]:\r\n\r\n self.assertRaises(ValueError, _params_allgood,\r\n shapes[0], shapes[1],\r\n verbose=verbose, random=random,\r\n mode=mode,\r\n print_=print_, ones=ones,\r\n compile_kshp=shapes[2])\r\n finally:\r\n theano_mode = theano_mode_orig",
"def test_shape_error_1():\n dmd = DMD()\n with raises(ValueError):\n dmd.fit(X=sample_data, Y=sample_data[:, 1:])",
"def test_shape_error_2():\n dmd = DMD()\n with raises(ValueError):\n dmd.fit(X=sample_data, Y=sample_data[1:])",
"def test_shape_error(self):\n raise unittest.SkipTest(\"Failing after fixing Poly unsoundness #4878\")\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n self.CheckShapePolymorphism(\n lambda x, y: x + y,\n input_signature=[tf.TensorSpec([None]), tf.TensorSpec([4])],\n in_shapes=[\"(v,)\", \"(4,)\"],\n expected_output_signature=tf.TensorSpec([None]))\n\n four_ones = np.ones((4,))\n # We get the error even if we use correct actual arguments\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n jax2tf.convert(lambda x, y: x + y,\n in_shapes=[\"(v,)\", \"(4,)\"])(four_ones, four_ones)\n\n with self.assertRaisesRegex(TypeError,\n re.escape(\"dot_general requires contracting dimensions to have the same shape, got [4] and [v].\")):\n jax2tf.convert(lambda x: jnp.matmul(x, x),\n in_shapes=[\"(v, 4)\"])(np.ones((4, 4)))\n\n # TODO: this is an opportunity to improve the translation, should not error\n with self.assertRaisesRegex(TypeError,\n \"Only integers, .* tensors are valid indices, got 0\"):\n jax2tf.convert(lambda x: jnp.split(x, 2),\n in_shapes=[\"(2*v,)\"])(four_ones)",
"def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg",
"def _ShapeMismatch(a, b):\n return 'Shapes do not match, %s v. %s' % (str(a), str(b))",
"def check_data_shape(self, data_shape):\n if not len(data_shape) == 2:\n raise ValueError('data_shape should have length 2')\n if not data_shape[0] == 1:\n raise ValueError('This iterator expects inputs to have 1 channels.')",
"def _validate_matrix_shape(matrix: FieldMatrix, shape: Tuple[int, int]):\n if len(matrix) != shape[0]:\n raise ValueError(\n 'Invalid matrix row len = %d: not consistent with expected shape: %s.' %\n (len(matrix), shape))\n\n for m in matrix:\n if len(m) != shape[1]:\n raise ValueError(\n 'Invalid matrix col len = %d: not consistent with expected shape: %s.'\n % (len(m), shape))",
"def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")",
"def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg",
"def _verify_integrity(self):\n if len(self.data.shape) != 1:\n raise ValueError(\n \"Data array must be one dimensional \"\n \"(is {})\".format(len(self.data.shape))\n )\n\n if len(self.shape.shape) != 2:\n raise ValueError(\n \"Shape array must be two dimensional \"\n \"(is {})\".format(len(self.shape.shape))\n )\n\n shape_size, data_size = self._cumsum[-1], self.data.size\n\n if not shape_size == data_size:\n raise ValueError(\n \"Size of data ({data_size}) does not match that \"\n \"of the given shapes ({shape_size}).\".format(\n data_size=data_size, shape_size=shape_size\n )\n )",
"def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' % data.shape)",
"def _validate_values(self, values):\n prev_len = -1\n i = j = -1\n if values is None or len(values) == 0:\n self.shape = 0, 0\n return\n for i, row in enumerate(values):\n if prev_len == -1:\n prev_len = len(row)\n if prev_len != len(row):\n raise ValueError(f\"Row {i} differs in length: {prev_len} != {len(row)}\")\n for j, val in enumerate(row):\n if type(val) not in (int, float, complex):\n raise ValueError(f\"[{i}, {j}]: {val} is of bad type ({type(val)})\")\n if val == 0:\n self.empty_loc = (i, j)\n if i == -1:\n self.shape = 0, 0\n else:\n self.shape = i + 1, j + 1",
"def _check_shape(placeholder_shape, data_shape):\n\n return True",
"def check_input_shape(self, op, block):\n\n ipt_name = op.input(op.input_names[0])\n ipt_shape = block.var(ipt_name).shape\n for i in ipt_shape:\n if i < 0:\n warning_msg = (\n f\"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. \"\n f\"Specifying static values may improve performance\"\n )\n warnings.warn(warning_msg)",
"def check_shape(self):\r\n if np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape",
"def test_invalid_filter_shape(self):\r\n self.assertRaises(AssertionError, self.validate,\r\n (3, 2, 8, 8), (4, 3, 5, 5),\r\n 'valid')",
"def test_input_shape_error(self):\n\n def net_func():\n input_value = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])\n paddle.bincount(input_value)\n\n with self.assertRaises(ValueError):\n self.run_network(net_func)",
"def test_wrong_shape(self):\n scores = np.column_stack((np.ones(10), np.ones(10)))\n with self.assertRaises(ValueError):\n calc_disc(scores)",
"def validate_shape_and_dtype(self):\n if self.rgb.dtype != tf.float32:\n raise ValueError(\"Expected float32 rgb!\")\n if len(self.rgb.shape) != 4:\n raise ValueError(f\"Expected (B, H, W, C), got {self.rgb.shape}\")\n _, _, _, channels = self.rgb.shape.as_list()\n if channels != 3:\n raise ValueError(f\"Expected 3 rgb channels, got shape {self.rgb.shape}\")",
"def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))",
"def test_shape(self, varname, varshape, ndim): \n if len(varshape) != ndim:\n raise ShapeError('Shape=%s. Expected %i-D array for %s' %\n (repr(varshape), ndim, varname))",
"def test_shape_fail():\n lons, lats = np.arange(10), np.arange(10).reshape(5, 2)\n emsg = \"Require longitudes and latitudes with same shape\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats)",
"def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")",
"def test_bad_number_of_shape(self):\r\n specify_shape = SpecifyShape()\r\n\r\n x = vector()\r\n shape_vec = ivector()\r\n xval = numpy.random.rand(2).astype(floatX)\r\n self.assertRaises(AssertionError, specify_shape, x, [])\r\n self.assertRaises(AssertionError, specify_shape, x, [2, 2])\r\n\r\n f = theano.function([x, shape_vec], specify_shape(x, shape_vec),\r\n mode=self.mode)\r\n assert isinstance([n for n in f.maker.fgraph.toposort()\r\n if isinstance(n.op, SpecifyShape)][0].inputs[0].type,\r\n self.input_type)\r\n self.assertRaises(AssertionError, f, xval, [])\r\n self.assertRaises(AssertionError, f, xval, [2, 2])\r\n\r\n x = matrix()\r\n xval = numpy.random.rand(2, 3).astype(floatX)\r\n for shape in [(),\r\n (1,),\r\n (2, 3, 4)]:\r\n self.assertRaises(AssertionError, specify_shape, x, shape)\r\n f = theano.function([x, shape_vec], specify_shape(x, shape_vec),\r\n mode=self.mode)\r\n assert isinstance([n for n in f.maker.fgraph.toposort()\r\n if isinstance(n.op, SpecifyShape)][0].inputs[0].type,\r\n self.input_type)\r\n self.assertRaises(AssertionError, f, xval, shape)",
"def check_input_shapes(*args):\n\n # Collect the shapes of the inputs\n shapes = set()\n\n # DESIGN NOTES - currently allow:\n # - scalars,\n # - 0 dim ndarrays (also scalars but packaged differently)\n # - 1 dim ndarrays with only a single value\n\n for val in args:\n if isinstance(val, np.ndarray):\n # Note that 0-dim ndarrays (which are scalars) pass through as do\n # one dimensional arrays with a single value (also a scalar)\n if not(val.ndim == 0 or val.shape == (1,)):\n shapes.add(val.shape)\n # elif isinstance(val, Series):\n # # Note that 0-dim ndarrays (which are scalars) pass through\n # if val.ndim > 0:\n # shapes.add(val.shape)\n elif val is None or isinstance(val, (float, int, np.generic)):\n pass # No need to track scalars and optional values pass None\n else:\n raise ValueError(f'Unexpected input to check_input_shapes: {type(val)}')\n\n # shapes can be an empty set (all scalars) or contain one common shape\n # otherwise raise an error\n if len(shapes) > 1:\n raise ValueError('Inputs contain arrays of different shapes.')\n\n if len(shapes) == 1:\n return shapes.pop()\n\n return 1",
"def _check_shape(shape):\n if type(shape) == int:\n shape = (shape, shape)\n check_odd(shape, 'psf shape')\n return shape"
] | [
"0.6995105",
"0.6986119",
"0.6962179",
"0.695349",
"0.6889278",
"0.68580693",
"0.6736823",
"0.6689245",
"0.6615782",
"0.65720016",
"0.6547961",
"0.65478575",
"0.6534164",
"0.6530391",
"0.6463948",
"0.6453337",
"0.6426276",
"0.6411735",
"0.64014214",
"0.639231",
"0.6366248",
"0.6346506",
"0.6341107",
"0.6334117",
"0.63265306",
"0.63235915",
"0.631938",
"0.63097215",
"0.6307833",
"0.62960374"
] | 0.7520749 | 0 |
Returns frames, validates shape of first frame. | def get_frames(self):
if not self.video:
return []
# We cannot validate shape on construction as that happens inside graph
# mode as we construct from a tf.data.Dataset, so we validate here.
self.video[0].validate_shape_and_dtype()
return self.video | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_frame_batch(frames: list, *, format):\n frame_count = len(frames)\n assert frame_count > 0\n\n f1_idx = 0\n f1 = None\n while f1_idx < frame_count and f1 is None:\n try:\n f1 = _read_frame(frames[f1_idx], format=format)\n except SkipFrameException:\n f1 = None\n f1_idx += 1\n ret = [[] for _ in range(len(f1))]\n\n for idx in range(len(f1)):\n # ret[idx] = np.empty(shape=(frame_count,)+np.shape(f1[idx]), dtype=f1[idx].dtype)\n ret[idx].append(f1[idx])\n\n for frameidx in range(1+f1_idx, frame_count):\n try:\n fn = _read_frame(frames[frameidx], format=format)\n except SkipFrameException:\n continue\n for compidx in range(len(fn)):\n # ret[compidx][frameidx] = fn[compidx]\n ret[compidx].append(fn[compidx])\n\n # return ret\n return [np.array(comp, dtype=comp[0].dtype) for comp in ret]",
"def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()",
"def _read_frames(self):\n cap = self._read_file()\n\n frame_list = []\n ret_list = []\n\n while True:\n ret, frame = cap.read()\n if ret:\n frame_list.append(np.array(frame))\n ret_list.append(ret)\n else:\n break\n if self.mode==\"np\":\n frame_list = np.array(frame_list)\n return frame_list",
"def _select_frames(self, frames):\n converted_frames = list()\n # Ignore some frame at begin and end.\n for i in np.linspace(0, self.video_size, self.frame_num + 2)[1:self.frame_num + 1]:\n img = frames[int(i)]\n img = img.resize((224, 224), Image.BILINEAR)\n frame_data = np.array(img)\n converted_frames.append(frame_data)\n return converted_frames",
"def _validate_frames(frames: Sequence[int]) -> None:\n if not frames:\n raise ValueError('`frames` cannot be empty.')\n\n non_positive_frame_numbers = tuple(\n frame_number for frame_number in frames if frame_number < 1)\n if non_positive_frame_numbers:\n raise ValueError('Frame numbers must be positive. Found violations: '\n f'{non_positive_frame_numbers!r}')\n\n # Python uses Timsort which is `O(n)` in the best case, i.e., the overhead\n # is negligible assuming most inputs meet this specification. If the\n # specification is violated, `n` is small in case of DICOMs (few hundreds\n # in the worst case?). Here, the simplicity of the implementation outweighs\n # the (roughly constant time) overhead.\n if tuple(sorted(frames)) != tuple(frames):\n raise ValueError('Frame numbers must be in ascending order. Actual '\n f'order: {frames!r}')",
"def frames(self) -> Optional[Tuple[int, ...]]:\n return self._frames",
"def get_frames(self):\n\n log(\"Getting frames for {} at {}\".format(self._location, self._t0))\n fn_get = lambda time_str: self.get_wximg(time_str)\n pool0 = multiprocessing.dummy.Pool(self._frames)\n raw = pool0.map(fn_get, self.get_time_strs())\n wximages = [x for x in raw if x is not None]\n if not wximages:\n return None\n pool1 = multiprocessing.dummy.Pool(len(wximages))\n background = self.get_background()\n if background is None:\n return None\n fn_composite = lambda x: self._pilimg.alpha_composite(background, x)\n composites = pool1.map(fn_composite, wximages)\n legend = self.get_legend()\n if legend is None:\n return None\n loop_frames = pool1.map(lambda _: legend.copy(), composites)\n fn_paste = lambda x: x[0].paste(x[1], (0, 0))\n pool1.map(fn_paste, zip(loop_frames, composites))\n return loop_frames",
"def _construct_frame_list(self):\n\n chunks = self._chunk_list(list(range(self._start_frame, self._max_frame + 1)),\n int((self._max_frame - self._start_frame) / self._sub))\n frame_list = [[c[0], c[len(c) - 1]] for c in chunks]\n\n # Fix the frame_list in case of rounding errors\n if len(frame_list) > self._sub:\n frame_list = self._restructure_frames(frame_list)\n return frame_list",
"def frames(self):\n return list(self._frames)",
"def frames(self):\n while True:\n ret, frame = self.classification()\n if ret == True:\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n else:\n break",
"def available_frames(self):\n if self._pipeline:\n #return [getattr(frame[0], \"name\", frame[0]) for frame in self._pipeline]\n return [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline ]\n else:\n return None",
"def valid_filtering_frames(self):\n if self.frames is None:\n return np.empty(0, dtype=bool)\n return self.frames.is_unflagged(\n self.flagspace.flags.MODELING_FLAGS) & self.frames.valid",
"def CHECK_transition_frames(self):\n tr_frames = []\n for i, frame in enumerate(self.y):\n if not np.all(frame == frame[0]):\n tr_frames.append(frame)\n\n print('there are ', len(tr_frames), ' frames containing a transition')\n return tr_frames",
"def frames(self):\n return self._frames",
"def _crop_frames(self, frames, center_crop=True):\n cropped_frames = []\n crop_location = 0.5 if center_crop else np.random.random_sample()\n for frame in frames:\n cropped_frame = self._crop_frame(frame, crop_location)\n cropped_frames.append(cropped_frame)\n\n return np.array(cropped_frames)",
"def frames(self):\n if self.integration is None:\n return None\n return self.integration.frames",
"def iterframes(self):\n # self.check_ground_truth_availability()\n\n # Iterate through both poses and images, and construct frames\n # with look up table for filename str -> (timestamp, pose, annotation) \n for img_msg, frame in self.frame_index_.iteritems(): \n yield (frame.timestamp, img_msg, frame)",
"def crosslyGenerateFrames(self):\n fail = set()\n try:\n while self.alive:\n for name, video in self._videos.items():\n video: cv2.VideoCapture\n success, frame = video.read()\n if self.longFirst:\n if len(fail) == len(self._videos): # 长视频优先,视频长度由最长决定\n return\n elif not success:\n print(f'Read {name} Over')\n fail.add(video)\n else:\n yield frame\n else:\n if success: # 短视频优先,视频长度由最短决定\n yield frame\n else:\n return\n print('Reading Completed!')\n except Exception as e:\n raise e\n finally:\n self.close()",
"def frame_list_fixture():\n return [[4, 3, 5, 7], [8, 6, 3], [6, 7]]",
"def get_all_frames(self):\n #pdb.set_trace()\n frame_size=(512,512)\n frame_data = []\n variant_type = (pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_UI2)\n for i in xrange(self.defaults['EXP_SEQUENTS']):\n frame_data.append(win32com.client.VARIANT(variant_type, numpy.empty(frame_size)))\n frame_data[i]=self.appdoc.GetFrame(i+1,frame_data[i])\n \n return numpy.array(frame_data, dtype=numpy.uint16)",
"def frame_generator(self, batch_size, train_test, concat=False):\n # Get the right dataset for the generator.\n data, flow_data, valid_data = self.data\n if(train_test == 'validation'):\n data = valid_data\n # print(data.shape) # (48, 2)\n # print(data[0].shape) # (2,)\n\n print(\"Creating %s generator with %d samples.\" % (train_test, len(data))) # should be 1018\n \n while True:\n X = []\n F = []\n\n # Generate batch_size samples.\n for _ in range(batch_size):\n # Get a random sample. ## TODO: is it a problem to always take random samples?\n ri = random.randint(0, len(data) - 1) # random integer s.t. a <= N <= b.\n sample = data[ri] # shape (2,), 2 frame filenames are in there\n \n # Given a set of filenames, build a sequence.\n sequence = [process_and_crop_image(x, self.image_shape) for x in sample] # processor..\n\n # get corresponding flows\n flow_filenames = flow_data[ri] # shape (2,), 2 flow filenames are in there\n flow_array = [read_and_crop_flow(str(x), self.image_shape_flow) for x in flow_filenames] # returns np: (2, h*w*2) ?\t \n\n if concat:\n # pass sequence back as single array (into an MLP rather than an RNN)\n sequence = np.concatenate(sequence).ravel()\n flows = np.concatenate(flow_array).ravel() # need?\n\n X.append(sequence) # (n, 6144) -- 3072 + 3072 = two images\n F.append(flows) # (n, 4096) -- 2048 + 2048 = two flows\n\n yield (np.array(X), np.array(F))",
"def ensure_loaded(self, frames):\n if isinstance(frames, list):\n return [self.ensure_np_array(frame) for frame in frames]\n\n elif isinstance(frames, str):\n return Image.open(frames)\n\n elif isinstance(frames, np.ndarray):\n return Image.fromarray(frames)\n \n return frames",
"def get_frame_list(self):\r\n\r\n logger.debug('Executing frame extraction')\r\n\r\n frames_loaded = False\r\n\r\n # Try to load YAML file with frame list\r\n if os.path.exists(self.frames_file_path):\r\n\r\n print 'Loading YAML file with frame list'\r\n logger.debug('Loading YAML file with frame list')\r\n\r\n f_list = utils.load_YAML_file(self.frames_file_path)\r\n\r\n if f_list:\r\n self.frame_list = f_list\r\n\r\n print 'YAML file with frame_list loaded'\r\n logger.debug('YAML file with frame_list loaded')\r\n\r\n frames_loaded = True\r\n\r\n if not frames_loaded:\r\n\r\n print '\\n\\n### Frame extraction ###\\n'\r\n logger.debug('\\n\\n### Frame extraction ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n if not (os.path.exists(self.frames_path)):\r\n os.makedirs(self.frames_path)\r\n\r\n # Counter for all frames\r\n frame_counter = 0\r\n\r\n # Value of frame_counter for last analyzed frame\r\n last_anal_frame = 0\r\n\r\n # Open video file\r\n capture = cv2.VideoCapture(self.resource_path)\r\n\r\n self.frame_list = []\r\n\r\n # Save parameters for this video\r\n param_dict = {}\r\n\r\n if capture is None or not capture.isOpened():\r\n\r\n error = 'Error in opening video file'\r\n\r\n print error\r\n logger.debug(error)\r\n\r\n return\r\n\r\n else:\r\n\r\n video_fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)\r\n\r\n param_dict[c.VIDEO_FPS_KEY] = video_fps\r\n\r\n # Original number of frames\r\n tot_frames = capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\r\n\r\n param_dict[c.VIDEO_TOT_FRAMES_KEY] = tot_frames\r\n\r\n self.fps = video_fps\r\n\r\n self.video_frames = float(tot_frames)\r\n\r\n # Saved frames\r\n saved_frames = 0\r\n\r\n while True:\r\n\r\n # Read frame\r\n ret, frame = capture.read()\r\n\r\n # If no frame is read, abort\r\n if not ret:\r\n break\r\n\r\n used_fps = c.USED_FPS\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n use_or_res = c.USE_ORIGINAL_RES\r\n used_res_scale_factor = c.USED_RES_SCALE_FACTOR\r\n\r\n if self.params is not None:\r\n\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_RES_KEY in self.params:\r\n use_or_res = self.params[c.USE_ORIGINAL_RES_KEY]\r\n\r\n if c.USED_RES_SCALE_FACTOR_KEY in self.params:\r\n used_res_scale_factor = self.params[\r\n c.USED_RES_SCALE_FACTOR_KEY]\r\n\r\n # Next frame to be analyzed\r\n next_frame = last_anal_frame + (video_fps / used_fps) - 1\r\n\r\n if use_or_fps or (frame_counter > next_frame):\r\n\r\n # Frame position in video in milliseconds\r\n elapsed_ms = capture.get(cv2.cv.CV_CAP_PROP_POS_MSEC)\r\n\r\n # print 'elapsed video s =', elapsed_video_s\r\n\r\n fr_name = '%07d.png' % frame_counter\r\n\r\n frame_path = os.path.join(self.frames_path, fr_name)\r\n\r\n # Resize frame\r\n if not use_or_res:\r\n fx = used_res_scale_factor\r\n\r\n fy = used_res_scale_factor\r\n\r\n interp = cv2.INTER_AREA\r\n\r\n frame = cv2.resize(src=frame, dsize=(0, 0),\r\n fx=fx, fy=fy,\r\n interpolation=interp)\r\n\r\n cv2.imwrite(frame_path, frame,\r\n [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n frame_dict = {c.SAVED_FRAME_NAME_KEY: fr_name,\r\n c.ELAPSED_VIDEO_TIME_KEY: int(elapsed_ms)}\r\n\r\n self.frame_list.append(frame_dict)\r\n\r\n last_anal_frame = frame_counter\r\n\r\n saved_frames += 1\r\n\r\n frame_counter += 1\r\n\r\n self.progress = 100 * (frame_counter / self.video_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n del 
capture\r\n\r\n self.saved_frames = float(saved_frames)\r\n\r\n param_dict[c.VIDEO_SAVED_FRAMES_KEY] = self.saved_frames\r\n\r\n # Save frame list in YAML file\r\n utils.save_YAML_file(self.frames_file_path, self.frame_list)\r\n\r\n # Save video parameters in YAML file\r\n\r\n utils.save_YAML_file(self.params_file_path, param_dict)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for frame extraction:', str(time_in_seconds), 's\\n'\r\n logger.debug(\r\n 'Time for frame extraction:', str(time_in_seconds), 's\\n')\r\n\r\n self.anal_times[c.FRAME_EXTRACTION_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)",
"def _collectFrames(self):\n self._sources = sources = self._resolveFramePaths(self._info['sources'])\n self.logger.debug('Sources: %r', sources)\n\n frameDict = {'byFrame': {}, 'byAxes': {}, 'axesAllowed': True}\n numChecked = 0\n\n self._associatedImages = {}\n self._sourcePaths = {}\n self._channels = self._info.get('channels') or []\n\n absLargeImagePath = os.path.abspath(self._largeImagePath)\n computedWidth = computedHeight = 0\n self.tileWidth = self._info.get('tileWidth')\n self.tileHeight = self._info.get('tileHeight')\n self._nativeMagnification = {\n 'mm_x': self._info.get('scale', {}).get('mm_x') or None,\n 'mm_y': self._info.get('scale', {}).get('mm_y') or None,\n 'magnification': self._info.get('scale', {}).get('magnification') or None,\n }\n # Walk through the sources, opening at least the first two, and\n # construct a frame list. Each frame is a list of sources that affect\n # it along with the frame number from that source.\n lastSource = None\n for sourceIdx, source in enumerate(sources):\n path = source['path']\n if os.path.abspath(path) == absLargeImagePath:\n msg = 'Multi source specification is self-referential'\n raise TileSourceError(msg)\n similar = False\n if (lastSource and source['path'] == lastSource['path'] and\n source.get('params') == lastSource.get('params')):\n similar = True\n if not similar and (numChecked < 2 or not self._info.get('uniformSources')):\n # need kwargs of frame, style?\n ts = self._openSource(source)\n self.tileWidth = self.tileWidth or ts.tileWidth\n self.tileHeight = self.tileHeight or ts.tileHeight\n if not numChecked:\n tsMag = ts.getNativeMagnification()\n for key in self._nativeMagnification:\n self._nativeMagnification[key] = (\n self._nativeMagnification[key] or tsMag.get(key))\n numChecked += 1\n tsMeta = ts.getMetadata()\n if 'bands' in tsMeta:\n if not hasattr(self, '_bands'):\n self._bands = {}\n self._bands.update(tsMeta['bands'])\n lastSource = source\n bbox = self._sourceBoundingBox(source, tsMeta['sizeX'], tsMeta['sizeY'])\n computedWidth = max(computedWidth, int(math.ceil(bbox['right'])))\n computedHeight = max(computedHeight, int(math.ceil(bbox['bottom'])))\n # Record this path\n if path not in self._sourcePaths:\n self._sourcePaths[path] = {\n 'frames': set(),\n 'sourcenum': set(),\n }\n # collect associated images\n for basekey in ts.getAssociatedImagesList():\n key = basekey\n keyidx = 0\n while key in self._associatedImages:\n keyidx += 1\n key = '%s-%d' % (basekey, keyidx)\n self._associatedImages[key] = {\n 'sourcenum': sourceIdx,\n 'key': key,\n }\n source['metadata'] = tsMeta\n source['bbox'] = bbox\n self._sourcePaths[path]['sourcenum'].add(sourceIdx)\n # process metadata to determine what frames are used, etc.\n self._addSourceToFrames(tsMeta, source, sourceIdx, frameDict)\n # Check frameDict and create frame record\n self._frames = self._frameDictToFrames(frameDict)\n self.tileWidth = min(max(self.tileWidth, self._minTileSize), self._maxTileSize)\n self.tileHeight = min(max(self.tileHeight, self._minTileSize), self._maxTileSize)\n self.sizeX = self._info.get('width') or computedWidth\n self.sizeY = self._info.get('height') or computedHeight\n self.levels = int(max(1, math.ceil(math.log(\n max(self.sizeX / self.tileWidth, self.sizeY / self.tileHeight)) / math.log(2)) + 1))",
"def sample_frames(frame_dir, fps, visualize_sample_rate):\n visualize_every_x_frames = visualize_sample_rate * int(fps)\n sampled_frames = np.empty((0, 3, IMG_DIM, IMG_DIM), dtype=np.float32) # B, C, H, W\n i = 0\n for file in sorted(os.listdir(frame_dir)):\n if i % visualize_every_x_frames == 0:\n img = skimage.img_as_float(skimage.io.imread(os.path.join(frame_dir, file))).astype(np.float32)\n img = skimage.transform.resize(img, (IMG_DIM, IMG_DIM)) # H, W, C\n img = img.swapaxes(1, 2).swapaxes(0, 1) # C, H, W\n sampled_frames = np.append(sampled_frames, np.array([img]), axis=0)\n i += 1\n logger.debug(\"total number of frames: {}\".format(i))\n return sampled_frames",
"def split_frames(flat):\n \n frames = [[]]\n for expr in flat:\n # A new frame starts with a .loc expression.\n if type(expr) == Raw and expr.expr[:4] == '.loc':\n frames.append([expr])\n else:\n frames[-1].append(expr)\n return frames",
"def getouterframes(frame, context=1):\r\n framelist = []\r\n while frame:\r\n framelist.append((frame,) + getframeinfo(frame, context))\r\n frame = frame.f_back\r\n return framelist",
"def frames(self) -> Set[int]:\n return self._frames",
"def shapes(self):\n return self._frame.getshapes()",
"def _process_frames(dataset_info, example):\n frames = tf.concat(example['frames'], axis=0)\n frames = tf.map_fn(_convert_frame_data, tf.reshape(frames, [-1]), dtype=tf.float32, back_prop=False)\n img_dims = (dataset_info.frame_size, dataset_info.frame_size, 3)\n frames = tf.reshape(frames, (-1, dataset_info.sequence_size) + img_dims)\n\n if (dataset_info.frame_size != 64):\n frames = tf.reshape(frames, (-1, ) + img_dims) # (B * S, W, H, C)\n default_img_dims = (64, 64, 3)\n frames = tf.image.resize_bilinear(frames, default_img_dims[:2], align_corners=True)\n frames = tf.reshape(frames, (-1, dataset_info.sequence_size) + default_img_dims)\n \n return frames"
] | [
"0.6609018",
"0.6322371",
"0.63184637",
"0.62587166",
"0.6191532",
"0.61825275",
"0.6118202",
"0.602572",
"0.5990667",
"0.5902788",
"0.5898221",
"0.5883571",
"0.58467305",
"0.58325005",
"0.58138317",
"0.580956",
"0.57896084",
"0.5769102",
"0.56955373",
"0.5684224",
"0.5657231",
"0.5652181",
"0.56399834",
"0.56336474",
"0.56234413",
"0.56203884",
"0.5602829",
"0.55945164",
"0.556502",
"0.5564938"
] | 0.67747474 | 0 |
Return the number of frames in the video. | def num_frames(self):
return len(self.video) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def frames(self):\n frame_count = 0\n if self.is_video() or self.is_audio():\n if self.__dict__['nb_frames']:\n try:\n frame_count = int(self.__dict__['nb_frames'])\n except ValueError:\n raise FFProbeError('None integer frame count')\n return frame_count",
"def realFrameNumber(self, callback=None):\n count = 0\n theoreticalFrameNumber = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n if theoreticalFrameNumber > 30000:\n return theoreticalFrameNumber\n while(True):\n # Capture frame-by-frame\n ret, frame = self.video.read()\n if not ret:\n break\n if callback != None:\n callback(0.1 + (count / theoreticalFrameNumber) * 0.75, \"Calculating the number of frame\")\n count += 1\n return count",
"def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames",
"def get_num_frames(self):\n return self._frames.shape[0]",
"def FrameCount(self):\r\n\t\treturn self._get_attribute('frameCount')",
"def get_total_frames(self) -> int:\n return self.num_frames",
"def num_frames(self):\n return self._first_rgb.shape[1]",
"def size(self):\n if self.frames is None:\n return 0\n return self.frames.size",
"def get_nb_frames_for_video(video_parts):\n filename_no_ext, _ = video_parts\n generated_files = glob.glob(os.path.join(output_dir, filename_no_ext + '*.jpg'))\n return len(generated_files)",
"def count_videos(self):\n return len(self.videos)",
"def get_nb_frames_for_video(video_parts):\n train_or_test, filename_no_ext, _ = video_parts\n generated_files = glob.glob(os.path.join('demo_frames',\n filename_no_ext + '*.jpg'))\n return len(generated_files)",
"def get(self, flag: int):\n if flag == cv2.CAP_PROP_FRAME_COUNT:\n return self.length",
"def get_nb_frames_for_video(video_parts):\n train_or_test, classname, filename_no_ext, _ = video_parts\n generated_files = glob.glob(os.path.join(\"/data/niteshku001/Ravdess/data\", train_or_test, classname,\n filename_no_ext + '*.jpg'))\n return len(generated_files)",
"def count_frames():\n frames = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n frame_count = []\n for f_r in frames:\n frame_count.append(f_r)\n return len(frame_count)",
"def frame_length(self):\r\n return self.config.frame_length",
"def frame_size(self):\n size = None\n if self.is_video():\n width = self.__dict__['width']\n height = self.__dict__['height']\n if width and height:\n try:\n size = (int(width), int(height))\n except ValueError:\n raise FFProbeError(\"None integer size %s:%s\" % (width, height))\n\n return size",
"def get_num_frames(filename, ext='*.jpg'):\n if os.path.isdir(filename):\n return len(glob.glob(os.path.join(filename, ext)))\n elif os.path.isfile(filename):\n cmd = ('ffprobe -v 0 -count_frames -select_streams v:0 '\n '-show_entries stream=nb_read_frames -of '\n 'default=nokey=1:noprint_wrappers=1 ' + filename).split()\n pid = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n if pid.returncode != 0:\n return None\n nframes_expr = pid.stdout\n nframes = int(nframes_expr.rstrip())\n return nframes\n else:\n raise ValueError('Unexpect filename: {}'.format(filename))",
"def video(path):\n cap = cv2.VideoCapture(path)\n frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n return frame_count, cap",
"def video_playback_width(self):\n # type: () -> int\n return self._video_playback_width",
"def get_frame_size(self) -> Tuple[int, int]:\n return self.__sim.frame_size()",
"def get_video_fps(self):\n fps = self.video.get(cv2.CAP_PROP_FPS)\n logging.info('Video FPS: {}'.format(fps))\n return fps",
"def get_video_count(self):\n done = self.cur.execute(\"SELECT video_ID FROM videos\")\n return done",
"def bspb_frameCounter():\n curTime = int(pm.currentTime())\n maxTime = int(pm.playbackOptions(q=True, maxTime=True))\n return '{0} / {1}'.format(curTime, maxTime)",
"def __calculate_number_of_frames(self):\n # Save current position\n current_pos = self.__file_object.tell()\n\n # Go to start of first frame\n self.__file_object.seek(self.__first_frame_raw_data_position)\n self.number_of_frames = 0\n\n while True:\n if not self.__file_object.read(self.__frame_raw_data_size):\n break\n\n self.__file_object.readline()\n self.number_of_frames += 1\n\n # Restore file pointer\n self.__file_object.seek(current_pos)\n print('Number of frames:', self.number_of_frames)",
"def video_playback_height(self):\n # type: () -> int\n return self._video_playback_height",
"def get_frame_size(self):\n return self._frames.shape[-1]",
"def _frameLen(self):\n return self.numCols * self.numRows",
"def frame_size(self):\n return self._frame_size",
"def video_duration(self):\n # type: () -> int\n return self._video_duration",
"def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))"
] | [
"0.8149587",
"0.8000634",
"0.79408365",
"0.77318466",
"0.7711401",
"0.76985836",
"0.7361139",
"0.73375314",
"0.7288898",
"0.7199668",
"0.71541166",
"0.7113876",
"0.7107059",
"0.707449",
"0.7064645",
"0.703789",
"0.69904053",
"0.697358",
"0.68650484",
"0.6846845",
"0.68324274",
"0.6811761",
"0.6807661",
"0.67943984",
"0.6783312",
"0.6753559",
"0.67271113",
"0.6714207",
"0.67129",
"0.6665878"
] | 0.87998253 | 0 |
determine_category Return a category depending on a weight given. | def determine_category(weight):
if weight < 52:
return Category.FLY
elif 52 <= weight < 57:
return Category.FEATHER
elif 57 <= weight < 63:
return Category.LIGHT
elif 63 <= weight < 69:
return Category.WELTER
elif 69 <= weight < 75:
return Category.MEDIUM
elif 75 <= weight < 81:
return Category.MEDIUM_HEAVY
elif 81 <= weight < 91:
return Category.HEAVY
elif weight >= 91:
return Category.SUPER_HEAVY
else:
return TypeError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_weight_category(self) -> WeightCategory:\n return WeightCategory.light if self.weight < 100 else WeightCategory.heavy",
"def get_weight(val):\n if val < 2000:\n category = 1\n elif val < 2500:\n category = 2\n elif val < 3000:\n category = 3\n elif val < 3500:\n category = 4\n else:\n category = 5\n return category",
"def get_weight_class(weight):\n\n if(weight >= 3500):\n return 5\n elif(weight >= 3000 and weight < 3500):\n return 4\n elif(weight >= 2500 and weight < 3000):\n return 3\n elif(weight >= 2000 and weight < 2500):\n return 2\n else:\n return 1",
"def weights_by_category(self):\n cate_weights = {}\n for cate in self.unique_category:\n cate_weights[cate] = self.weights[self.category == cate].sum()\n return pd.Series(cate_weights, index=self.unique_category)",
"def lookup_relevant(score):\n category = \"\"\n if score > 2.0:\n category = \"RELEVANT\"\n elif score > 0.0:\n category = \"PARTIALLY RELEVANT\"\n else:\n category = \"NOT RELEVANT\"\n return category",
"def wn_category(word):\n cats = ['transport', 'food', 'building', 'animal', 'appliance', 'action', 'clothes', 'utensil', 'body', 'color',\n 'electronics', 'number', 'human']\n cat_synsets = dict(zip(cats, map(wn.synsets, cats)))\n hyper = lambda s: s.hypernyms()\n synsets = wn.synsets(word)\n closures = list(chain.from_iterable([list(sns.closure(hyper, depth=3)) for sns in synsets])) + synsets\n max_overlap = 0\n category = None\n for cat, csns in cat_synsets.items():\n if len(set(closures).intersection(set(csns))) > max_overlap:\n category = cat\n return category",
"def get_y_category(y_actual, min_edge, bin_width):\n # Assertions\n assert max(y_actual) > min_edge, \\\n 'min_edge has to be smaller than maximum y_actual'\n assert isinstance(y_actual, np.ndarray), \\\n 'Input array must be a numpy ndarray.'\n assert isinstance(min_edge, (int, float)), \\\n 'min_edge should be a float or an int.'\n assert isinstance(bin_width, (int, float)), \\\n 'bin_width should be a float or an int.'\n # Functionality\n Y_category = (y_actual - min_edge) // bin_width\n\n return Y_category",
"def category(self):\n\n for category, match_list in rule_list:\n for match in match_list:\n if match.match(self):\n return category\n\n return None",
"def _get_category(identifier: str) -> str:\n for category, keywords in categories.items():\n # Check for each keyword\n for k in keywords:\n # Check if lower-case keyword is substring of lower-case identifier\n if identifier.lower().find(k.lower()) != -1:\n return category\n # Default value if no category was found\n return 'other'",
"def getCategory():",
"def weighted_by(self, weight_rel_name,\n category_name):\n self._check_type_compatibility(\n self.context.get_domain(weight_rel_name), self._type_name,\n 'weighted_by')\n category_type = self.context.get_range(weight_rel_name)\n with tf.name_scope('weighted_by_%s_%s' % (weight_rel_name, category_name)):\n weight_vector = self.context.one(category_name, category_type).follow(\n weight_rel_name, -1)\n return self.__mul__(weight_vector)",
"def get_category(self):\n\n\t\treturn self.__category",
"def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384",
"def centrality_weights_classification(weighted=True):\n results = {'_is_weighted':weighted, '_evaluation':'classification'}\n graph_metrics = graph_representation.get_metrics(weighted)\n\n print '> Reading cases..'\n descriptions_path = '../data/tasa/TASA900_dependencies'\n texts, labels = data.read_files(descriptions_path)\n\n rep = {}\n for metric in graph_metrics:\n rep[metric] = []\n\n print '> Creating graph representations..'\n for i, text in enumerate(texts):\n if i%10==0: print ' ',str(i)+'/'+str(len(texts))\n g = graph_representation.construct_dependency_network(text, weighted=weighted)\n for metric in graph_metrics:\n d = graph_representation.graph_to_dict(g, metric)\n rep[metric].append(d)\n g = None # just to make sure..\n if i%100==0:\n if weighted:\n postfix = '_weighted'\n else:\n postfix = '_unweighted'\n data.pickle_to_file(rep, 'output/dependencies/exp1_class_tmp_'+str(i)+'_'+postfix)\n\n print '> Creating vector representations..'\n for metric in graph_metrics:\n rep[metric] = graph_representation.dicts_to_vectors(rep[metric])\n\n print '> Evaluating..'\n for metric in graph_metrics:\n vectors = rep[metric]\n score = evaluation.evaluate_classification(vectors, labels)\n print ' ', metric, score\n results[metric] = score\n\n if weighted:\n postfix = '_weighted'\n else:\n postfix = '_unweighted'\n data.pickle_to_file(results, 'output/dependencies/exp1_class'+postfix)\n\n pp.pprint(results)\n return results",
"def weighted_categorical_crossentropy(weights):\r\n\r\n weights = K.variable(weights)\r\n\r\n def loss(y_true, y_pred):\r\n # scale predictions so that the class probas of each sample sum to 1\r\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True)\r\n # clip to prevent NaN's and Inf's\r\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\r\n # calc\r\n loss = y_true * K.log(y_pred) * weights\r\n loss = -K.sum(loss, -1)\r\n return loss\r\n\r\n return loss",
"def category_of(element):\n if element in COLORS:\n return 'colors'\n if element in PETS:\n return 'pets'\n if element in BEVERAGES:\n return 'beverages'\n if element in CIGARETTES:\n return 'cigarettes'\n if element in NATIONALITY:\n return 'nationality'\n return 'numbers'",
"def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best",
"def return_category_from_value_HML(val):\n val = int(val.values[0])\n if val == 5:\n return 'H'\n elif val == 4:\n return 'M'\n else:\n return 'L'",
"def predict_category(self):\n pass",
"def get_category(self) -> str:\n return self.category",
"def bmi_categories():\n # Variable to ensure PEP8 convention pass (amount of characters in line)\n your_bmi = \"your BMI Category is \"\n # Result to user in age below 18\n if int(age) < 18:\n \"\"\"\n If age of user is below 18\n \"\"\"\n if float(bmi) <= 18.5:\n print(f\"{name.capitalize()} {your_bmi}underweight\")\n elif float(bmi) >= 18.5 and float(bmi) <= 24.9:\n print(f\"{name.capitalize()} {your_bmi}normal\")\n elif float(bmi) >= 25 and float(bmi) <= 29.9:\n print(f\"{name.capitalize()} {your_bmi}overweight\")\n else:\n print(f\"{name.capitalize()} {your_bmi}obesity\")\n # Else result to user in age is over 18\n else:\n \"\"\"\n If age of user is equal or over 18 - adults\n \"\"\"\n if float(bmi) <= 16:\n print(f\"{name.capitalize()} {your_bmi}Severe Thinness\")\n elif float(bmi) >= 16 and float(bmi) <= 17:\n print(f\"{name.capitalize()} {your_bmi}Moderate Thinness\")\n elif float(bmi) >= 17 and float(bmi) <= 18.5:\n print(f\"{name.capitalize()} {your_bmi}Mild Thinness\")\n elif float(bmi) >= 18.5 and float(bmi) <= 25:\n print(f\"{name.capitalize()} {your_bmi}Normal\")\n elif float(bmi) >= 25 and float(bmi) <= 30:\n print(f\"{name.capitalize()} {your_bmi}Overweight\")\n elif float(bmi) >= 30 and float(bmi) <= 35:\n print(f\"{name.capitalize()} {your_bmi}Obese Class I\")\n elif float(bmi) >= 35 and float(bmi) <= 40:\n print(f\"{name.capitalize()} {your_bmi}Obese Class II\")\n else:\n print(f\"{name.capitalize()} {your_bmi}Obese Class III\")",
"def get_cyldiam(weight, max_w, min_w):\n\n # Choose colorscale and minimum dpending if weight is negative or positive\n if weight >= 0:\n min_w = min_w if min_w >= 0 else 0\n else: \n weight = abs(weight)\n max_w = abs(min_w)\n min_w = abs(max_w) if max_w < 0 else 0\n\n dif_range = max_w - min_w\n max_diam = 0.9\n if min_w == max_w: \n return(max_diam)\n else:\n dif_weight = weight-min_w\n diam = (max_diam*dif_weight)/dif_range\n return(diam)",
"def weighted_categorical_crossentropy(weights):\n weights = K.variable(weights)\n\n def loss(y_true, y_pred):\n y_hat = y_pred / K.sum(y_pred, axis=-1, keepdims=True)\n y_hat = K.clip(y_hat, K.epsilon(), 1 - K.epsilon())\n loss = y_true * K.log(y_hat) * weights\n loss = - K.sum(loss) / K.cast(K.shape(y_true)[0] * K.shape(y_true)[1] * K.shape(y_true)[2], 'float')\n return loss\n\n return loss",
"def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )",
"def category(self) -> Optional[str]:\n return pulumi.get(self, \"category\")",
"def rpc_category_to_super_category(category_id, num_classes):\n cat_id = -1\n assert num_classes in RPC_SUPPORT_CATEGORIES, 'Not support {} density categories'.format(num_classes)\n if num_classes == 17:\n cat_id = _categories[category_id]\n elif num_classes == 1:\n cat_id = 0\n elif num_classes == 200:\n cat_id = category_id - 1\n assert 199 >= cat_id >= 0\n return cat_id",
"def classify(self, mutation) -> Set['Category']:\n pass",
"def categoria(cad):\n \n # Vai ser usado o tuplo que contem todas as informacoes sobre as categorias das entidades definido nas linhas de codigo iniciais.\n # Sao acedidas as informacoes no indice 0 (Digito Inicial) e 1 (Categoria). \n \n # Caso o primeiro caracter nao seja um algarismo, chamamos a atencao ao utilizador para o erro. Caso seja, percorremos o tuplo com as informacoes sobre as categorias das entidades, e devolvemos a entidade correspondente ao digito inicial.\n \n \n if '0' <= cad[0] <= '9':\n\n c1=eval(cad[0])\n\n for e in t_cat_ent:\n \n if c1==e[0]:\n return e[1]\n \n \n else:\n raise ValueError ('function categoria(): O primeiro digito da cadeia nao e valido')",
"def get_knowledge_category(self):\n if not self.has_knowledge_category():\n raise IllegalState()\n else:\n return Grade(self._get_grade_map(self._my_map['knowledgeCategoryId'])),",
"def categorize_distances(row):\n category_name = \"\"\n try:\n distance_value = row[\"dist\"]\n value = float(distance_value)\n if value == 0.000000:\n category_name = \"Match\"\n elif 0.000001 <= value <= 0.1:\n category_name = \"Possible Match\"\n elif value > 0.1:\n category_name = \"None Match\"\n except:\n category_name = \"Not Categorised,Invalid Distance Value\"\n return category_name"
] | [
"0.745884",
"0.6875626",
"0.60673094",
"0.57147753",
"0.5712677",
"0.56384104",
"0.56335866",
"0.5583973",
"0.5543692",
"0.54811215",
"0.5376456",
"0.5367497",
"0.53546196",
"0.5319133",
"0.5300997",
"0.52614963",
"0.5251178",
"0.5231584",
"0.5230955",
"0.519492",
"0.51848376",
"0.5122847",
"0.51160026",
"0.5094066",
"0.505752",
"0.5023558",
"0.5019431",
"0.49921665",
"0.49918813",
"0.49914652"
] | 0.81798506 | 0 |
initializes the particle filter with a normal distribution | def __init__(self, init_pos, init_stdev, num_particles, sense_noise):
self.particles = np.random.multivariate_normal(
init_pos, [[init_stdev**2, 0], [0, init_stdev**2]], num_particles)
self.weights = np.array(
[1. / num_particles for _ in range(num_particles)])
self.n = num_particles
self.sense_noise = sense_noise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_particles(self):\n \n # Each particle is a dimension-K vector. We generate each particle \n # uniformly at random from the space [0,1]^K. \n self.Particles = np.random.uniform(0, 1, (self.Npar, self.K))\n #print(\"Particles: \", self.Particles) \n return None",
"def init_particle_filter(self, motion_prior, n_p):\n # Define necessary components for the particle filter\n if motion_prior['mode'] == 'PositionDiffusion':\n # Diffusion\n dc_infer = motion_prior['dc']\n d_h = 2 # Dimension of hidden state (i.e. x,y = 2 dims)\n sdev = np.sqrt(dc_infer * self.dt / 2) * np.ones((d_h,))\n ipd = pf.GaussIPD(d_h, self.n_n, sdev * 0.001)\n tpd = pf.GaussTPD(d_h, self.n_n, sdev)\n ip = pf.GaussIP(d_h, sdev * 0.001)\n tp = pf.GaussTP(d_h, sdev)\n lp = PoissonLP(self.n_n, d_h, self.tc.spike_energy)\n\n elif motion_prior['mode'] == 'VelocityDiffusion':\n # FIXME: save these params\n d_h = 4 # Hidden state dim, x,y,vx,vy\n\n v0 = motion_prior['v0'] # Initial Estimate for velocity\n dcv = motion_prior['dcv'] # Velocity Diffusion Constant\n st = np.sqrt(dcv * self.dt)\n adj = np.sqrt(1 - st ** 2 / v0 ** 2)\n\n eps = 0.00001 # Small number since cannot have exact zero\n sigma0 = np.array([eps, eps, v0, v0]) # Initial sigmas\n sigma_t = np.array([eps, eps, st, st]) # Transition sigmas\n\n # Transition matrix\n a = np.array([[1, 0, self.dt, 0],\n [0, 1, 0, self.dt],\n [0, 0, adj, 0],\n [0, 0, 0, adj]])\n\n ipd = pf.GaussIPD(d_h, self.n_n, sigma0)\n tpd = pf.GaussTPD(d_h, self.n_n, sigma_t, A=a)\n ip = pf.GaussIP(d_h, sigma0)\n tp = pf.GaussTP(d_h, sigma_t, A=a)\n lp = PoissonLP(self.n_n, d_h, self.tc.spike_energy)\n # Note trick where PoissonLP takes 0,1 components of the\n # hidden state which is the same for both cases\n\n else:\n raise ValueError(\n 'Unrecognized Motion Prior ' + str(motion_prior))\n\n r = np.zeros((self.n_n, self.n_t)).astype('float32')\n return pf.ParticleFilter(\n ipd, tpd, ip, tp, lp, r.transpose(), n_p)",
"def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicLogNormalDistribution(self.mean,self.sigma,self.low)\n self.lowerBound = 0.0\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicLogNormalDistribution(self.mean,self.sigma,self.lowerBound,self.upperBound, self.low)",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))",
"def initSamples(self):\n # Define this (note random.uniform is helpful here!)\n for i in range(self.numParticles):\n # Create particles uniformly and generate same weights for all particles.\n particle = random.uniform(self.minValue, self.maxValue)\n self.samples.append(particle)\n self.weights.append(1/self.numParticles)",
"def init_method_normal(sigma):\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_",
"def init_method_normal(sigma):\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_",
"def __init__(self,nparticles,initial_condition):\n self.nparticles = nparticles\n self.particles = np.array([Particle(mass,x,y) for x,y,mass in initial_condition])\n self.mass = np.array([self.particles[i].mass for i in range(len(self.particles))])\n self.position = np.array([self.particles[i].position for i in range(len(self.particles))])\n self.momentum = np.array([self.particles[i].momentum for i in range(len(self.particles))])",
"def __InitializeNorms(self, norm_SF, norm_plane):\n # Initializes values \n nc = self.__nc_RSoft_I\n n_SF = self.__n_SF_rad+self.__n_SF_ang\n mean_SF = np.zeros(n_SF)\n std_SF = np.zeros(n_SF)\n cov_SF = np.zeros((n_SF,n_SF))\n n_parts = 0\n\n for idx_type, type_ in enumerate(self.__types_unique):\n n_parts = 0\n idx_type_SF = np.where(self.__types==type_)[0]\n for f in range(self._n_f):\n # Finds particle typtes for each particle. \n particle_types = self.__NcIO_dyn.GetDataCol(f,'type')\n type_ids = np.where(particle_types==type_)[0]\n\n # Obtains radial and angular SFs for f\n if self.__containsRadial:\n rSF = nc.variables['radial_structures'][f][type_ids]\n else:\n rSF = np.zeros((len(type_ids),0))\n if self.__containsAngular:\n aSF = nc.variables['angular_structures'][f][type_ids]\n else:\n aSF = np.zeros((len(type_ids),0))\n SF = np.hstack((rSF,aSF))\n SF = SF[~np.isnan(np.sum(SF,axis=1))] # SHOULD REMOVE NaNs\n\n # Counts number of SFs in frame and sums particles to find\n # mean. We do not use mean function in case number of \n # particles changes between frames\n n_parts += len(SF)\n mean_SF[idx_type_SF] += np.sum(SF[:,idx_type_SF],axis=0)\n cov_SF[idx_type_SF[:,None],idx_type_SF[None,:]] += \\\n np.dot(SF[:,idx_type_SF].T,SF[:,idx_type_SF])\n\n # Calculates mean and covariance\n mean_SF[idx_type_SF] /= float(n_parts)\n cov_SF[idx_type_SF[:,None],idx_type_SF[None,:]] /= \\\n float(n_parts)\n cov_SF[idx_type_SF[:,None],idx_type_SF[None,:]] -= \\\n np.outer(mean_SF[idx_type_SF],mean_SF[idx_type_SF])\n std_SF = np.sqrt(np.diagonal(cov_SF))\n\n # Checks if std_SF == 0 for any structure functions\n if np.any(std_SF==0):\n print('WARNING: stdev of following structure functions is 0')\n idx_0s = np.where(std_SF==0)[0]\n for idx_0 in idx_0s:\n std_SF[idx_0] = 1\n if idx_0 < self.__n_SF_rad:\n mu = self.mus[idx_0]\n L = self.Ls[idx_0]\n X = self.radial_Xs[idx_0]\n Y = self.radial_Ys[idx_0]\n print(' radial structure function: mu = '+str(mu)+\\\n ', L = '+str(L)+', X = '+str(X)+', Y = '+str(Y))\n else:\n idx_0 -= self.__n_SF_rad\n xi = self.xis[idx_0]\n l = self.lambdas[idx_0] \n z = self.zetas[idx_0]\n X = self.angular_Xs[idx_0]\n Y = self.angular_Ys[idx_0]\n Z = self.angular_Za[idx_0]\n print(' angular structure function: xi = '+str(xi)+\\\n ', lambda = '+str(l)+', zeta = '+str(z)+\\\n ', X = '+str(X)+', Y = '+str(Y)+', Z = '+str(Z))\n\n self._mean_SF = mean_SF\n self._cov_SF = cov_SF\n self._std_SF = std_SF",
"def __init__(self, mean,std):\n stdinv = 1.0/std\n normalc = stdinv*(1.0/np.sqrt(np.pi))\n self.sigma = std\n self.mean = mean\n self._normconst = normalc\n return",
"def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicPoissonDistribution(self.mu)\n self.lowerBound = 0.0\n self.upperBound = sys.float_info.max\n else:\n self.raiseAnError(IOError,'Truncated poisson not yet implemented')",
"def __init__(self, particles):\n self.particles = particles",
"def reset_parameters(self):\n logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_like_transformer_xl(n, p, std=0.02)",
"def initialize_filter(self):\n shape = self.filter_size + (self.input_shape[-1], self.channels)\n self.filter = self.filter_initializer(shape)",
"def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicLogisticDistribution(self.location,self.scale)\n else:\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicLogisticDistribution(self.location,self.scale,a,b)",
"def initialize(self):\n\t\tmu = 0\n\t\tsigma = np.sqrt(2 / self.dataset[\"d\"])\n\n\t\tself.F1 = np.random.normal(mu, sigma, self.F1.shape)\n\t\tself.F2 = np.random.normal(mu, sigma, self.F2.shape)\n\t\tself.W = np.random.normal(mu, sigma, self.W.shape)\n\n\t\tself.F1_momentum = np.zeros(self.F1.shape)\n\t\tself.F2_momentum = np.zeros(self.F2.shape)\n\t\tself.W_momentum = np.zeros(self.W.shape)",
"def __init__(self, std: Union[torch.Tensor, float, Distribution], initial_dist: Distribution = None):\n\n if not isinstance(std, torch.Tensor):\n normal = Normal(0., 1.)\n else:\n normal = Normal(0., 1.) if std.shape[-1] < 2 else Independent(Normal(torch.zeros_like(std), std), 1)\n\n super().__init__((_f, _g), (std,), initial_dist or normal, normal)",
"def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.lowerBound,self.upperBound,self.low)",
"def __init__(self, scale, **kwargs):\n super(NormalNoise, self).__init__(**kwargs)\n self._scale = scale",
"def scaled_init_method_normal(sigma, num_layers):\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_",
"def scaled_init_method_normal(sigma, num_layers):\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_",
"def __init__(self, sigma):\n self.sigma = float(sigma)\n super().__init__(domain=functional.domain,\n range=functional.domain, linear=False)\n\n # Setting up parameters\n self.const = 1 / (functional.epsilon * sigma)",
"def _WeightInit(self, stddev):\n return init_ops.truncated_normal_initializer(stddev=stddev)",
"def normal_init(self, shape):\n return np.random.normal(size=(shape[0],shape[1]))*0.01",
"def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.__iteration_number = kwargs['iteration_number']\n self.__particles = [\n PSOParticle(**kwargs, bit_generator=self._random)\n for _ in range(kwargs['particles'])\n ]\n\n # The library stores particles in the visualizer .... groan\n positions = [particle.position for particle in self.__particles]\n self._visualizer = NoVisualizer(**kwargs)\n self._visualizer.add_data(positions=positions)",
"def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]"
] | [
"0.64824027",
"0.6470281",
"0.63342357",
"0.632274",
"0.632274",
"0.632274",
"0.632274",
"0.632274",
"0.62671024",
"0.6255693",
"0.6255693",
"0.6152836",
"0.6148171",
"0.61125165",
"0.61011827",
"0.6100128",
"0.6099897",
"0.6076007",
"0.60639673",
"0.60597944",
"0.6055383",
"0.60390997",
"0.602082",
"0.6010557",
"0.6010557",
"0.59979135",
"0.5995256",
"0.59801537",
"0.5979354",
"0.59400445"
] | 0.75238234 | 0 |
returns particle with the highest weight | def get_best_particle(self):
index = self.weights.argmax()
return self.particles[index, :] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_greatest_probability(self):\n greatest = -1\n for i in range(self.dim):\n for j in range(self.dim):\n if self.kb[i][j] > greatest:\n greatest = self.kb[i][j]\n\n return greatest",
"def get_best_value(self):\n # Todo: implement\n best_value_global = -inf\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n best_value_global = particle.best_value\n return best_value_global # Remove this line",
"def getMostLikelyPos(self):\n mostLikelyPos = None\n mostLikelyProb = None\n beliefDist = self.getBeliefDistribution()\n for part in self.particles:\n currProb = beliefDist[part]\n if mostLikelyPos is None or currProb > mostLikelyProb:\n mostLikelyPos = part \n mostLikelyProb = currProb\n return mostLikelyPos",
"def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy",
"def _best_individual(self):\n return max(self._population, key=attrgetter(\"fitness\"))",
"def _get_maxth(self):\n return self.__maxth",
"def _get_maxth(self):\n return self.__maxth",
"def _get_maxth(self):\n return self.__maxth",
"def _get_maxth(self):\n return self.__maxth",
"def _get_maxth(self):\n return self.__maxth",
"def _get_maxth(self):\n return self.__maxth",
"def best(self):\n if len(self) == 0:\n return None\n return max_elems(self, key=attr.getter(\"value\"), gt=self.solver.sense.is_better)[0]",
"def get_max_density(self):\n max_density = str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density",
"def produce(self, key=lambda x: 1.0):\n return max(self.data[0], key=key)",
"def max_weight(self):\n raise NotImplementedError(\"subclasses need to override this method\")",
"def find_max(self):\n return max(self.nodes, key=int)",
"def max(self):\r\n\t\treturn max(self.sample)",
"def get_best_position(self):\n # Todo: implement\n best_value_global = -inf\n position = None\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n position = particle.best_position\n best_value_global = particle.best_value\n return position",
"def __findBestLogProbability(self):\n best_model = None\n highest_log_probability = -sys.maxsize# (np.finfo(float).eps)\n\n # Find the highest model\n for item in self.data_array:\n if item[1] > highest_log_probability:\n best_model = item\n highest_log_probability = item[1]\n\n return best_model",
"def mod_max(self):\n phs = set()\n for k in self.get_fd_part_j(self.id):\n phs.add(self.get_prp_j(k)[0])\n if (1 in phs) and (2 not in phs) and (self.get_prp_j(self.id)[0] != max(phs)):\n self.all_seen = set()\n return max(phs)\n else:\n return self.get_prp_j(self.id)[0]",
"def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]",
"def get_worst_fitness(self):\n f = min(self.characters, key=operator.attrgetter('fitness'))\n self.worst_fitness = round(f.fitness, 3)",
"def personal_best(scores):\n return max(scores)",
"def max(self) -> float:\n return stats.max(self)",
"def max(self):\n no = self.root\n if no:\n no = self.__search_node_max_esq(no)\n if no:\n return no.valor\n return None",
"def get_bprop_maximum(self):\n input_grad = G.MaximumGrad()\n\n def bprop(x, y, out, dout):\n dx, dy = input_grad(x, y, dout)\n return dx, dy\n return bprop",
"def get_highest_wavenumber_beam(self):\n return Beam(\n wavenumber=self.wavenumber*(1+(5*self.sigma/(self.photon_energy-5*self.sigma))), # Gaussian truncates after 4-5 sigmas\n focus_x=self._focus_xFWHM,\n focus_y=self._focus_yFWHM,\n focus_shape=self._focus_shape,\n fluence=self.get_photons_per_pulse()\n )",
"def max(self):\n most = self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] > least:\n most = self.data[i]\n return most",
"def dirichlet_max(sampled_probas):\n\talphas = dirichlet_fit(sampled_probas)\n\treturn alphas.max(1)",
"def getNextHighest(self):\r\n maxScore = -1\r\n idx = -1\r\n for i, s in enumerate(self.scores):\r\n if s.score > maxScore:\r\n maxScore = s.score\r\n idx = i\r\n if idx != -1:\r\n score = self.scores[idx]\r\n del self.scores[idx]\r\n return score\r\n else:\r\n return None"
] | [
"0.6973724",
"0.6712545",
"0.66970956",
"0.65469986",
"0.62083495",
"0.6188585",
"0.6188585",
"0.6188585",
"0.6188585",
"0.6188585",
"0.6188585",
"0.6130807",
"0.6107099",
"0.6070922",
"0.6058072",
"0.6015375",
"0.59714365",
"0.59499085",
"0.5919791",
"0.5918699",
"0.58809555",
"0.58807445",
"0.58589506",
"0.5858778",
"0.58501565",
"0.5846064",
"0.58434445",
"0.58335245",
"0.5815386",
"0.5814076"
] | 0.7869971 | 0 |
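The record above pairs its query with a helper that returns the particle carrying the largest weight. A minimal, self-contained sketch of the same argmax selection (the ParticleFilter class and the numbers are hypothetical, not taken from the dataset):

import numpy as np

class ParticleFilter:
    # Hypothetical container mirroring the positive document above.
    def __init__(self, n_particles, state_dim):
        self.particles = np.arange(n_particles * state_dim,
                                   dtype=float).reshape(n_particles, state_dim)
        self.weights = np.ones(n_particles) / n_particles

    def get_best_particle(self):
        # argmax over the weight vector gives the row index of the
        # most likely particle state
        index = self.weights.argmax()
        return self.particles[index, :]

pf = ParticleFilter(n_particles=100, state_dim=2)
pf.weights[42] = 0.5            # pretend particle 42 gathered the most weight
print(pf.get_best_particle())   # [84. 85.], i.e. the state of particle 42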
move particle with a random gaussian | def __move(particle, motion):
particle[0] += random.gauss(0.0, motion)
particle[1] += random.gauss(0.0, motion)
return particle | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutate(self):\n #mutation_size = max(1,int(round(random.gauss(15,4))))/100\n\n\n\n mutation_size = max(1,int(round(random.gauss(15,4))))/100\n \"\"\"\n Changed the mutation by using random.randint rather than the gaussian one \n after observing that the gaussian random never really gave an output of more than 0.25\n \"\"\"\n\n #Decide what will be mutated, just randomly picking onr of the three params\n mutation_type = random.choice(self.params)\n\n #Mutate the thing\n if mutation_type == \"diameter\":\n \"\"\"\n Over here, what we are providing a range between self.diameter*x where x=1-mutation size and self.diameter*y where =1+mutation size\n Basically we add or subtract from 1 because the mutation has to be small\n \"\"\"\n self.diameter = max(1,random.randint(int(self.diameter*(1-mutation_size)),int(self.diameter*(1+mutation_size))))\n return self.diameter\n #same thing here\n elif mutation_type == \"pos\":\n x = max(0,random.randint(int(self.pos.x*(1-mutation_size)),int(self.pos.x*(1+mutation_size))))\n y = max(0,random.randint(int(self.pos.y*(1-mutation_size)),int(self.pos.y*(1+mutation_size))))\n self.pos = Point(min(x,self.size[0]),min(y,self.size[1]))\n return self.pos\n elif mutation_type == \"color\":\n r = min(max(0,random.randint(int(self.color.r*(1-mutation_size)),int(self.color.r*(1+mutation_size)))),255)\n g = min(max(0,random.randint(int(self.color.g*(1-mutation_size)),int(self.color.g*(1+mutation_size)))),255)\n b = min(max(0,random.randint(int(self.color.b*(1-mutation_size)),int(self.color.b*(1+mutation_size)))),255)\n self.color = Color(r,g,b)\n return self.color",
"def throw(self):\n\n self.vx = (2 * random.random()) - 1\n self.vy = (4 * random.random()) + 4",
"def moveRandomly(sigma, Walkers):\n ###For each walker, we want to move their coordinate by an amount determined\n ###by a gaussian (np.random.normal might be useful here). Where should the gaussian be centered? Use sigma\n ###for width scaling.",
"def move_rand_particle(particles, lj_pot):\n\n # Make a copy of arrays because Python\n particles_new = particles.copy()\n lj_pot_new = lj_pot.copy()\n\n # Pick a random particle and move it slightly\n pIdx = np.random.randint(n_particles)\n particles_new[pIdx, :] += dL * np.random.rand(3)\n dx = dx_particles(particles_new, pIdx)\n\n dU = 0.0\n for i in range(pIdx):\n rsq = np.dot(dx[i, :], dx[i, :])\n lj_pot_new[i, pIdx] = compute_lj_potential(rsq, rc ** 2)\n dU = lj_pot_new[i, pIdx] - lj_pot[i, pIdx]\n for i in range(pIdx + 1, n_particles):\n rsq = np.dot(dx[i, :], dx[i, :])\n lj_pot_new[i, pIdx] = compute_lj_potential(rsq, rc ** 2)\n dU = lj_pot_new[i, pIdx] - lj_pot[i, pIdx]\n\n return particles_new, lj_pot_new, dU",
"def step_particles(particle,self):\n\n self.models[particle].step()\n\n self.states[particle] = (self.models[particle].agents2state()\n\n + np.random.normal(0, self.particle_std**2, \n\n size=self.states[particle].shape))\n\n self.models[particle].state2agents(self.states[particle])\n\n return self.models[particle], self.states[particle]",
"def throw(target, sigma):\n \n x = random.gauss(target[0], sigma)\n y = random.gauss(target[1], sigma)\n\n dart_pos = np.array([x, y])\n \n return dart_pos",
"def sample(self):\n # CITATION corrected by : https://github.com/l5shi/Multi-DDPG-with-parameter-noise/blob/master/Multi_DDPG_with_parameter_noise.ipynb\n x = self.x_prev\n dx = self.theta * (self.mu - x) * self.dt + self.sigma * np.sqrt(\n self.dt\n ) * np.random.normal(size=self.mu.shape)\n self.x_prev = x + dx\n return self.x_prev",
"def move(self):\n if self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1",
"def metropolis_step(self, positions):\n \"\"\"with brute-force sampling of new positions.\"\"\"\n\n # r = random.random()*random.choice((-1, 1))\n # r is a random number drawn from the uniform prob. dist. in [0,1]\n r = np.zeros(self.num_d)\n for i in range(self.num_d):\n r[i] = np.random.uniform(-1, 1)\n # Pick a random particle\n random_index = np.random.randint(0, high=len(positions))\n new_positions = np.array(positions)\n new_random_position = new_positions[random_index, :]\n # Suggest a new move\n new_positions[random_index, :] = new_random_position + r*self.delta_R\n # Old system and wavefunction\n wavefunction = self.w.wavefunction(positions)\n old_wavefunction_squared = wavefunction**2\n\n # Test the new position with a new system and wavefunction\n # sys_test = System(self.num_p, self.num_d)\n # sys_test.positions_distances(new_positions)\n # alpha = self.w.alpha\n # beta = self.w.beta\n # a = self.w.a\n # wave_test = Wavefunction(self.num_p, self.num_d, alpha, beta, a, sys_test)\n # test_wavefunction = wave_test.wavefunction(new_positions)\n test_wavefunction = self.w.wavefunction(new_positions)\n\n new_wavefunction_squared = test_wavefunction**2\n # print ('Old = ', positions)\n\n if new_wavefunction_squared <= 1e-14:\n pass\n else:\n # acceptance_ratio = self.w.wavefunction_ratio(positions,\n # new_positions)\n acceptance_ratio = new_wavefunction_squared/old_wavefunction_squared\n epsilon = np.random.sample()\n\n if acceptance_ratio > epsilon:\n positions = new_positions\n # print ('New = ', positions)\n # self.s.distances_update(positions, random_index)\n # self.s.positions_distances(new_positions)\n self.c += 1.0\n\n else:\n pass\n\n return positions",
"def advance_generation(self):\n # Todo: implement\n for particle in self.particles:\n if particle.value > particle.best_value:\n particle.best_position = particle.x\n particle.best_value = particle.value\n rp = random.uniform(0.0, 1.0)\n rg = random.uniform(0.0, 1.0)\n particle.v = self.w * particle.v + self.phip * rp * (particle.best_position - particle.x) + self.phig * rg * (self.get_best_position() - particle.x)\n particle.x = particle.x + particle.v\n particle.evaluated = False",
"def _move_particle(self, ip):\n # RANDOM WALK\n # ADVANCE ONLY THE PARTICLES THAT ARE \"ON\" (i.e. ABOVE STTHR).\n #\n particle = self.particles[ip] # get particle\n props = [\"state\", \"type\", \"x\", \"y\", \"ux\", \"vy\", \"factor\", \"tmem\"]\n state, pType, x, y, ux, vy, factor, tmem = particle.get_from_keys(props)\n if state > STTHR and pType == 1:\n DU = -(ux - UXM)*2.0*TFREQ*self.DT + CLANG*self.SQRTDT*normal()\n DV = -(vy - VYM)*2.0*TFREQ*self.DT + CLANG*self.SQRTDT*normal()\n UXP = ux + DU\n VYP = vy + DV\n XP = x + UXP*self.DT*factor\n YP = y + VYP*self.DT*factor\n STP = state*np.exp(-self.DT/tmem)\n particle.update(ux=UXP, vy=VYP, x=XP, y=YP, state=STP)\n elif (state > STTHR) and pType == 2:\n DU = ULAM*normal()\n DV = ULAM*normal()\n XP = x + DU*self.DT\n YP = y + DV*self.DT\n STP = state*np.exp(-self.DT/ TMEMRAD)\n particle.update(x=XP, y=YP, state=STP)\n if x > self.grid.XMAX - self.grid.DX:\n particle.update(x=self.grid.XMAX - self.grid.DX, state=0.)\n elif x < self.grid.XMIN + self.grid.DX:\n particle.update(x=self.grid.XMIN + self.grid.DX, state=0.)\n if y > self.grid.YMAX - self.grid.DY:\n particle.update(y=self.grid.YMAX - self.grid.DY, state=0.)\n elif y < self.grid.YMIN + self.grid.DY:\n particle.update(y=self.grid.YMIN + self.grid.DY, state=0.)",
"def move_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point",
"def distribute_Gaussian(self):\n\n sigma_x = np.sqrt(self.emitx*self._betax)\n sigma_xp = np.sqrt(self.emitx*self._gammax)\n\n sigma_y = np.sqrt(self.emity*self._betay)\n sigma_yp = np.sqrt(self.emity*self._gammay)\n\n self.particles[:,0] = np.random.randn(self.npart)*sigma_x #set x-coordinates\n self.particles[:,1] = np.random.randn(self.npart)*sigma_xp #set xp-coordinates\n self.particles[:,2] = np.random.randn(self.npart)*sigma_y #set y-coordinates\n self.particles[:,3] = np.random.randn(self.npart)*sigma_yp #set yp-coordinates",
"def _sample_epislon(self, cur_y, cur_z):\n old_loglik = self._loglik(cur_y, cur_z)\n old_epislon = self.epislon\n \n # modify the feature ownership matrix\n self.epislon = np.random.beta(1,1)\n new_loglik = self._loglik(cur_y, cur_z)\n move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));\n if random.random() < move_prob:\n pass\n else:\n self.epislon = old_epislon",
"def _randomVelocity(self):\n\t\treturn random.choice([-1, 1]) * random.randint(10, 50)",
"def mutate(vector, deviation=MUTATE_DEVIATION):\n for i in range(VECTOR_SIZE):\n # Each value is sampled from gaussian distribution where the previous value is the mean:\n vector[i] = random.gauss(vector[i], deviation)",
"def mutate(x, mu, sigma):\n y = x.deepcopy()\n str_x = str(int(y.position))\n # flag = np.random.rand(*x.position.shape) <= mu\n # ind = np.argwhere(flag)\n # y.position[ind] += sigma*np.random.rand(*ind.shape)\n flag = np.random.rand(*(len(str_x),)) <= mu\n ind = np.argwhere(flag)\n str_y = \"0\"\n for i in ind:\n val = float(str_x[i[0]])\n val += sigma * np.random.uniform(0, 1, 1)\n if i[0] == 0:\n str_y = \"\".join((\"\", \"{}\".format(int(val)), str_x[1:]))\n else:\n str_y = \"\".join((str_x[:i[0] - 1], \"{}\".format(int(val)), str_x[i[0]:]))\n str_x = str_y\n y.position = int(str_y)\n return y",
"def bigaussian(\n n_particles: int,\n mean: Tuple[float, float, float, float, float],\n geometric_emittance_h: float,\n geometric_emittance_v: float,\n sigma_p: float,\n) -> np.array:\n cov = np.diag(\n (\n geometric_emittance_h,\n geometric_emittance_h,\n geometric_emittance_v,\n geometric_emittance_v,\n sigma_p ** 2,\n )\n )\n return np.random.multivariate_normal(mean, cov, n_particles).T",
"def PSO(m, n, alpha1, alpha2, omega, lower_limit, upper_limit, iterations, fitness, o):\n\n ##initialize particles. Each row is one particle.\n f_g = np.Inf\n x = np.random.uniform(lower_limit, upper_limit, (n, m))\n v = np.zeros(x.shape)\n f_p = fitness(x, o)\n\n # p vector is the personal best vector\n p = x\n # g vector is a single vectot that is the global best\n g = x[np.argmin(f_p)]\n\n track = []\n time_track = []\n \n for i in range(iterations):\n time_first = time.time()\n f_i = fitness(x, o)\n\n # Update personal bests\n cond = f_i < f_p\n p[cond] = x[cond]\n f_p[cond] = f_i[cond]\n\n # update global best (all time)\n if np.min(f_p) < f_g:\n f_g = np.min(f_p)\n g = g = x[np.argmin(f_p)]\n\n # compute velocity\n v = omega*v + alpha1*np.random.uniform(0, 1, (n, m))*(p - x) + \\\n alpha2*np.random.uniform(0, 1, (n, m))*(g - x)\n\n # update positions \n x = x + v\n \n track.append([f_g])\n time_track.append(time.time() - time_first)\n\n time_track = np.array(time_track)\n # print(\"mean_time: {:.2e}\".format(np.mean(time_track)))\n # print(\"mean_time: {:.2e}\".format(np.std(time_track)))\n \n return np.resize(np.array(track), (3000,))",
"def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background",
"def random_glove_generator(emb_mean, emb_stddev):\n x = np.random.normal(loc=0.0, scale=1.0, size=len(emb_mean))\n x_rand = np.multiply(x, emb_stddev) + emb_mean\n return x_rand",
"def propose(x, jump = 0.1):\n\treturn (x[0] + random.gauss(0, jump), x[1] + random.gauss(0, jump))",
"def MoveRandom(self):\n r = random.randint(0,3)\n if r == 0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1",
"def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)",
"def random_position():\n pos = np.random.randn(3)\n pos[2] = 0\n return pos",
"def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100",
"def sample(self):\n x = self.state\n# dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state",
"def update_position(self):\n new_position = []\n for i in range(self.num_literals):\n r = random()\n position_i = 1 if r < self.sigmoid(self.velocity[i]) else 0\n new_position.append(position_i)\n self.position = new_position",
"def __init__(self):\n #random.uniform(1, 5) = random float values from 1-5 which will determine the velocity \n self.dx = random.uniform(1, 5)\n self.dy = random.uniform(1, 5)",
"def mutation(self):\n\n for r in range(self.pop_num*3, 5): # Mutation.\n for w in range(0,self.length): \n if random.random()<0.2: \n self.par_and_sons[r].A[w] = self.par_and_sons[r].A[w] + np.random.randint(-20, 20) # Offset + -20 pixels."
] | [
"0.6503776",
"0.63839597",
"0.63701814",
"0.63592017",
"0.62733364",
"0.6221267",
"0.6208915",
"0.62070596",
"0.61650854",
"0.6142163",
"0.6131444",
"0.6105582",
"0.60798484",
"0.607984",
"0.6073047",
"0.60154426",
"0.60147464",
"0.60069877",
"0.5957154",
"0.59550226",
"0.59444344",
"0.59425426",
"0.59412724",
"0.5939836",
"0.5937778",
"0.591496",
"0.5900153",
"0.589583",
"0.588893",
"0.58839774"
] | 0.7620571 | 0 |
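The positive document above perturbs a particle's x and y coordinates with zero-mean Gaussian noise whose standard deviation is the motion parameter. A short runnable sketch of that step (values below are illustrative only):

import random

def move(particle, motion):
    # add independent zero-mean Gaussian noise to each coordinate
    particle[0] += random.gauss(0.0, motion)
    particle[1] += random.gauss(0.0, motion)
    return particle

random.seed(0)                       # fixed seed so the example is repeatable
print(move([1.0, 2.0], motion=0.5))  # coordinates jittered around (1.0, 2.0)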
measures the angle between the particle and the robot | def __measurement(particle_pos, robot_pos):
return np.rad2deg(
math.atan2(particle_pos[1] - robot_pos[1],
particle_pos[0] - robot_pos[0])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angle(self) -> float:\n ...",
"def _joint_angle_control(self):\n\n error = self.target_pos - self.robot_arm_pos\n return self._pd_control(error) + self.torque",
"def angle(self) -> int:",
"def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)",
"def getAngle(self):\n return self.vector.angle",
"def angle(self):\n return 0",
"def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360",
"def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))",
"def get_rotationalAngularPosition(self, t): # returns [rad]\n angle = self.theta0 + self.rotationalAngularVelocity * t # angular position [rad]\n return angle",
"def avl_angle(self):\n dif_height = (self.heights[5] - self.heights[7])\n dif_position = (self.positions[0][7] - self.positions[0][5])\n angle = atan(dif_height / dif_position) / 1.5 * 180 / pi\n return angle",
"def getAngle(p1, p2, p3):\n\tv1 = p1 - p2\n\tv2 = p3 - p2\n\tmag = la.norm(v1) * la.norm(v2)\n\tc = np.dot(v1, v2) / mag\n\tcross = np.cross(v1,v2)\n\ts = la.norm(cross)/mag\n\tatang = math.atan2(s,c)\n\tang = atang * 180 / math.pi\n\treturn ang",
"def angle(self):\n return atan2(self.v.y, self.v.x)",
"def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)",
"def update_angle(self, mouse):\n offset = (mouse[1]-self.player.rect.centery, mouse[0]-self.player.rect.centerx)\n self.angle = degrees(atan2(*offset))\n print(\"angle:\", self.angle)",
"def do_azangle(self):\n angle_1, angle_2 = cbp.potentiometer.main()\n current_angle = angle_2\n #print(current_angle)\n self.azangle = current_angle\n return current_angle",
"def motor_angles(self):\n return np.asarray(self._robot_state.position)",
"def angle(self) -> float:\n return self._angle",
"def angle(self) -> float:\n return self._angle",
"def angle(self, angle: int, time: int = 0, /) -> None:",
"def getAngle(self):\n x, y = self.components\n return math.atan2(y, x)",
"def angle(self):\r\n return self.model.angle",
"def angle(self):\n if self.__trigger == gyro_trigger_mode.GET_ANGLE_TRIGGER_READ:\n self.read_and_update_angle()\n return self.__angle",
"def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))",
"def getAngle(self):\n return self.articulateEncoder.getDistance()+self.angleOffset",
"def get_angle(pt1,pt2,pt3):\r\n a = float(get_distance(pt1,pt2))\r\n b = float(get_distance(pt2,pt3))\r\n c = float(get_distance(pt1,pt3))\r\n angle = np.arccos((a**2 + b**2 - c**2)/(2*a*b)) # Law of Cosines \r\n \r\n return angle",
"def angle( nt1, nt2, nt3 ):\n if vector(nt1, nt2) == [0,0]:\n print(\"nt1\", nt1.seqpos, \" at \", nt1.x, nt1.y, \" is at the same position as nt2\", nt2.seqpos)\n if vector(nt2, nt3) == [0,0]:\n print(\"nt2\", nt2.seqpos, \" at \", nt2.x, nt2.y, \" is at the same position as nt3\", nt3.seqpos)\n #print(vector(nt1, nt2), vector(nt2, nt3))\n if vectors_close(vector(nt1, nt2), vector(nt2, nt3)):\n # These vectors are identical and that is messing with the ability to call two things parallel?\n return 180.0\n return 180.0 - math.degrees(math.acos(dot(vector(nt1, nt2), vector(nt2, nt3)) / (mod(vector(nt1, nt2)) * mod(vector(nt2, nt3)))))",
"def get_angle(self):\n return self.bot_client.send_command(_Command.GetAngle)",
"def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle",
"def theta(self):\n return float(np.arctan2(self.y, self.x))",
"def deltaAngle(x, y):\n return math.atan2(math.sin(x-y), math.cos(x-y))"
] | [
"0.728576",
"0.7053665",
"0.6753263",
"0.673359",
"0.66854227",
"0.66108036",
"0.6571537",
"0.64781874",
"0.64476633",
"0.6414931",
"0.6412715",
"0.6409492",
"0.6391302",
"0.63797784",
"0.6360373",
"0.6325254",
"0.6285672",
"0.6285672",
"0.6236196",
"0.6235026",
"0.62317914",
"0.6230336",
"0.6225495",
"0.61815536",
"0.6171766",
"0.6168167",
"0.6154032",
"0.6142318",
"0.61289746",
"0.6128114"
] | 0.78197676 | 0 |
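The measurement in the record above is the bearing from the robot to the particle: atan2 of the coordinate differences, converted from radians to degrees. A standalone sketch with made-up positions:

import math
import numpy as np

def measurement(particle_pos, robot_pos):
    # bearing from robot to particle, in degrees
    return np.rad2deg(math.atan2(particle_pos[1] - robot_pos[1],
                                 particle_pos[0] - robot_pos[0]))

print(measurement((2.0, 2.0), (1.0, 1.0)))  # 45.0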
Calculate the yearly load factor for every fueltype by dividing the yearly average load by the peak hourly load in a year. Arguments | def calc_lf_y(fuel_yh, average_fuel_yd):
# Calculate average yearly fuel per fueltype
average_load_y = np.average(average_fuel_yd, axis=1)
# Calculate maximum hour in every day of a year
max_load_h_days = np.max(fuel_yh, axis=2)
max_load_h = np.max(max_load_h_days, axis=1)
    # Calculate yearly load factor for every fueltype
with np.errstate(divide='ignore', invalid='ignore'):
load_factor_y = (average_load_y / max_load_h) * 100 #convert to percentage
load_factor_y[np.isnan(load_factor_y)] = 0
return load_factor_y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]",
"def calc_base_year_data(base_year_vehicles_df):\n pass",
"def load_factor_d(self, data):\n lf_d = np.zeros((data['nr_of_fueltypes']))\n\n # Get day with maximum demand (in percentage of year)\n peak_d_demand = self.fuels_peak_d\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for k, fueldata in enumerate(self.rs_fuels_tot_enduses_d):\n average_demand = np.sum(fueldata) / 365 # Averae_demand = yearly demand / nr of days\n\n if average_demand != 0:\n lf_d[k] = average_demand / peak_d_demand[k] # Calculate load factor\n\n lf_d = lf_d * 100 # Convert load factor to %\n\n return lf_d",
"def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]\n #~ print 'self.average_load',self.average_load",
"def load_factor_h_non_peak(self, data):\n load_factor_h = np.zeros((data['nr_of_fueltypes'], 1)) # Initialise array to store fuel\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for fueltype, fueldata in enumerate(self.fuels_tot_enduses_h):\n\n '''all_hours = []\n for day_hours in self.fuels_tot_enduses_h[fueltype]:\n for h in day_hours:\n all_hours.append(h)\n maximum_h_of_day_in_year = max(all_hours)\n '''\n maximum_h_of_day_in_year = self.rs_fuels_peak_h[fueltype]\n\n average_demand_h = np.sum(fueldata) / (365 * 24) # Averae load = yearly demand / nr of days\n\n # If there is a maximum day hour\n if maximum_h_of_day_in_year != 0:\n load_factor_h[fueltype] = average_demand_h / maximum_h_of_day_in_year # Calculate load factor\n\n # Convert load factor to %\n load_factor_h *= 100\n\n return load_factor_h",
"def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)",
"def estimate_year_data(self, years, frequency):\n data_year = self.price.index.year.unique()\n no_data_year = {pd.Period(year) for year in years} - {pd.Period(year) for year in data_year} # which years do we not have data for\n\n if len(no_data_year) > 0:\n for yr in no_data_year:\n source_year = pd.Period(max(data_year))\n\n source_data = self.price[self.price.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.energy_growth, source_year, yr, frequency)\n self.price = pd.concat([self.price, new_data], sort=True) # add to existing\n\n source_data = self.p_regu[self.p_regu.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.growth, source_year, yr, frequency)\n self.p_regu = pd.concat([self.p_regu, new_data], sort=True) # add to existing\n\n source_data = self.p_regd[self.p_regd.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.growth, source_year, yr, frequency)\n self.p_regd = pd.concat([self.p_regd, new_data], sort=True) # add to existing",
"def load_factor_d_non_peak(self, data):\n lf_d = np.zeros((data['nr_of_fueltypes']))\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for k, fueldata in enumerate(self.rs_fuels_tot_enduses_d):\n\n average_demand = sum(fueldata) / 365 # Averae_demand = yearly demand / nr of days\n max_demand_d = max(fueldata)\n\n if max_demand_d != 0:\n lf_d[k] = average_demand / max_demand_d # Calculate load factor\n\n lf_d = lf_d * 100 # Convert load factor to %\n\n return lf_d",
"def get_yearly_energies_prices(\n energies,\n years_range,\n is_collective=False,\n ref_year=2002,\n scenario=\"normal\",\n price_variation_func=calculate_price_variation,\n):\n energies_data = Energy.objects.filter(identifier__in=energies)\n yearly_datum = list(\n YearlyEnergyPrice.objects.filter(\n energy__in=energies_data, year__range=years_range\n ).order_by(\"year\")\n )\n\n series = []\n for energy_data in energies_data:\n ratio = energy_data.price_multi_unit_discount if is_collective else 1\n yearly_energy_data = [x for x in yearly_datum if x.energy_id == energy_data.pk]\n series.append(\n {\n \"identifier\": energy_data.identifier,\n \"priceVariation\": price_variation_func(\n energy_data, years_range, ref_year, yearly_energy_data\n ),\n \"yearlyPrices\": [multiply(x.price, ratio) for x in yearly_energy_data],\n }\n )\n\n return series",
"def calc_load_factor_h(data, fuels_tot_enduses_h, rs_fuels_peak_h):\n load_factor_h = np.zeros((data['nr_of_fueltypes']))\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for fueltype, fuels in enumerate(fuels_tot_enduses_h):\n\n # Maximum fuel of an hour of the peak day\n maximum_h_of_day = rs_fuels_peak_h[fueltype]\n\n #Calculate average in full year\n average_demand_h = np.mean(fuels)\n\n # If there is a maximum day hour\n if maximum_h_of_day != 0:\n load_factor_h[fueltype] = average_demand_h / maximum_h_of_day # Calculate load factor\n\n # Convert load factor to %\n load_factor_h *= 100\n\n return load_factor_h",
"def load_shifting(\n fuel_yh,\n mode_constrained,\n param_lf_improved_cy,\n make_all_flat=False\n ):\n if param_lf_improved_cy == 0:\n pass # no load management\n else:\n\n # Calculate average for every day\n if mode_constrained:\n average_fuel_yd = np.average(fuel_yh, axis=1)\n else:\n average_fuel_yd = np.average(fuel_yh, axis=2)\n\n # Calculate load factors (only inter_day load shifting as for now)\n loadfactor_yd_cy = lf.calc_lf_d(\n fuel_yh, average_fuel_yd, mode_constrained)\n\n # Calculate current year load factors\n lf_improved_cy = calc_lf_improvement(\n param_lf_improved_cy,\n loadfactor_yd_cy)\n\n fuel_yh = lf.peak_shaving_max_min(\n lf_improved_cy,\n average_fuel_yd,\n fuel_yh,\n mode_constrained)\n\n # -------------------------------------------------\n # Convert all load profiles into flat load profiles\n # -------------------------------------------------\n if make_all_flat:\n if mode_constrained:\n sum_fueltypes_days = np.sum(fuel_yh) #sum over all hours\n average_fueltype = sum_fueltypes_days / 8760 # Average\n fuel_yh_empty = np.ones((fuel_yh.shape))\n fuel_yh = fuel_yh_empty * average_fueltype\n else:\n sum_fueltypes_days_h = np.sum(fuel_yh, 2) #sum over all hours\n sum_fueltypes_days = np.sum(sum_fueltypes_days_h, 1) #sum over all days\n average_fueltype = sum_fueltypes_days / 8760 #Average per fueltype\n fuel_yh_empty = np.ones((fuel_yh.shape))\n fuel_yh = fuel_yh_empty * average_fueltype[:, np.newaxis, np.newaxis]\n\n return fuel_yh",
"def year_emissions_intensity_rule(_m, y):\r\n\r\n return m.YEAR_EMISSIONS[y] / m.YEAR_DEMAND[y]",
"def run_year(self, year):\n pass",
"def max_drawdown_cal_year(self) -> float:\n return float(self.tsdf.groupby([self.tsdf.index.year]).apply(\n lambda x: (x / x.expanding(min_periods=1).max()).min() - 1).min())",
"def calc_lf_d(fuel_yh, average_fuel_yd, mode_constrained):\n # Get maximum hours in every day\n if mode_constrained:\n max_load_yd = np.max(fuel_yh, axis=1) #single fueltype\n else:\n max_load_yd = np.max(fuel_yh, axis=2) #multiple fueltypes\n\n # Unable local RuntimeWarning: divide by zero encountered\n with np.errstate(divide='ignore', invalid='ignore'):\n\n #convert to percentage\n daily_lf = (average_fuel_yd / max_load_yd) * 100\n\n # Replace by zero\n daily_lf[np.isinf(daily_lf)] = 0\n daily_lf[np.isnan(daily_lf)] = 0\n\n return daily_lf",
"def calc_perc_reducts():\n #Load RCP2.6 datq\n cubes = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')\n #Get the surface and high level SO2 emissions\n surf_cube = cubes[3][:,0]\n high_cube = cubes[1][:,0]\n cubes = iris.cube.CubeList([surf_cube,high_cube])\n\n for i in range(0,len(cubes)):\n #Add the year and month to the cube and extract for 2009 onwards\n iris.coord_categorisation.add_year(cubes[i],'time',name='year')\n iris.coord_categorisation.add_month(cubes[i],'time',name='month')\n cubes[i] = cubes[i].extract(iris.Constraint(year = lambda y: y >=2009))\n\n #Make the year-on-year reduction rates\n yoy_rates = []\n for cube in cubes:\n #Calculate the global mean timeseries\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n area_weights = iris.analysis.cartography.area_weights(cube)\n cube_mean = cube.collapsed(['latitude','longitude'],iris.analysis.MEAN,weights=area_weights)\n\n cube_rates = np.ones((cube_mean.shape))\n #Loop over the months and calculate the changes from the previous year\n #Calculate the year on year proportional changes in the global mean\n for i in range(12,cube_mean.shape[0]):\n cube_rates[i] = cube_mean[i].data / cube_mean[(i-12)].data\n\n yoy_rates.append(cube_rates)\n\n return yoy_rates",
"def get_fuel_price(state_abbr, fuel_type=\"NG\", year=False):\n\n if(not year):\n\n year = UpdateParams.today.year\n\n if fuel_type.upper() == \"NG\":\n\n series_ID = \"NG.N3035\" + state_abbr + \"3.A\"\n \n series_USA = \"NG.RNGWHHD.A\"\n \n series_LA = UpdateParams.api.data_by_series(series=\"NG.N3035\" + \"LA\" + \"3.A\")\n \n dict_key_LA = list(series_LA.keys())[0]\n\n elif fuel_type.upper() == \"COAL\":\n\n series_ID = \"COAL.COST.\" + state_abbr + \"-10.A\"\n\n series_USA = \"COAL.COST.US-10.A\"\n\n elif fuel_type.upper() == \"PETRO\":\n # state level wholesale/resale price data ends 2011\n series_ID = \"PET.EMA_EPPR_PWA_S\" + state_abbr + \"_DPG.A\"\n\n series_USA = \"PET.EMA_EPPR_PWG_NUS_DPG.A\"\n\n else:\n raise AssertionError(\"Please input a valid fuel_type\")\n\n fuel_series_USA = UpdateParams.api.data_by_series(series=series_USA)\n \n dict_key_USA = list(fuel_series_USA.keys())[0]\n \n # find latest USA value\n i = 0\n\n while True:\n \n try:\n fp_USA = fuel_series_USA[dict_key_USA][str(year-i) + \" \"] / 1.0\n\n break\n\n except:\n \n i += 1\n\n # Check if state-level available, if not return USA price\n try:\n fuel_series = UpdateParams.api.data_by_series(series=series_ID)\n\n dict_key = list(fuel_series.keys())[0]\n\n # if fuel price in state is empty return national price\n if all(v is None for v in list(fuel_series[dict_key].values())):\n \n return (fp_USA, year-i)\n \n except KeyError:\n \n return (fp_USA, year-i)\n\n j = 0\n\n # find latest year for state\n while True:\n\n try:\n fp_state = fuel_series[dict_key][str(year-j) + \" \"] / 1.0\n\n break\n\n except:\n\n j += 1\n \n if fuel_type.upper() == \"NG\":\n # series_LA is just the actual series not a series ID\n fp_mult = fp_state / series_LA[dict_key_LA][str(year-j) + \" \"]\n return (fp_mult * fp_USA/1.037, year-j)\n \n # return USA value if 2 years more recent vs state\n if ((year-i) - (year-j) >= 2) | (fp_state >= fp_USA):\n \n return (fp_USA/1.037, year-i)\n\n return (fp_state, year-j)",
"def year_cost_rule(_m, y):\r\n\r\n return sum(m.RHO[y, s] * m.SCEN[y, s] for s in m.S)",
"def getagefromyear(year=None):\n if year is None:\n print(\"Please enter the year to assign class to them\")\n try:\n t = datetime.datetime.today()\n b = datetime.datetime.strptime(str(year), '%Y')\n a = (t - b).days / 365\n a = int(a)\n if (a < 10) or (a > 80):\n a = None\n except:\n a = None\n return a",
"def load_factor(dh: DataHandler):\n\n # Can be easily defined recursively (only 'full_load_hours' if then saves but no big deal)\n return full_load_hours(dh) / 8760",
"def year_demand_rule(_m, y):\r\n\r\n return sum(m.SCENARIO_DEMAND[y, s] for s in m.S)",
"def get_average_emission_factors(prefix: str, year: int):\n genavg = pd.read_csv(\n outputs_folder(f\"{prefix}annual_generation_averages_by_fuel_{year}.csv\"),\n index_col=\"fuel_category\",\n )\n efs = {}\n for pol in POLLUTANTS:\n efs[pol] = {}\n for adjustment in ADJUSTMENTS:\n efs[pol][adjustment] = {}\n for fuel in SRC:\n column = get_rate_column(pol, adjustment, generated=True)\n if FUEL_TYPE_MAP[fuel] not in genavg.index:\n logger.warning(\n f\"fuel {FUEL_TYPE_MAP[fuel]} not found in file annual_generation_averages_by_fuel_{year}.csv, using average\"\n )\n efs[pol][adjustment][fuel] = genavg.loc[\"total\", column]\n else:\n efs[pol][adjustment][fuel] = genavg.loc[FUEL_TYPE_MAP[fuel], column]\n return efs",
"def calc_lf_season(seasons, fuel_region_yh, average_fuel_yd):\n seasons_lfs = {}\n for season, yeardays_modelled in seasons.items():\n\n average_fuel_yd_full_year = np.average(\n average_fuel_yd[:, ],\n axis=1)\n\n # Calculate maximum hour in year\n max_load_h_days_season = np.max(\n fuel_region_yh[:, yeardays_modelled],\n axis=2)\n\n max_load_h_season = np.max(max_load_h_days_season, axis=1)\n\n # Unable local RuntimeWarning: divide by zero encountered\n with np.errstate(divide='ignore', invalid='ignore'):\n\n #convert to percentage\n season_lf = (average_fuel_yd_full_year / max_load_h_season) * 100\n\n # Replace\n season_lf[np.isinf(season_lf)] = 0\n season_lf[np.isnan(season_lf)] = 0\n\n seasons_lfs[season] = season_lf\n\n return seasons_lfs",
"def get_yearly_data(name, startyr=None, endyr=None, interpolated=False):\n varinfo = get_varinfo(name)\n \n if varinfo[\"type\"] == \"yearly\":\n data = get_data(varinfo[\"id\"], startyr=startyr, endyr=endyr)\n giddict = dict()\n sorteddata = sorted(data[\"cells\"], key=lambda vd: vd[\"gid\"])\n for gid,valuedicts in itertools.groupby(sorteddata, key=lambda vd: vd[\"gid\"]):\n yrdict = dict([(valuedict[\"year\"],valuedict[\"value\"])\n for valuedict in valuedicts\n ])\n info = {\"data\": yrdict}\n giddict[gid] = info\n\n if interpolated:\n def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n \n def lerp(factor, fromval, toval):\n valrange = toval - fromval\n return fromval + valrange * factor\n \n for gid,info in giddict.items():\n yrdict = info[\"data\"]\n if len(yrdict) > 1:\n for (fromyr,fromval),(toyr,toval) in pairwise(sorted(yrdict.items(),key=lambda i: i[0])):\n curyr = fromyr + 1\n interpneeded = fromval != toval\n \n while curyr != toyr:\n if interpneeded:\n factor = (curyr - fromyr) / float(toyr - fromyr)\n yrdict[curyr] = lerp(factor, fromval, toval)\n else:\n yrdict[curyr] = fromval\n curyr += 1\n\n return giddict\n\n else:\n raise Exception(\"Could not find a yearly variable with that name\")",
"def periods_in_a_year(self) -> float:\n return self.length / self.yearfrac",
"def calc_yearly_mean(yy_dly, x_dly):\n return calc_yearly(yy_dly, x_dly, np.mean)",
"def getLoadData(self):\n\n\t\t# Variables\n\t\turl = 'http://mis.nyiso.com/public/dss/nyiso_loads.csv' # Url with the data\n\t\tresponse = urllib2.urlopen(url) # Reading url\n\t\tload_data = csv.reader(response) # Convering data to csv format\n\t\tyear = self.helper.getYear() # Current Year\n\t\thourly_loads = [] # Stores the loads per hour\n\t\tdaily_loads = {} # Stores the loads per hour of a given day\n\t\tmonthly_loads = {} # Stores the loads per day of a given month\n\t\tyearly_loads = {} # Stores the monthly loads in a year\n\n\t\t# Converting data from csv to dictionary\n\t\tfor row in load_data:\n\n\t\t\t# Ignoring first row\n\t\t\tif row[1] != \"Month\" and row[2] != \"Day\" and row[3] != 'Hr1':\n\t\t\t\tmonth = int(row[1])\n\t\t\t\tday = int(row[2])\n\n\t\t\t\t# Getting hourly loads\n\t\t\t\tfor i in range(3,27):\n\t\t\t\t\ttry:\n\t\t\t\t\t\thourly_loads.append(int(row[i]))\n\t\t\t\t\t# If there is an error reading the load then generate a \n\t\t\t\t\t# random load value between 15000 and 25000\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tpass\n\t\t\t\t\t\thourly_loads.append((randint(15000,25000)))\n\t\t\t\tdaily_loads[day] = hourly_loads\n\t\t\t\thourly_loads = []\n\t\t\t\tmonthly_loads[month] = daily_loads\n\t\t\t\tif self.helper.isEndOfMonth(month, day):\n\t\t\t\t\tdaily_loads = {}\n\n\t\tyearly_loads[year] = monthly_loads\n\n\t\treturn yearly_loads",
"def austral_year_daily(x, y):\n if isinstance(x, xr.DataArray):\n x = x.values\n \n jfmamj = x < 182.\n jasond = x >= 182.\n \n x_jasond = []\n y_jasond = []\n if any(jasond):\n x_jasond = x[jasond] - 181\n y_jasond = y[jasond]\n\n x_jfmamj = []\n y_jfmamj = []\n if any(jfmamj):\n x_jfmamj = x[jfmamj] + 184\n y_jfmamj = y[jfmamj]\n\n xout = np.concatenate([xi for xi in [x_jasond, x_jfmamj] if len(xi)])\n yout = np.concatenate([yi for yi in [y_jasond, y_jfmamj] if len(yi)])\n \n return xout, yout",
"def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. 
To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 
'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex",
"def get_max_fp(state_abbr, fuel_type=\"NG\", year=False):\n \n if(not year):\n\n year = UpdateParams.today.year\n\n if fuel_type.upper() == \"NG\":\n\n series_ID = \"NG.N3035\" + state_abbr + \"3.A\"\n\n elif fuel_type.upper() == \"COAL\":\n\n series_ID = \"COAL.COST.\" + state_abbr + \"-10.A\"\n\n elif fuel_type.upper() == \"PETRO\":\n\n series_ID = \"PET.EMA_EPPR_PWA_S\" + state_abbr + \"_DPG.A\"\n\n else:\n raise AssertionError(\"Please input a valid fuel_type\")\n \n # Check if state-level available, if not return USA price\n try:\n fuel_series = UpdateParams.api.data_by_series(series=series_ID)\n\n dict_key = list(fuel_series.keys())[0]\n\n # if fuel price in state is empty return national price\n if all(v is None for v in list(fuel_series[dict_key].values())):\n \n return 0.0\n \n except KeyError:\n \n return 0.0\n \n j = 0\n \n while True:\n\n try:\n return fuel_series[dict_key][str(year-j) + \" \"] / 1.0\n\n break\n\n except:\n\n j += 1"
] | [
"0.6781552",
"0.65222615",
"0.64230746",
"0.62575626",
"0.6094622",
"0.5996252",
"0.5939554",
"0.580923",
"0.57862556",
"0.57364595",
"0.57141274",
"0.5677293",
"0.56053805",
"0.553133",
"0.5470846",
"0.54136276",
"0.53950626",
"0.53443474",
"0.53425664",
"0.5330679",
"0.5271293",
"0.5267775",
"0.52655226",
"0.52360094",
"0.52161264",
"0.52100587",
"0.51986665",
"0.51734656",
"0.5172417",
"0.517172"
] | 0.6630462 | 1 |
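The load-factor record above divides the yearly average load by the peak hourly load and expresses the result as a percentage. A tiny worked sketch with a fabricated demand array shaped (fueltype, day, hour), following the same axis conventions:

import numpy as np

fuel_yh = np.full((1, 365, 24), 10.0)        # one fueltype, flat 10-unit demand
fuel_yh[0, 0, 0] = 20.0                      # a single peak hour

average_fuel_yd = np.average(fuel_yh, axis=2)           # daily averages
average_load_y = np.average(average_fuel_yd, axis=1)    # yearly average load
max_load_h = np.max(np.max(fuel_yh, axis=2), axis=1)    # peak hourly load
load_factor_y = average_load_y / max_load_h * 100       # roughly 50 percent
print(load_factor_y)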
tempfilt, coeffs, temp_sed, pz = readEazyBinary(MAIN_OUTPUT_FILE='photz', \ OUTPUT_DIRECTORY='./OUTPUT', \ CACHE_FILE = 'Same') Read Eazy BINARY_OUTPUTS files into structure data. If the BINARY_OUTPUTS files are not in './OUTPUT', provide either a relative or absolute path in the OUTPUT_DIRECTORY keyword. By default assumes that CACHE_FILE is MAIN_OUTPUT_FILE+'.tempfilt'. Specify the full filename if otherwise. | def readEazyBinary(MAIN_OUTPUT_FILE='photz', OUTPUT_DIRECTORY='./OUTPUT', CACHE_FILE='Same'):
#root='COSMOS/OUTPUT/cat3.4_default_lines_zp33sspNoU'
root = OUTPUT_DIRECTORY+'/'+MAIN_OUTPUT_FILE
###### .tempfilt
if CACHE_FILE == 'Same':
CACHE_FILE = root+'.tempfilt'
if os.path.exists(CACHE_FILE) is False:
print('File, %s, not found.' %(CACHE_FILE))
return -1,-1,-1,-1
f = open(CACHE_FILE,'rb')
s = np.fromfile(file=f,dtype=np.int32, count=4)
NFILT=s[0]
NTEMP=s[1]
NZ=s[2]
NOBJ=s[3]
tempfilt = np.fromfile(file=f,dtype=np.double,count=NFILT*NTEMP*NZ).reshape((NZ,NTEMP,NFILT)).transpose()
lc = np.fromfile(file=f,dtype=np.double,count=NFILT)
zgrid = np.fromfile(file=f,dtype=np.double,count=NZ)
fnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()
efnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()
f.close()
tempfilt = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\
'tempfilt':tempfilt,'lc':lc,'zgrid':zgrid,'fnu':fnu,'efnu':efnu}
###### .coeff
f = open(root+'.coeff','rb')
s = np.fromfile(file=f,dtype=np.int32, count=4)
NFILT=s[0]
NTEMP=s[1]
NZ=s[2]
NOBJ=s[3]
coeffs = np.fromfile(file=f,dtype=np.double,count=NTEMP*NOBJ).reshape((NOBJ,NTEMP)).transpose()
izbest = np.fromfile(file=f,dtype=np.int32,count=NOBJ)
tnorm = np.fromfile(file=f,dtype=np.double,count=NTEMP)
f.close()
coeffs = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\
'coeffs':coeffs,'izbest':izbest,'tnorm':tnorm}
###### .temp_sed
f = open(root+'.temp_sed','rb')
s = np.fromfile(file=f,dtype=np.int32, count=3)
NTEMP=s[0]
NTEMPL=s[1]
NZ=s[2]
templam = np.fromfile(file=f,dtype=np.double,count=NTEMPL)
temp_seds = np.fromfile(file=f,dtype=np.double,count=NTEMPL*NTEMP).reshape((NTEMP,NTEMPL)).transpose()
da = np.fromfile(file=f,dtype=np.double,count=NZ)
db = np.fromfile(file=f,dtype=np.double,count=NZ)
f.close()
temp_sed = {'NTEMP':NTEMP,'NTEMPL':NTEMPL,'NZ':NZ,\
'templam':templam,'temp_seds':temp_seds,'da':da,'db':db}
###### .pz
if os.path.exists(root+'.pz'):
f = open(root+'.pz','rb')
s = np.fromfile(file=f,dtype=np.int32, count=2)
NZ=s[0]
NOBJ=s[1]
chi2fit = np.fromfile(file=f,dtype=np.double,count=NZ*NOBJ).reshape((NOBJ,NZ)).transpose()
### This will break if APPLY_PRIOR No
s = np.fromfile(file=f,dtype=np.int32, count=1)
if len(s) > 0:
NK = s[0]
kbins = np.fromfile(file=f,dtype=np.double,count=NK)
priorzk = np.fromfile(file=f, dtype=np.double, count=NZ*NK).reshape((NK,NZ)).transpose()
kidx = np.fromfile(file=f,dtype=np.int32,count=NOBJ)
pz = {'NZ':NZ,'NOBJ':NOBJ,'NK':NK, 'chi2fit':chi2fit, 'kbins':kbins, 'priorzk':priorzk,'kidx':kidx}
else:
pz = None
f.close()
else:
pz = None
if False:
f = open(root+'.zbin','rb')
s = np.fromfile(file=f,dtype=np.int32, count=1)
NOBJ=s[0]
z_a = np.fromfile(file=f,dtype=np.double,count=NOBJ)
z_p = np.fromfile(file=f,dtype=np.double,count=NOBJ)
z_m1 = np.fromfile(file=f,dtype=np.double,count=NOBJ)
z_m2 = np.fromfile(file=f,dtype=np.double,count=NOBJ)
z_peak = np.fromfile(file=f,dtype=np.double,count=NOBJ)
f.close()
###### Done.
return tempfilt, coeffs, temp_sed, pz | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readtempfilt(MAIN_OUTPUT_FILE='photz', OUTPUT_DIRECTORY='./OUTPUT'):\n\n root = os.path.join(OUTPUT_DIRECTORY, MAIN_OUTPUT_FILE)\n \n ###### .tempfilt\n file_path = root+'.tempfilt'\n \n if os.path.exists(file_path) is False:\n raise ValueError('File, %s, not found.' %(file_path))\n\n with open(file_path,'rb') as f:\n # summary data\n s = np.fromfile(file=f,dtype=np.int32, count=4)\n NFILT=s[0] # number of filters\n NTEMP=s[1] # number of templates\n NZ=s[2] # number points on the redshift grid\n NOBJ=s[3] # number of objects\n # (?) template SED convolved with filter transmission at each redshift\n tempfilt = np.fromfile(file=f,dtype=np.double,count=NFILT*NTEMP*NZ).reshape((NZ,NTEMP,NFILT)).transpose()\n # filter pivot wavelengths\n lc = np.fromfile(file=f,dtype=np.double,count=NFILT)\n # redshift grid\n zgrid = np.fromfile(file=f,dtype=np.double,count=NZ)\n # observed flux\n fnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()\n # (?) error in observed flux\n efnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()\n \n tempfilt = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\\\n 'tempfilt':tempfilt,'lc':lc,'zgrid':zgrid,'fnu':fnu,'efnu':efnu}\n\n return tempfilt",
"def main():\n print \"=\" * 78\n print \"%s %s\" % (__prog_name__, __version__)\n debug, input_file_names = check_cli()\n if not input_file_names:\n _error(\"No input file name found!\\n\\n%s\" % __help__)\n for input_file_name in input_file_names:\n print \"* Reading\", input_file_name\n file_base_name = os.path.splitext(os.path.basename(input_file_name))[0]\n file_dir_name = os.path.dirname(input_file_name)\n sections = {}\n tex_map = {}\n with open(input_file_name, 'rU') as in_fd:\n sections = get_sections(in_fd.read())\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"sec\",\n {\"sections\": sections})\n\n if not sections:\n _error(\"Nothing could be read from '%s'.\\nIs this an Oolite .dat file?\" \\\n % input_file_name)\n\n # Magically call the 'check' functions\n for name in sections.keys():\n f_name = \"check_%s\" % name.lower()\n if f_name in globals().keys():\n if not globals()[f_name](sections):\n _error(\"Number of entries in '%s' section is different as declared!\" % name)\n\n def get_data(name, sections=sections):\n \"\"\"Returns the 'data' object from the 'name' one found in the\n 'sections' one.\n :sections: dictionary: Object returned by 'get_sections'.\n :name: string: The name of the section to get the 'data'.\n Returns a list of 'lines'.\n \"\"\"\n return sections.get(name, {}).get(\"data\", [])\n\n oti_file_name = build_file_path(file_dir_name, file_base_name, \"oti\")\n tex_map = parse_names(get_data(\"NAMES\"), oti_file_name)\n\n tex_refs, tex_lines_out = parse_textures(get_data(\"TEXTURES\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"tex\",\n {\"tex_refs\": tex_refs,\n \"tex_lines_out\": tex_lines_out})\n\n # Update the tex_map object if textures indexes and names are both\n # used in 'TEXTURES'.\n if sorted(tex_map.keys()) != sorted(tex_refs.get(\"named\").keys()):\n tex_map = update_tex_map(tex_map,\n set(tex_refs[\"named\"].keys()).difference(tex_map.keys()))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"txm\",\n {\"tex_map\": tex_map})\n\n n_verts, vertex_lines_out = parse_vertex(get_data(\"VERTEX\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"ver\",\n {\"n_verts\": n_verts,\n \"vertex_lines_out\": vertex_lines_out})\n\n n_normals, normals_lines_out = parse_normals(get_data(\"NORMALS\"))\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"nor\",\n {\"n_normals\": n_normals,\n \"normals_lines_out\": normals_lines_out})\n\n n_faces, faces_groups = parse_faces(get_data(\"FACES\"), tex_refs,\n normals_lines_out)\n\n if debug:\n write_dump_file(file_dir_name, file_base_name, \"fac\",\n {\"n_faces\": n_faces,\n \"faces_groups\": faces_groups})\n\n output_file_name = build_file_path(file_dir_name,\n file_base_name, 'obj')\n material_file_name = build_file_path(file_dir_name,\n file_base_name, 'mtl')\n mtl_lib_file = os.path.basename(material_file_name)\n\n write_obj(output_file_name, file_base_name, mtl_lib_file,\n tex_lines_out, tex_map, n_verts, vertex_lines_out,\n n_normals, normals_lines_out, n_faces, faces_groups)\n\n write_mtl(material_file_name, tex_map)\n\n _exit(\"* Done\")",
"def main():\n train_src = read_file(SRC_TRAIN)\n train_tgt = read_file(TRGT_TRAIN)\n val_src = read_file(SRC_VAL)\n val_tgt = read_file(TRGT_VAL)\n # val = read_files(VAL_FILES)\n np.savez(\n DATA_NPZ_NAME, train_src=train_src, train_tgt=train_tgt, val_src=val_src, val_tgt=val_tgt)",
"def get_files_io():\n if GC.conf['general']['training']:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'train.txt'),\n 'new': os.path.join(COOKED_DATA, 'train_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'train_norm.txt'),\n 'manu': os.path.join(RAW_DATA, 'others', 'temp_updt_manu.txt'),\n 'labels': os.path.join(TRAIN_DATA, 'train_norm.txt_labels.pkl'),\n 'segll': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TRAIN_DATA, 'train_norm.txt_seginf_deeplog.pkl'),\n 'struct': os.path.join(TRAIN_DATA, 'train_norm.txt_structured.csv'),\n 'output': TRAIN_DATA\n }\n else:\n files_zip = {\n 'raw': os.path.join(COOKED_DATA, 'test.txt'),\n 'new': os.path.join(COOKED_DATA, 'test_new.txt'),\n 'norm': os.path.join(COOKED_DATA, 'test_norm.txt'),\n 'labels': os.path.join(TEST_DATA, 'test_norm.txt_labels.pkl'),\n 'segll': os.path.join(TEST_DATA, 'test_norm.txt_seginf_loglab.pkl'),\n 'segdl': os.path.join(TEST_DATA, 'test_norm.txt_seginf_deeplog.pkl'),\n 'map_norm_raw': os.path.join(TEST_DATA, 'map_norm_raw.pkl'),\n 'map_norm_rcv': os.path.join(TEST_DATA, 'map_norm_rcv.pkl'),\n 'norm_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt'),\n 'struct': os.path.join(TEST_DATA, 'test_norm.txt_structured.csv'),\n 'struct_rcv': os.path.join(TEST_DATA, 'test_norm_rcv.txt_structured.csv'),\n 'top': os.path.join(TEST_DATA, 'analysis_summary_top.txt'),\n 'sum': os.path.join(TEST_DATA, 'analysis_summary.csv'),\n 'rst_llab': os.path.join(TEST_DATA, 'results_loglab.csv'),\n 'rst_dlog': os.path.join(TEST_DATA, 'results_deeplog.txt'),\n 'rst_llzr': os.path.join(TEST_DATA, 'results_loglizer.csv'),\n 'dbg': os.path.join(TEST_DATA, 'debug.csv'),\n 'output': TEST_DATA\n }\n return files_zip",
"def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict",
"def load_result_01(outputfile):\n results_dir='.'\n extension = os.path.splitext(outputfile)[1]\n summary={}\n try:\n\n if extension == '.npy':\n data=np.load( os.path.join(results_dir,str(outputfile)))\n if 'summary' in data:\n summary=data['summary'].item()\n else:\n summary={'X_evol':[data]}\n if extension == '.npz':\n data=np.load( os.path.join(results_dir,str(outputfile)))\n summary=data['summary'].item()\n if extension == '.mat':\n summary=loadmat(os.path.join(results_dir,str(outputfile)))\n if extension == '.json':\n with open(os.path.join(results_dir,str(outputfile)),'r') as result:\n summary=dict(json.load(result))\n summary['X_evol']=[np.array(X) for X in summary['X_evol']]\n summary['initialized_curve']=np.array(summary['initialized_curve'])\n except:\n summary={}\n return summary",
"def create_tempPDB(outputs):\n temp_dir = outputs + \"temp/\"\n if not os.path.exists(temp_dir):\n os.makedirs(temp_dir)\n\n files_list = []\n models_list = os.listdir(outputs)\n for file in models_list:\n if file.endswith(\".pdb\") or file.endswith(\".B\"):\n files_list.append(file)\n\n for element in files_list:\n pdb = open(outputs + element, 'r')\n fo = open(temp_dir + \"mod_\" + element, \"w\")\n for line in pdb:\n if line.startswith('ATOM'):\n line = line.strip()\n search = re.search(r'[A-Z]*\\s*\\d{1,5}\\s*[A-Z]{1,3}[0-9]?\\'?\\s*([A-Z]{1,3}).*', line)\n residue = search.group(1)\n if residue in utilities.nucleic_list:\n continue\n else:\n fo.write(\"%s\\n\" % (line))\n fo.close()\n pdb.close()\n return temp_dir",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')",
"def _infer_yz(self, init_y, init_z, output_file):\n cur_y = init_y\n cur_z = init_z\n\n a_time = time()\n self.auto_save_sample(sample = (cur_y, cur_z))\n for i in xrange(self.niter):\n temp_cur_y = self._infer_y(cur_y, cur_z)\n temp_cur_y, temp_cur_z = self._infer_z(temp_cur_y, cur_z)\n #self._sample_lam(cur_y, cur_z)\n\n if self.record_best:\n if self.auto_save_sample(sample = (temp_cur_y, temp_cur_z)):\n cur_y, cur_z = temp_cur_y, temp_cur_z\n if self.no_improvement():\n break \n \n elif i >= self.burnin:\n cur_y, cur_z = temp_cur_y, temp_cur_z\n self.samples['z'].append(cur_z)\n self.samples['y'].append(cur_y)\n\n self.total_time += time() - a_time\n return self.gpu_time, self.total_time, None",
"def make_default_inputs(filtered_dot_file):\n# w_kernel = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dog-gabor-weights-3.p\", \"rb\" ))\n# w_kernel = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dog-gabor-weights-2.p\", \"rb\" ))\n# w_kernel = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dog-gabor-weights-4.p\", \"rb\" ))\n w_kernel = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dog-gabor-weights-new-2.p\", \"rb\" ))\n filtered = pickle.load(open(filtered_dot_file, \"rb\" ))\n \n n_inputs = 200\n centres = np.random.randn(n_inputs,2)\n mag = np.sum(centres**2, axis=1)**(1/2)\n centres = 155 * centres.T / mag\n centres = centres * np.random.rand(n_inputs)**(1/2)\n centres = np.round(centres).astype('int')\n centres = centres + 199 #put in image centre\n \n inputs = get_inputs(w_kernel, filtered, centres.T)\n \n pickle.dump(inputs, open(filtered_dot_file.replace('-filt.p', '-inputs.p'), \"wb\" ))",
"def convert( core ):\n print \"In the hycom_binary.convert routine\"\n\n args = core.variables\n print \"arg list = \", args\n\n afile = core.data\n # replace the .a with .b and save as 'bfile'\n bfile = str.replace( core.data, \".a\", \".b\" )\n with open( bfile, \"r\" ) as b:\n with open( afile, \"rb\" ) as a:\n lines = b.readlines()\n # This assumes idm and jdm are always\n # on lines 7 and 8 respectively\n idm = int(lines[7].split()[0])\n jdm = int(lines[8].split()[0])\n # read the last k value\n kdm = int(lines[-1].split()[4])\n # number of bytes in array\n n = idm*jdm\n numbytes = n*4 \n # each rec is padded to a multiple of 4096\n pad = (numbytes/4096+1)*4096-numbytes \n print \"numbytes = \", numbytes\n print \"pad = \", pad\n end = \">\" # Big endian\n form = \"f\" # unpack as ieee 32 bit floats\n vars = {} # store all the variables in a dictionary of lists\n lastRead = 0\n for lineno,line in enumerate( lines[10:] ):\n line = line.split()\n varName = line[0].strip()\n print \"varName = \", varName\n if varName in args:\n print varName, \"is in args!\"\n print \"lineno = \", lineno\n print \"lastRead = \", lastRead\n a.read( (lineno-lastRead)*(numbytes+pad) ) # skip through unwanted data\n array = core.struct.unpack(end+form*n, a.read( numbytes )) # read data\n a.read( pad )\n lastRead = lineno+1 # save the last line read for skipping future lines\n if varName in vars:\n # Append this array to the list of arrays (this makes it easier to\n # convert all the arrays into a 3 dimensional list later on)\n print varName, \" is in vars!\"\n\n # Do some preliinary error checking\n filtered = filter( lambda x: x<1e30, array ) # remove nans\n if abs(min(filtered)-float(line[6].strip())) > 1e-6:\n sys.exit(\"ERROR: The data's min is not equal to the .b file's min\")\n if abs(max(filtered)-float(line[7].strip())) > 1e-6:\n sys.exit(\"ERROR: The data's max is not equal to the .b file's max\")\n if len(vars[varName])+1 != int( line[4].strip() ):\n sys.exit(\"ERROR: Level of this array is out of sequence. 
Missed a record\")\n \n vars[varName].append( core.np.array( array ) )\n\n else:\n # Else add a new element to the dictionary\n print \"Adding new element \", varName, \" to vars!\"\n vars[varName] = [ core.np.array( array ) ]\n \n print \"vars.keys()[1] = \", vars.keys()\n print \"len( vars.values()[1] ) = \", len(vars.values()[1])\n\n # Convert to vtk now \n # Make mesh...\n \n nX = idm\n nY = jdm\n nZ = kdm \n conn = []\n pts = []\n rad = []\n cntr = 0\n for k in range(nZ):\n for j in range(nY):\n for i in range(nX):\n #print \"pt%d = (%d,%d,%d)\" % (cntr,i,j,k)\n cntr += 1\n pts.extend([ i, j, k ])\n rad.append( 0 )\n if k < nZ-1 and j < nY-1 and i < nX-1:\n pt1 = k*(nX*nY) + j*nX + i;\n pt2 = k*(nX*nY) + j*nX + i+1;\n pt3 = k*(nX*nY) + (j+1)*nX + i+1;\n pt4 = k*(nX*nY) + (j+1)*nX + i;\n pt5 = (k+1)*(nX*nY) + j*nX + i;\n pt6 = (k+1)*(nX*nY) + j*nX + i+1;\n pt7 = (k+1)*(nX*nY) + (j+1)*nX + i+1;\n pt8 = (k+1)*(nX*nY) + (j+1)*nX + i;\n\n conn.append([ \"hexahedron\", pt1, pt2, pt3, pt4, pt5, pt6, pt7, pt8 ])\n \n# variables = []\n# for name,lst in vars.iteritems():\n# fullArray = core.np.ndarray( (kdm,jdm,idm) )\n# print \"fullArray.shape = \", fullArray.shape\n# print \"fullArray[0].shape = \", fullArray[0].shape\n# print \"fullArray[:].shape = \", fullArray[:].shape\n# print \"fullArray[:][0].shape = \", fullArray[:][0].shape\n# print \"fullArray[:][:][0].shape = \", fullArray[:][:][0].shape\n# print \"fullArray.ndim = \", fullArray.ndim\n# print \"fullArray.size = \", fullArray.size\n# for lvl,array in enumerate(lst):\n# print \"Level = \", lvl\n# print \"array.shape = \", array.shape\n# print \"array.ndim = \", array.ndim\n# print \"array.size = \", array.size\n# tmp = array.reshape( jdm,idm )\n# print \"tmp.shape = \", tmp.shape\n# print \"tmp.ndim = \", tmp.ndim\n# print \"tmp.size = \", tmp.size\n# fullArray[lvl] = array.reshape( jdm,idm )\n# variables.append( (name, 1, 1, fullArray.tolist()) )\n\n##\n# for time in xrange( totalTime ):\n# # Data arrays\n# u_wind = dataVars['uwind_stress'][:][time].tolist()\n# v_wind = dataVars['vwind_stress'][:][time].tolist()\n# wind = []\n# for i in xrange( numberOfElements ):\n# wind.append( u_wind[i] )\n# wind.append( v_wind[i] )\n# wind.append( 0.0 )\n#\n# # Create the variables such as vectors (velocity) and scalars (temperature/salinity)\n# vars = [(\"wind\", 3, 0, wind), (\"u_wind\", 1, 0, u_wind ), (\"v_wind\", 1, 0, v_wind)]\n##\n \n variables = core.np.zeros( len(pts) )\n print \"variables.shape = \", variables.shape\n print \"variables.ndim = \", variables.ndim\n print \"variables.size = \", variables.size\n\n print \"len(pts) = \", len(pts)\n print \"len(pts)/3 = \", len(pts)/3\n \n var_datum = [ \"radius\", 1, 1, variables.tolist() ]\n vars = [ var_datum ]\n outfile = core.output + \".vtk\"\n core.vw.WriteUnstructuredMesh(outfile, 0, pts, conn, vars)",
"def make_tag_data_raw_fast(mdp,filename):\n #\n fin = open(filename,'r')\n iter = 0\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"comb_path\":\n update_params(mdp,lsp)\n if not mdp.flag_out_open: ## -- try to open output file\n try:\n if mdp.flag_overwrite == \"True\": ## check string value!\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ## -- try open output file\n for file in glob.glob(mdp.input_path):\n # get sign which corrects for boundary condition\n tvals = file.split('/')[-1].split('_')[3].split('t')\n try:\n ## flip sign if requested\n bcsign = ((int(tvals[1])+int(tvals[2])) != (int(tvals[1])+int(tvals[2])) % mdp.corr_len)\n except IndexError:\n ## 2-point function\n bcsign = False\n try:\n # open correlator file\n mdp.corr_file = open(file,'r')\n except IOError:\n print \"Could not open file \",file\n continue\n ## -- get tag\n ## baryons:\n #mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_r'+file.split('/')[-1].split('_')[4][-1]\n ## with time source tag\n #mdp.tag = file.split('/')[-1].split('_')[3][:3]\\\n # +'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n # +file.split('/')[-1].split('_')[4][3:]\n ## no time source tag\n mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n +file.split('/')[-1].split('_')[4][3:]\n #print file,',',mdp.tag\n iter+=1\n ##endif ! 
flag_out_open\n\n #save_data_fast(mdp)\n save_data_fast_bc(mdp,bcsign)\n mdp.corr_file.close()\n if iter%400 == 0:\n print \"file\",iter\n max_iter = None\n if not(max_iter is None) and iter==max_iter:\n print \"reached max file iterations, ending loop...\"\n break\n ## end comb_path\n pass\n\n elif lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return",
"def main(unused_argv):\n make_dir(FLAGS.raw_dir)\n\n # Get paths of download/extracted training and evaluation files.\n print(\"Downloading data from source\")\n train_files = get_raw_files(FLAGS.raw_dir, constants.TRAIN_DATA_SOURCES)\n eval_files = get_raw_files(FLAGS.raw_dir, constants.EVAL_DATA_SOURCES)",
"def test_extract_pytorch_statedict(self):\n\t\t\n\t\t\n\t\tmodel = models.resnet18().state_dict()\n\t\tmodel_name = \"resnet18\"\n \n\t\tlayer_names = model.keys()\n\t\texpected_layer_names = [name for name in layer_names if 'weight' in name or 'bias' in name]\n\t\texpected_num_files = len(expected_layer_names)\t\n\t\t\n\t\t# there are 18 real layers with weights\n\t\tlayer_weightfiles = [name for name in layer_names if 'weight' in name and 'bn' not in name and 'downsample' not in name ]\t\n\t\texpected_num_weightfiles = 18\n\t\tactual_num_weightfiles = (len(layer_weightfiles))\n\t\tself.assertEqual(expected_num_weightfiles,actual_num_weightfiles)\n\n\n\t\twith TemporaryDirectory(dir=TEST_TMP_DIR, prefix=\"ww_\") as model_dir:\n\t\t\tprint(f\"using {model_dir} as model_dir\")\n\t\t\tself.assertTrue(model_dir.startswith(TEST_TMP_DIR))\n\t\t\t\n\t\t\twith TemporaryDirectory(dir=TEST_TMP_DIR, prefix=\"ww_\") as weights_dir:\n\t\t\t\tprint(f\"using {weights_dir} as weights_dir\")\n\t\t\t\tself.assertTrue(weights_dir.startswith(TEST_TMP_DIR))\n\t\t\t\n\t\t\t\tstate_dict_filename = os.path.join(model_dir, \"pys.bin\")\n\t\t\t\ttorch.save(model, state_dict_filename)\n\t\t\t\t\n\t\t\t\tww.WeightWatcher.extract_pytorch_statedict_(weights_dir, model_name, state_dict_filename, format=MODEL_FILE_FORMATS.PYTORCH)\n\t\t\t\n\t\t\t\tweightfiles = [f for f in listdir(weights_dir) if isfile(join(weights_dir, f))]\t\n\t\t\t\tactual_num_files = len(weightfiles)\n\t\t\t\tself.assertEqual(expected_num_files,actual_num_files)\t\t\t\t\n\t\t\t\t\n\t\t\t\t# test that we can read the files ?\t\n\t\t\t\tfor filename in weightfiles:\n\t\t\t\t\tW = np.load(os.path.join(weights_dir,filename))\n\t\t\t\t\tself.assertIsNotNone(W)\n\t\t\t\n\t\t\t\t\t\t\n\t\tself.assertFalse(os.path.isdir(model_dir))\n\t\tself.assertFalse(os.path.isdir(weights_dir))\n\t\t\n\t\treturn",
"def main(args=None):\n if args is None:\n parser = create_parser()\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n input_module = input_mapping[args.input_reader]\n output_module = output_mapping[args.output_format]\n\n templates = []\n # Load templates from external folder if set.\n if args.template_folder:\n templates += read_templates(os.path.abspath(args.template_folder))\n\n # Load internal templates, if not disabled.\n if not args.exclude_built_in_templates:\n templates += read_templates()\n output = []\n for f in args.input_files:\n res = extract_data(f.name, templates=templates, input_module=input_module)\n if res:\n logger.info(res)\n output.append(res)\n if args.copy:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.copyfile(f.name, join(args.copy, filename))\n if args.move:\n filename = args.filename.format(\n date=res['date'].strftime('%Y-%m-%d'),\n invoice_number=res['invoice_number'],\n desc=res['desc'],\n )\n shutil.move(f.name, join(args.move, filename))\n f.close()\n\n if output_module is not None:\n output_module.write_to_file(output, args.output_name, args.output_date_format)",
"def read(self) :\n # Open the file.\n f = open(self.output, 'r')\n lines = f.readlines()\n \n # Find the eigenvalue.\n count = 0\n while True :\n words = lines[count].split()\n if len(words) == 5 :\n if words[0] == \"*\" and words[1] == \"K-EFF\":\n self.keff = float(words[3])\n break\n count += 1\n \n # Find the peaking.\n a = 0 # Assembly index\n \n while True :\n words = lines[count].split()\n if len(words) == 8 :\n if words[0] == \"NODE\" and words[1] == \"AVERAGE\" and words[2] == \"POWERS\" :\n count += 5 # Powers start 5 lines below title\n for row in range(0, self.dimension) :\n words = lines[count].split()\n assert(len(words) >= self.dimension)\n for col in range(0, self.dimension) :\n self.peaking_map[row, col] = float(words[col+1])\n if self.core.stencil[row, col] > 0:\n #print \" a=\", a, \" row=\", row, \" col=\", col, len(self.peaking)\n self.peaking[a] = self.peaking_map[row, col]\n a += 1\n count += 1\n break\n count += 1 \n # Maximum peaking.\n self.maxpeak = np.max(self.peaking)",
"def photometry(userinputs, image, catalog, outputname, apertures, annulus='', dannulus='', recenter=False):\n logging.info('Running photometry function on {}'.format(image))\n logging.info('Using {}px apertures'.format(apertures))\n\n #set directory\n target_dir = userinputs['OUTDIR']\n\n #Update passed names to be full paths if they are not\n\n if len(image.split('/'))==1:\n logging.info('Looking for {} in {}.'.format(image,userinputs['DATA']))\n image = glob.glob(userinputs['DATA'] + '/' + image)\n if len(image)==0:\n logging.critical('No {} image found'.format(image))\n filemanagement.shutdown('Selected image does not exist',userinputs)\n else:\n image = image[0]\n logging.debug('Using image: {}'.format(image))\n\n if len(catalog.split('/'))==1:\n catalog = target_dir + '/init/' + catalog\n logging.debug('Input catalog: {}'.format(catalog))\n\n if len(outputname.split('/'))==1:\n output = target_dir + '/photometry/' + outputname\n logging.debug('Output name: {}'.format(output))\n else:\n output = outputname\n outputname = outputname.split('/')[-1]\n logging.debug('Output name: {}'.format(output))\n\n\n #Load zeropoints\n inst_zp, filter_zp, zp_zp = np.loadtxt(target_dir + '/init/Hi-PEEC_zeropoints.tab', unpack=True, dtype='str')\n # print inst_zp, filter_zp, zp_zp\n # Get filter from header\n filter = get_filter(image)\n\n\n # Set the necessary variables for photometry on the reference image\n exptime = fits.getheader(image)['EXPTIME']\n logging.debug('Exposure time from header: {}'.format(exptime))\n inst = fits.getheader(image)['INSTRUME']\n logging.debug('Intrument from header: {}'.format(inst))\n inst = inst.lower()\n\n\n match = (inst_zp == inst) & (filter_zp == filter.lower())\n zp = zp_zp[match]\n\n # zp is a string within an array, so need to turn into a float\n try:\n zp = float(zp[0])\n #If that cannot be done there was no match.\n except IndexError:\n if inst == 'acs':\n logging.debug('Zeropoint not found in file, passing to ACS calculation')\n zp = ACS_zeropoint(image)\n elif inst == 'wfc3':\n logging.debug('Zeropoint not found in file, passing to WFC3 calculation')\n zp = WFC3_zeropoint(image)\n else:\n logging.critical('No matching zeropoint found. 
Quitting.')\n logging.debug('No zeropoint match found for filter {} with instrument {}'\\\n .format(filter,inst))\n logging.debug('Available filters in zeropoint file : {} for instrument {}'\\\n .format(filter_zp, inst_zp))\n filemanagement.shutdown('No zeropoint was found for filter: {}'.format(filter),userinputs)\n\n logging.debug('Zeropoint from file: {}'.format(zp))\n # Remove output file if it already exists\n filemanagement.remove_if_exists(output)\n\n\n # Run photometry\n #--------------------------------------------------------------------------\n # Set up IRAF params:\n iraf.datapars.epadu = exptime\n\n # !!!!!!!!!!!!!!!!!\n # Only center on reference frame\n if recenter:\n iraf.centerpars.calgorithm = 'centroid'\n else:\n iraf.centerpars.calgorithm = 'none'\n # !!!!!!!!!!!!!!!\n # CHANGE BACKGROUND ESTIMATE IN ANNULUS TO MODE\n\n # Select the annulus depending on whether it is overwritten in the function call or not\n if annulus == '':\n iraf.fitskypars.annulus = userinputs['ANNULUS']\n logging.debug('Using annulus from inputfile ({}px)'.format(userinputs['ANNULUS']))\n else:\n iraf.fitskypars.annulus = annulus\n logging.debug('Using user specified annulus ({}px)'.format(annulus))\n if dannulus == '':\n iraf.fitskypars.dannulus = userinputs['D_ANNULUS']\n logging.debug('Using annulus width from inputfile ({}px)'.format(userinputs['D_ANNULUS']))\n else:\n iraf.fitskypars.dannulus = dannulus\n logging.debug('Using user specified annulus width ({}px)'.format(dannulus))\n\n iraf.photpars.apertures = apertures\n logging.debug('Using aperture(s) of {}px'.format(apertures))\n iraf.photpars.zmag = zp\n logging.debug('Setting zeropoint to {}'.format(zp))\n\n # Do phot\n iraf.phot(image+'[SCI]', catalog, output)\n #--------------------------------------------------------------------------\n\n\n #Depending on the number of apertures used, different methods of saving the\n # results are required\n #--------------------------------------------------------------------------\n\n naper = len(apertures.split(','))\n logging.debug('Number of apertures used {}'.format(naper))\n\n #final output filename\n fullcat_mag_short = target_dir + '/photometry/short_' + outputname\n\n if naper > 1:\n # Removes all outputlines that do not contain the character '*'\n # ensures only phot results are kept\n cmd = 'grep \"*\" ' + output + ' > ' + fullcat_mag_short\n os.system(cmd)\n\n # Replace INDEFS:\n cmd = 'sed -i.bak \"s/INDEF/99.999/g\" ' + fullcat_mag_short\n os.system(cmd)\n\n # Remove .bak files to prevent confusion\n bak_fullcat = fullcat_mag_short + '.bak'\n os.remove(bak_fullcat)\n\n\n else:\n #Dump results into a temp file\n temp = target_dir + '/photometry/phot_dump.mag'\n filemanagement.remove_if_exists(temp)\n iraf.txdump(output, 'XCENTER,YCENTER,FLUX,MAG,MERR,MSKY,ID', 'yes', Stdout = temp)\n\n # Set placeholders for sources outside of FOV and undetected sources\n # For outside of FOV, use 66.666 instead of INDEF\n # For undetected sources, use 99.999 instead of INDEF\n\n # Sources outside of FOV have exactly zero flux\n x, y, flux, mag, merr, msky, id = np.loadtxt(temp, unpack = True,\n dtype = str)\n\n flux = flux.astype(float)\n\n out_fov = (flux == 0.)\n logging.debug('Number of sources outside FOV: {}'.format(len(out_fov)))\n\n mag[out_fov] = 66.666\n merr[out_fov] = 66.666\n msky[out_fov] = 66.666\n\n # Undetected sources, those with negative flux or fluxes so small that mag err\n # is INDEF\n neg_flux = (flux < 0.)\n tiny_flux = (flux > 0.) 
& (merr == 'INDEF')\n\n mag[neg_flux] = 99.999\n merr[neg_flux] = 99.999\n msky[neg_flux] = 99.999\n\n merr[tiny_flux] = 99.999\n msky[tiny_flux] = 99.999\n\n logging.debug('Nr of undetected sources: {}'.format(len(tiny_flux)+len(neg_flux)))\n # Save results to new file\n x = x.astype(float)\n y = y.astype(float)\n mag = mag.astype(float)\n merr = merr.astype(float)\n msky = msky.astype(float)\n id = id.astype(int)\n\n zip_phot = zip(x, y, mag, merr, msky, id)\n\n np.savetxt(fullcat_mag_short, zip_phot,\n fmt = '%.3f %.3f %.3f %.3f %.9f %i')\n\n #--------------------------------------------------------------------------\n\n return fullcat_mag_short",
"def unpack(self, filter_file_type=\".dat\", verbose=False):\n\n if hasattr(self, \"phot\"):\n filter_names = np.unique(self.phot[\"filter\"])\n\n self.phot.add_index('filter', unique = True)\n\n\n for filter_name in filter_names:\n\n phot_table = self.phot.loc[\"filter\", filter_name]\n filter_filename = filter_name + filter_file_type\n if verbose: print(filter_filename)\n if verbose: print(phot_table)\n if verbose: print(type(filter_name), type(filter_file_type))\n\n # phot_table.meta = {\"filter_filename\": filter_filename}\n phot_table.meta[\"filter_filename\"] = filter_filename\n if not isinstance(phot_table, Row):\n # if len(np.unique(self.phot.loc[\"filter\", filter_name][\"MJD\"])) > 1:\n indices = phot_table.argsort(\"MJD\")\n # for column_name in phot_table.colnames:\n # phot_table[column_name] = phot_table[column_name][indices]\n sorted_phot_table = Table([phot_table[column_name][indices] for column_name in phot_table.colnames])\n else:\n sorted_phot_table = phot_table\n\n filter_key = np.unique(phot_table[\"filter\"])[0]\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != filter_name:\n raise errors.FilterMismatchError(\"There is a more than one filterdata in here! or there is a mismatch with filename\")\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n\n # def load_filter(path, cmap = False, verbose = False):\n #\n if utils.check_file_path(os.path.abspath(path_to_filter)):\n filter_object = FilterClass()\n filter_object.read_filter_file(os.path.abspath(path_to_filter), verbose = verbose)\n filter_object.calculate_AB_zp()\n else:\n warnings.warn(\"Couldn't load the filter\")\n\n self.data_filters[filter_key] = filter_object\n\n self.data[filter_name] = sorted_phot_table\n\n self.filter_names = filter_names\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n\n pass",
"def extract_metadata():\n\n create_output(ARGS.out)\n index = pre.pixz.read_index(ARGS.traffic)\n\n try:\n tmp = tempfile.mkdtemp(prefix=\"ictf2017_cache_\")\n print(\"Using temporary cache for extracted files at {}\".format(tmp))\n\n file_indexes = [i for i in range(len(index))\n if (i >= ARGS.start and i <= ARGS.stop)]\n\n # a wrapper which measures execution times and calculates eta\n eta = pre.timing.ETACalculator(len(file_indexes))\n\n for count, i in enumerate(file_indexes):\n print(\"\\nProcessing index {} from [{}, {}]\"\n .format(i, min(file_indexes), max(file_indexes)))\n\n def extract_read_append_remove():\n pcapfile = pre.pixz.extract_pcap(ARGS.traffic, index[i], tmp)\n metadata = pre.pcap.read(pcapfile)\n append_output(metadata, ARGS.out)\n os.remove(pcapfile)\n\n eta.execute(count, extract_read_append_remove)\n\n finally:\n shutil.rmtree(tmp)\n print(\"Cleaned up temporary cache {}\\n\\n\".format(tmp))",
"def main():\n parser = argparse.ArgumentParser(description=\"Convert a checkpoint file into a support sets and a reconstructor \"\n \"weights files\")\n parser.add_argument('--exp', type=str, required=True, help=\"set experiment's model dir (created by `train.py`)\")\n\n # Parse given arguments\n args = parser.parse_args()\n\n # Check structure of `args.exp`\n if not osp.isdir(args.exp):\n raise NotADirectoryError(\"Invalid given directory: {}\".format(args.exp))\n models_dir = osp.join(args.exp, 'models')\n if not osp.isdir(models_dir):\n raise NotADirectoryError(\"Invalid models directory: {}\".format(models_dir))\n checkpoint_file = osp.join(models_dir, 'checkpoint.pt')\n if not osp.isfile(checkpoint_file):\n raise FileNotFoundError(\"Checkpoint file not found: {}\".format(checkpoint_file))\n\n print(\"#. Convert checkpoint file into support sets and reconstructor weight files...\")\n\n # Load checkpoint file\n checkpoint_dict = torch.load(checkpoint_file)\n\n # Get checkpoint iteration\n checkpoint_iter = checkpoint_dict['iter']\n print(\" \\\\__Checkpoint iteration: {}\".format(checkpoint_iter))\n\n # Save support sets weights file\n print(\" \\\\__Save checkpoint support sets weights file...\")\n torch.save(checkpoint_dict['support_sets'], osp.join(models_dir, 'support_sets-{}.pt'.format(checkpoint_iter)))\n\n # Save reconstructor weights file\n print(\" \\\\__Save checkpoint reconstructor weights file...\")\n torch.save(checkpoint_dict['reconstructor'], osp.join(models_dir, 'reconstructor-{}.pt'.format(checkpoint_iter)))",
"def _run(input_file_name, num_zenith_angle_bins, num_albedo_bins,\n num_shortwave_sfc_down_flux_bins, num_aod_bins, num_surface_temp_bins,\n num_longwave_sfc_down_flux_bins, num_longwave_toa_up_flux_bins,\n example_dir_name, output_dir_name):\n\n # Process input args.\n if num_zenith_angle_bins <= 0:\n num_zenith_angle_bins = None\n else:\n error_checking.assert_is_geq(num_zenith_angle_bins, 3)\n\n if num_albedo_bins <= 0:\n num_albedo_bins = None\n else:\n error_checking.assert_is_geq(num_albedo_bins, 3)\n\n if num_shortwave_sfc_down_flux_bins <= 0:\n num_shortwave_sfc_down_flux_bins = None\n else:\n error_checking.assert_is_geq(num_shortwave_sfc_down_flux_bins, 3)\n\n if num_aod_bins <= 0:\n num_aod_bins = None\n else:\n error_checking.assert_is_geq(num_aod_bins, 3)\n\n if num_surface_temp_bins <= 0:\n num_surface_temp_bins = None\n else:\n error_checking.assert_is_geq(num_surface_temp_bins, 3)\n\n if num_longwave_sfc_down_flux_bins <= 0:\n num_longwave_sfc_down_flux_bins = None\n else:\n error_checking.assert_is_geq(num_longwave_sfc_down_flux_bins, 3)\n\n if num_longwave_toa_up_flux_bins <= 0:\n num_longwave_toa_up_flux_bins = None\n else:\n error_checking.assert_is_geq(num_longwave_toa_up_flux_bins, 3)\n\n print('Reading data from: \"{0:s}\"...\\n'.format(input_file_name))\n prediction_dict = prediction_io.read_file(input_file_name)\n\n if num_zenith_angle_bins is not None:\n edge_zenith_angles_rad = numpy.linspace(\n 0, MAX_ZENITH_ANGLE_RAD, num=num_zenith_angle_bins + 1, dtype=float\n )\n min_zenith_angles_rad = edge_zenith_angles_rad[:-1]\n max_zenith_angles_rad = edge_zenith_angles_rad[1:]\n\n for k in range(num_zenith_angle_bins):\n this_prediction_dict = prediction_io.subset_by_zenith_angle(\n prediction_dict=copy.deepcopy(prediction_dict),\n min_zenith_angle_rad=min_zenith_angles_rad[k],\n max_zenith_angle_rad=max_zenith_angles_rad[k]\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, zenith_angle_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with zenith angles {1:.4f}...{2:.4f} '\n 'rad) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_zenith_angles_rad[k], max_zenith_angles_rad[k],\n this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_albedo_bins is not None:\n edge_albedos = numpy.linspace(\n 0, 1, num=num_albedo_bins + 1, dtype=float\n )\n min_albedos = edge_albedos[:-1]\n max_albedos = edge_albedos[1:]\n\n # Split by albedo.\n for k in range(num_albedo_bins):\n this_prediction_dict = prediction_io.subset_by_albedo(\n 
prediction_dict=copy.deepcopy(prediction_dict),\n min_albedo=min_albedos[k], max_albedo=max_albedos[k]\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, albedo_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with albedos {1:.4f}...{2:.4f}) '\n 'to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_albedos[k], max_albedos[k], this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n # Split by month.\n for k in range(1, 13):\n this_prediction_dict = prediction_io.subset_by_month(\n prediction_dict=copy.deepcopy(prediction_dict), desired_month=k\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, month=k,\n raise_error_if_missing=False\n )\n print('Writing {0:d} examples to: \"{1:s}\"...'.format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_shortwave_sfc_down_flux_bins is not None:\n edge_fluxes_w_m02 = numpy.linspace(\n 0, MAX_SHORTWAVE_SFC_DOWN_FLUX_W_M02,\n num=num_shortwave_sfc_down_flux_bins + 1, dtype=float\n )\n min_fluxes_w_m02 = edge_fluxes_w_m02[:-1]\n max_fluxes_w_m02 = edge_fluxes_w_m02[1:]\n max_fluxes_w_m02[-1] = numpy.inf\n\n for k in range(num_shortwave_sfc_down_flux_bins):\n this_prediction_dict = (\n prediction_io.subset_by_shortwave_sfc_down_flux(\n prediction_dict=copy.deepcopy(prediction_dict),\n min_flux_w_m02=min_fluxes_w_m02[k],\n max_flux_w_m02=max_fluxes_w_m02[k]\n )\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, shortwave_sfc_down_flux_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing 
{0:d} examples (with shortwave surface downwelling '\n 'fluxes of {1:.4f}...{2:.4f} W m^-2) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_fluxes_w_m02[k], max_fluxes_w_m02[k], this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_longwave_sfc_down_flux_bins is not None:\n edge_fluxes_w_m02 = numpy.linspace(\n 0, MAX_LONGWAVE_SFC_DOWN_FLUX_W_M02,\n num=num_longwave_sfc_down_flux_bins + 1, dtype=float\n )\n min_fluxes_w_m02 = edge_fluxes_w_m02[:-1]\n max_fluxes_w_m02 = edge_fluxes_w_m02[1:]\n max_fluxes_w_m02[-1] = numpy.inf\n\n for k in range(num_longwave_sfc_down_flux_bins):\n this_prediction_dict = (\n prediction_io.subset_by_longwave_sfc_down_flux(\n prediction_dict=copy.deepcopy(prediction_dict),\n min_flux_w_m02=min_fluxes_w_m02[k],\n max_flux_w_m02=max_fluxes_w_m02[k]\n )\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, longwave_sfc_down_flux_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with longwave surface downwelling '\n 'fluxes of {1:.4f}...{2:.4f} W m^-2) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_fluxes_w_m02[k], max_fluxes_w_m02[k], this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_longwave_toa_up_flux_bins is not None:\n edge_fluxes_w_m02 = numpy.linspace(\n 0, MAX_LONGWAVE_TOA_UP_FLUX_W_M02,\n num=num_longwave_toa_up_flux_bins + 1, dtype=float\n )\n min_fluxes_w_m02 = edge_fluxes_w_m02[:-1]\n max_fluxes_w_m02 = edge_fluxes_w_m02[1:]\n max_fluxes_w_m02[-1] = numpy.inf\n\n for k in range(num_longwave_toa_up_flux_bins):\n this_prediction_dict = (\n 
prediction_io.subset_by_longwave_toa_up_flux(\n prediction_dict=copy.deepcopy(prediction_dict),\n min_flux_w_m02=min_fluxes_w_m02[k],\n max_flux_w_m02=max_fluxes_w_m02[k]\n )\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, longwave_toa_up_flux_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with longwave TOA upwelling '\n 'fluxes of {1:.4f}...{2:.4f} W m^-2) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_fluxes_w_m02[k], max_fluxes_w_m02[k], this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n print('\\n')\n\n if num_aod_bins is None and num_surface_temp_bins is None:\n return\n\n valid_times_unix_sec = example_utils.parse_example_ids(\n prediction_dict[prediction_io.EXAMPLE_IDS_KEY]\n )[example_utils.VALID_TIMES_KEY]\n\n example_file_names = example_io.find_many_files(\n directory_name=example_dir_name,\n first_time_unix_sec=numpy.min(valid_times_unix_sec),\n last_time_unix_sec=numpy.max(valid_times_unix_sec),\n raise_error_if_any_missing=False\n )\n\n example_id_strings = []\n aerosol_extinction_matrix_metres01 = numpy.array([])\n height_matrix_m_agl = numpy.array([])\n surface_temps_kelvins = numpy.array([])\n\n for this_file_name in example_file_names:\n print('Reading data from: \"{0:s}\"...'.format(this_file_name))\n this_example_dict = example_io.read_file(\n netcdf_file_name=this_file_name, exclude_summit_greenland=False,\n max_shortwave_heating_k_day01=numpy.inf,\n min_longwave_heating_k_day01=-1 * numpy.inf,\n max_longwave_heating_k_day01=numpy.inf\n )\n\n example_id_strings += this_example_dict[example_utils.EXAMPLE_IDS_KEY]\n\n if num_surface_temp_bins is not None:\n these_temps_kelvins = example_utils.get_field_from_dict(\n example_dict=this_example_dict,\n field_name=example_utils.SURFACE_TEMPERATURE_NAME\n )\n surface_temps_kelvins = numpy.concatenate(\n (surface_temps_kelvins, these_temps_kelvins), axis=0\n )\n\n if num_aod_bins is None:\n continue\n\n this_extinction_matrix_metres01 = example_utils.get_field_from_dict(\n example_dict=this_example_dict,\n field_name=example_utils.AEROSOL_EXTINCTION_NAME\n )\n\n if aerosol_extinction_matrix_metres01.size == 0:\n aerosol_extinction_matrix_metres01 = (\n this_extinction_matrix_metres01 + 0.\n )\n else:\n aerosol_extinction_matrix_metres01 = numpy.concatenate((\n aerosol_extinction_matrix_metres01,\n this_extinction_matrix_metres01\n ), axis=0)\n\n if (\n example_utils.HEIGHT_NAME in\n this_example_dict[example_utils.VECTOR_PREDICTOR_NAMES_KEY]\n ):\n this_height_matrix_m_agl = example_utils.get_field_from_dict(\n 
example_dict=this_example_dict,\n field_name=example_utils.HEIGHT_NAME\n )\n\n if height_matrix_m_agl.size == 0:\n height_matrix_m_agl = this_height_matrix_m_agl + 0.\n else:\n height_matrix_m_agl = numpy.concatenate(\n (height_matrix_m_agl, this_height_matrix_m_agl), axis=0\n )\n else:\n if height_matrix_m_agl.size == 0:\n height_matrix_m_agl = (\n this_example_dict[example_utils.HEIGHTS_KEY] + 0.\n )\n\n desired_indices = example_utils.find_examples(\n all_id_strings=example_id_strings,\n desired_id_strings=prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n allow_missing=False\n )\n del example_id_strings\n\n if num_surface_temp_bins is not None:\n surface_temps_kelvins = surface_temps_kelvins[desired_indices]\n\n edge_temps_kelvins = numpy.linspace(\n MIN_SURFACE_TEMP_KELVINS, MAX_SURFACE_TEMP_KELVINS,\n num=num_surface_temp_bins + 1, dtype=float\n )\n min_temps_kelvins = edge_temps_kelvins[:-1]\n max_temps_kelvins = edge_temps_kelvins[1:]\n min_temps_kelvins[0] = -numpy.inf\n max_temps_kelvins[-1] = numpy.inf\n\n for k in range(num_surface_temp_bins):\n these_indices = numpy.where(numpy.logical_and(\n surface_temps_kelvins >= min_temps_kelvins[k],\n surface_temps_kelvins <= max_temps_kelvins[k]\n ))[0]\n\n this_prediction_dict = prediction_io.subset_by_index(\n prediction_dict=copy.deepcopy(prediction_dict),\n desired_indices=these_indices\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, surface_temp_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with surface temperatures of '\n '{1:.4f}...{2:.4f} K) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_temps_kelvins[k], max_temps_kelvins[k],\n this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )\n\n if num_aod_bins is None:\n return\n\n aerosol_extinction_matrix_metres01 = (\n aerosol_extinction_matrix_metres01[desired_indices, :]\n )\n\n if len(height_matrix_m_agl.shape) == 2:\n height_matrix_m_agl = height_matrix_m_agl[desired_indices, :]\n num_examples = aerosol_extinction_matrix_metres01.shape[0]\n aerosol_optical_depths = numpy.full(num_examples, numpy.nan)\n print('\\n')\n\n for i in range(num_examples):\n if numpy.mod(i, 1000) == 0:\n print((\n 'Have computed aerosol optical depth for {0:d} of {1:d} '\n 'profiles...'\n ).format(\n i, num_examples\n ))\n\n aerosol_optical_depths[i] = simps(\n y=aerosol_extinction_matrix_metres01[i, :],\n x=height_matrix_m_agl[i, :],\n even='avg'\n )\n\n print((\n 'Have computed aerosol optical depth for all {0:d} profiles!\\n'\n ).format(\n num_examples\n ))\n else:\n aerosol_optical_depths = 
simps(\n y=aerosol_extinction_matrix_metres01, x=height_matrix_m_agl,\n axis=-1, even='avg'\n )\n\n edge_aerosol_optical_depths = numpy.linspace(\n 0, MAX_AEROSOL_OPTICAL_DEPTH,\n num=num_aod_bins + 1, dtype=float\n )\n min_aerosol_optical_depths = edge_aerosol_optical_depths[:-1]\n max_aerosol_optical_depths = edge_aerosol_optical_depths[1:]\n max_aerosol_optical_depths[-1] = numpy.inf\n\n for k in range(num_aod_bins):\n these_indices = numpy.where(numpy.logical_and(\n aerosol_optical_depths >= min_aerosol_optical_depths[k],\n aerosol_optical_depths <= max_aerosol_optical_depths[k]\n ))[0]\n\n this_prediction_dict = prediction_io.subset_by_index(\n prediction_dict=copy.deepcopy(prediction_dict),\n desired_indices=these_indices\n )\n\n this_output_file_name = prediction_io.find_file(\n directory_name=output_dir_name, aerosol_optical_depth_bin=k,\n raise_error_if_missing=False\n )\n print((\n 'Writing {0:d} examples (with aerosol optical depths of '\n '{1:.4f}...{2:.4f}) to: \"{3:s}\"...'\n ).format(\n len(this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY]),\n min_aerosol_optical_depths[k],\n max_aerosol_optical_depths[k],\n this_output_file_name\n ))\n\n prediction_io.write_file(\n netcdf_file_name=this_output_file_name,\n scalar_target_matrix=\n this_prediction_dict[prediction_io.SCALAR_TARGETS_KEY],\n vector_target_matrix=\n this_prediction_dict[prediction_io.VECTOR_TARGETS_KEY],\n scalar_prediction_matrix=\n this_prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY],\n vector_prediction_matrix=\n this_prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY],\n heights_m_agl=this_prediction_dict[prediction_io.HEIGHTS_KEY],\n example_id_strings=\n this_prediction_dict[prediction_io.EXAMPLE_IDS_KEY],\n model_file_name=\n this_prediction_dict[prediction_io.MODEL_FILE_KEY],\n isotonic_model_file_name=\n this_prediction_dict[prediction_io.ISOTONIC_MODEL_FILE_KEY],\n uncertainty_calib_model_file_name=this_prediction_dict[\n prediction_io.UNCERTAINTY_CALIB_MODEL_FILE_KEY\n ],\n normalization_file_name=\n this_prediction_dict[prediction_io.NORMALIZATION_FILE_KEY]\n )",
"def readmodel(model = 'dominguez'):\n ebl_file_path = os.path.join(os.path.split(__file__)[0],'data/')\n\n if model == 'kneiske':\n file_name = join(ebl_file_path , 'ebl_nuFnu_tanja.dat')\n elif model == 'franceschini':\n file_name = join(ebl_file_path , 'ebl_franceschini.dat')\n elif model == 'dominguez':\n file_name = join(ebl_file_path , 'ebl_dominguez11.out')\n elif model == 'dominguez-upper':\n file_name = join(ebl_file_path , 'ebl_upper_uncertainties_dominguez11.out')\n elif model == 'dominguez-lower':\n file_name = join(ebl_file_path , 'ebl_lower_uncertainties_dominguez11.out')\n elif model == 'inoue':\n file_name = join(ebl_file_path , 'EBL_z_0_baseline.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_baseline.dat')\n elif model == 'inoue-low-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_low_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_low_pop3.dat')\n elif model == 'inoue-up-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_up_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_up_pop3.dat')\n elif model == 'gilmore':\n file_name = join(ebl_file_path , 'eblflux_fiducial.dat')\n elif model == 'gilmore-fixed':\n file_name = join(ebl_file_path , 'eblflux_fixed.dat')\n elif model == 'cuba':\n file_name = join(ebl_file_path , 'CUBA_UVB.dat')\n elif model == 'finke':\n file_name = join(ebl_file_path , 'ebl_modelC_Finke.txt')\n else:\n raise ValueError(\"Unknown EBL model chosen!\")\n\n data = np.loadtxt(file_name)\n if model.find('inoue') >= 0:\n z = np.array([0.])\n #z = data[0,1:]\n #nuInu = data[:,1]\n lmu = data[:,0]\n nuInu = np.array([data[:,1]]).T\n raise ValueError('Inoue models not correctly implemented at the moment, choose another model')\n\n elif model.find('gilmore') >= 0:\n z = data[0,1:]\n lmu = data[1:,0] * 1e-4 # convert from Angstrom to micro meter\n nuInu = data[1:,1:] \n nuInu[nuInu == 0.] = 1e-20 * np.ones(np.sum(nuInu == 0.))\n \n # convert from ergs/s/cm^2/Ang/sr to nW/m^2/sr\n nuInu = (nuInu.T * data[1:,0]).T * 1e4 * 1e-7 * 1e9 \n\n elif model == 'cuba':\n z = data[0,1:-1]\n lmu = data[1:,0] * 1e-4\n nuInu = data[1:,1:-1]\n\n # replace zeros by 1e-40\n idx = np.where(data[1:,1:-1] == 0.)\n nuInu[idx] = np.ones(np.sum(nuInu == 0.)) * 1e-20\n\n # in erg / cm^2 / s / sr\n nuInu = (nuInu.T * c.c.value / (lmu * 1e-6)).T \n nuInu *= 1e6 # in nW / m^2 / sr\n\n # check where lmu is not strictly increasing\n idx = np.where(np.diff(lmu) == 0.)\n for i in idx[0]:\n lmu[i+1] = (lmu[i + 2] + lmu[i]) / 2.\n\n else:\n z = data[0,1:]\n lmu = data[1:,0]\n nuInu = data[1:,1:]\n if model == 'finke': \n lmu = lmu[::-1] * 1e-4\n nuInu = nuInu[::-1]\n\n return EBL(z,lmu,nuInu, model = model)",
"def main(infile, outfile, format):\n with tempfile.TemporaryDirectory(prefix=\"oab-parse\") as output_dir:\n if infile.name.endswith(\".lzx\"):\n print(f\"Decompressing LZX file...\")\n workingfile = os.path.join(output_dir, \"udetails.oab\")\n decompress_lzx(infile.name, workingfile)\n with open(workingfile, 'rb') as workingfilehandle:\n input = BitStream(workingfilehandle.read())\n else:\n input = BitStream(infile.read())\n\n data = parse_Uncompressed_OAB_v4_Full_Details(input)\n post_process(data)\n\n if format == \"CSV\":\n fieldnames = []\n for row in data[\"Records\"]:\n for field in row.keys():\n if field not in fieldnames:\n fieldnames.append(field)\n\n writer = csv.DictWriter(outfile, fieldnames=fieldnames, lineterminator='\\n')\n\n writer.writeheader()\n for row in data[\"Records\"]:\n writer.writerow(row)\n elif format == \"HTML\":\n outfile.write(\"\"\"<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"/>\n<style>\n.tableFixHead { overflow: auto; height: 100px; }\n.tableFixHead thead th { position: sticky; top: 0; z-index: 1; }\ntable { border-collapse: collapse; width: 100%; }\nth, td { padding: 8px 16px; }\nth { background:#eee; }\ntable, td, th {\n border: 1px solid;\n}\n</style>\n</head>\n<body class=\"tableFixHead\">\"\"\")\n for row in data[\"Records\"]:\n for field in row.keys():\n if field == \"ThumbnailPhoto\":\n row[\"ThumbnailPhoto\"] = f\"<img src='data:image/jpeg;base64,{row['ThumbnailPhoto']}'/>\"\n elif field == \"UserX509Certificate\":\n row[\"UserX509Certificate\"] = f\"<a download=\\\"user.crt\\\" href=\\\"data:application/octet-stream;base64,{row['UserX509Certificate']}\\\">Download</a>\"\n elif field == \"AddressBookX509Certificate\":\n row[\"AddressBookX509Certificate\"] = f\"<a download=\\\"addressbook.crt\\\" href=\\\"data:application/octet-stream;base64,{row['AddressBookX509Certificate']}\\\">Download</a>\"\n else:\n row[field] = html.escape(str(row[field]))\n outfile.write(tabulate.tabulate(data[\"Records\"], tablefmt=\"unsafehtml\", numalign=None, stralign=None, headers=\"keys\"))\n outfile.write(\"\"\"</body></html>\"\"\")\n elif format == \"JSON\":\n json.dump(data, outfile)",
"def _cl_infer_yz(self, init_y, init_z, output_file = None):\n cur_y = init_y.astype(np.int32)\n cur_z = init_z.astype(np.int32)\n\n self.auto_save_sample(sample = (cur_y, cur_z))\n for i in xrange(self.niter):\n a_time = time()\n temp_cur_y = self._cl_infer_y(cur_y, cur_z)\n temp_cur_z = self._cl_infer_z(temp_cur_y, cur_z)\n self.gpu_time += time() - a_time\n temp_cur_y, temp_cur_z = self._cl_infer_k_new(temp_cur_y, temp_cur_z)\n\n if self.record_best:\n if self.auto_save_sample(sample = (temp_cur_y, temp_cur_z)):\n cur_y, cur_z = temp_cur_y, temp_cur_z\n if self.no_improvement(1000):\n break \n elif i >= self.burnin:\n cur_y, cur_z = temp_cur_y, temp_cur_z\n self.samples['z'].append(cur_z)\n self.samples['y'].append(cur_y)\n \n self.total_time += time() - a_time\n\n return self.gpu_time, self.total_time, None",
"def read_cache(cc):\n \n out_file = os.path.join(cc.scene_dir, 'output', cc.scene_id+'_pickle')\n if cc.atmo_src == 'narr':\n out_file += '_narr'\n elif cc.atmo_src == 'merra':\n out_file += '_merra'\n \n if not os.path.isfile(out_file):\n raise OSError('pickle_file is not in expected location %s' % out_file) \n\n with open(out_file, 'rb') as f:\n x = pickle.load(f)\n return x",
"def test_extract_safetensors_statedict(self):\n\n\n\t\tmodel = models.resnet18().state_dict()\n\t\tmodel_name = \"resnet18\"\n \n\t\tlayer_names = model.keys()\n\t\texpected_layer_names = [name for name in layer_names if 'weight' in name or 'bias' in name]\n\t\texpected_num_files = len(expected_layer_names)\t\n\t\tprint(f\"we expect {expected_num_files} files\")\n\t\t\n\t\t# there are 18 real layers with weights\n\t\tlayer_weightfiles = [name for name in layer_names if 'weight' in name and 'bn' not in name and 'downsample' not in name ]\t\n\t\texpected_num_weightfiles = 18\n\t\tactual_num_weightfiles = (len(layer_weightfiles))\n\t\tself.assertEqual(expected_num_weightfiles,actual_num_weightfiles)\n\n\n\t\twith TemporaryDirectory(dir=TEST_TMP_DIR, prefix=\"ww_\") as model_dir:\n\t\t\tprint(f\"using {model_dir} as model_dir\")\n\t\t\tself.assertTrue(model_dir.startswith(TEST_TMP_DIR))\n\t\t\t\n\t\t\twith TemporaryDirectory(dir=TEST_TMP_DIR, prefix=\"ww_\") as weights_dir:\n\t\t\t\tprint(f\"using {weights_dir} as weights_dir\")\n\t\t\t\tself.assertTrue(weights_dir.startswith(TEST_TMP_DIR))\n\t\t\t\n\t\t\t\tstate_dict_filename = os.path.join(model_dir, \"pys.safetensors\")\n\t\t\t\tsafe_save(model, state_dict_filename)\n\t\t\t\t\n\t\t\t\t# if save is false, we get no weightfiles\n\t\t\t\tconfig = ww.WeightWatcher.extract_pytorch_statedict_(weights_dir, model_name, state_dict_filename, format=MODEL_FILE_FORMATS.SAFETENSORS, save=False)\n\t\t\t\tweightfiles = [f for f in listdir(weights_dir) if isfile(join(weights_dir, f))]\t\n\t\t\t\tactual_num_files = len(weightfiles)\n\t\t\t\tself.assertEqual(0,actual_num_files)\t\n\t\t\t\t\n\t\t\t\tprint(len(config.keys()))\n\t\t\t\t\n\t\t\t\t# is save is true, safetensors are extracted\n\t\t\t\tww.WeightWatcher.extract_pytorch_statedict_(weights_dir, model_name, state_dict_filename, format=MODEL_FILE_FORMATS.SAFETENSORS, save=True)\n\t\t\t\tweightfiles = [f for f in listdir(weights_dir) if isfile(join(weights_dir, f))]\t\n\t\t\t\tprint(weightfiles)\n\t\t\t\tactual_num_files = len(weightfiles)\n\t\t\t\tself.assertEqual(expected_num_files,actual_num_files)\t\n\t\t\t\tprint(f\"checked {actual_num_files} weightfiles\")\t\t\t\n\t\t\t\t\n\t\t\t\t# test that we can read the files ?\t\n\t\t\t\tfor filename in weightfiles:\n\t\t\t\t\tW = np.load(os.path.join(weights_dir,filename))\n\t\t\t\t\tself.assertIsNotNone(W)\n\t\t\t\n\t\t\t\t\t\t\n\t\tself.assertFalse(os.path.isdir(model_dir))\n\t\tself.assertFalse(os.path.isdir(weights_dir))\n\t\t\n\t\treturn",
"def save_equilibrator_bin_data(self, npz_file_name):\n preprocess_dict = {'cids': self.params['cids']}\n for k, v in self.params.items():\n if k.find('preprocess_') != -1:\n preprocess_dict[k.replace('preprocess_', '')] = v\n np.savez_compressed(npz_file_name, **preprocess_dict)",
"def read_model(modelfile, dictlist):\n global dxdict\n global dxlist\n global import_img\n dxdict, dxlist = {}, [] # the list is needed for fixed ordering\n mod = io.open(modelfile, 'r')\n st = next(mod)\n ### image adress is found\n while 'SCHEME_IMAGE' not in st:\n st = next(mod)\n #image_adress = st.strip().split()[-1]\n #import_img = ImageTk.PhotoImage(Image.open(image_adress).resize((496, 384), Image.ANTIALIAS))\n #scheme.configure(image = import_img)\n ### the file must contain equations for ODE between ***STATES*** and ***END*** statements\n while \"***STATES***\" not in st:\n st = next(mod)\n #\n while \"***END***\" not in st:\n st = next(mod)\n try:\n dxdict[st.split('=')[0].strip()] = st.split('=')[1].strip().strip(';')\n dxlist.append(st.split('=')[0].strip())\n except:\n continue\n ## now, add dict names to the equations\n ## also, add state names to the PREDEFINED dict\n for s in dxdict.keys():\n for d in dictlist:\n keys = d + '.keys()'\n for k in eval(keys):\n dxdict[s] = dxdict[s].replace(k, \"%(d)s['%(k)s']\" % vars())\n ##\n for i in dxdict.keys():\n for j in dxdict.keys():\n if \"Xdict['%(j)s']\" % vars() not in dxdict[i]:\n dxdict[i] = dxdict[i].replace(j, \"Xdict['%(j)s']\" % vars())\n modelprint, nstates = os.path.basename(modelfile), len(dxlist)",
"def run(self, args: List[str], outputs: List[PipelineOutput]=[], inputs: List[PipelineInput]=[]) -> Tuple[PipelineOutput]:\n\n preopen_directories=set()\n for index, input_ in enumerate(inputs):\n if input_.type == InterfaceTypes.TextFile or input_.type == InterfaceTypes.BinaryFile:\n preopen_directories.add(str(PurePosixPath(input_.data.path).parent))\n for index, output in enumerate(outputs):\n if output.type == InterfaceTypes.TextFile or output.type == InterfaceTypes.BinaryFile:\n preopen_directories.add(str(PurePosixPath(output.data.path).parent))\n preopen_directories = list(preopen_directories)\n\n ri = RunInstance(self.engine, self.linker, self.module, args, preopen_directories)\n\n for index, input_ in enumerate(inputs):\n if input_.type == InterfaceTypes.TextStream:\n data_array = input_.data.data.encode()\n array_ptr = ri.set_input_array(data_array, index, 0)\n data_json = { \"size\": len(data_array), \"data\": f\"data:application/vnd.itk.address,0:{array_ptr}\" }\n ri.set_input_json(data_json, index)\n elif input_.type == InterfaceTypes.BinaryStream:\n data_array = input_.data.data\n array_ptr = ri.set_input_array(data_array, index, 0)\n data_json = { \"size\": len(data_array), \"data\": f\"data:application/vnd.itk.address,0:{array_ptr}\" }\n ri.set_input_json(data_json, index)\n elif input_.type == InterfaceTypes.TextFile:\n pass\n elif input_.type == InterfaceTypes.BinaryFile:\n pass\n elif input_.type == InterfaceTypes.Image:\n image = input_.data\n mv = bytes(image.data.data)\n data_ptr = ri.set_input_array(mv, index, 0)\n dv = bytes(image.direction.data)\n direction_ptr = ri.set_input_array(dv, index, 1)\n image_json = {\n \"imageType\": asdict(image.imageType),\n \"name\": image.name,\n \"origin\": image.origin,\n \"spacing\": image.spacing,\n \"direction\": f\"data:application/vnd.itk.address,0:{direction_ptr}\",\n \"size\": image.size,\n \"data\": f\"data:application/vnd.itk.address,0:{data_ptr}\"\n }\n ri.set_input_json(image_json, index)\n elif input_.type == InterfaceTypes.Mesh:\n mesh = input_.data\n if mesh.numberOfPoints:\n pv = bytes(mesh.points)\n else:\n pv = bytes([])\n points_ptr = ri.set_input_array(pv, index, 0)\n if mesh.numberOfCells:\n cv = bytes(mesh.cells)\n else:\n cv = bytes([])\n cells_ptr = ri.set_input_array(cv, index, 1)\n if mesh.numberOfPointPixels:\n pdv = bytes(mesh.pointData)\n else:\n pdv = bytes([])\n point_data_ptr = ri.set_input_array(pdv, index, 2)\n if mesh.numberOfCellPixels:\n cdv = bytes(mesh.cellData)\n else:\n cdv = bytes([])\n cell_data_ptr = ri.set_input_array(cdv, index, 3)\n mesh_json = {\n \"meshType\": asdict(mesh.meshType),\n \"name\": mesh.name,\n\n \"numberOfPoints\": mesh.numberOfPoints,\n \"points\": f\"data:application/vnd.itk.address,0:{points_ptr}\",\n\n \"numberOfCells\": mesh.numberOfCells,\n \"cells\": f\"data:application/vnd.itk.address,0:{cells_ptr}\",\n \"cellBufferSize\": mesh.cellBufferSize,\n\n \"numberOfPointPixels\": mesh.numberOfPointPixels,\n \"pointData\": f\"data:application/vnd.itk.address,0:{point_data_ptr}\",\n\n \"numberOfCellPixels\": mesh.numberOfCellPixels,\n \"cellData\": f\"data:application/vnd.itk.address,0:{cell_data_ptr}\",\n }\n ri.set_input_json(mesh_json, index)\n elif input_.type == InterfaceTypes.PolyData:\n polydata = input_.data\n if polydata.numberOfPoints:\n pv = bytes(polydata.points)\n else:\n pv = bytes([])\n points_ptr = ri.set_input_array(pv, index, 0)\n\n if polydata.verticesBufferSize:\n pv = bytes(polydata.vertices)\n else:\n pv = bytes([])\n vertices_ptr = 
ri.set_input_array(pv, index, 1)\n\n if polydata.linesBufferSize:\n pv = bytes(polydata.lines)\n else:\n pv = bytes([])\n lines_ptr = ri.set_input_array(pv, index, 2)\n\n if polydata.polygonsBufferSize:\n pv = bytes(polydata.polygons)\n else:\n pv = bytes([])\n polygons_ptr = ri.set_input_array(pv, index, 3)\n\n if polydata.triangleStripsBufferSize:\n pv = bytes(polydata.triangleStrips)\n else:\n pv = bytes([])\n triangleStrips_ptr = ri.set_input_array(pv, index, 4)\n\n if polydata.numberOfPointPixels:\n pv = bytes(polydata.pointData)\n else:\n pv = bytes([])\n pointData_ptr = ri.set_input_array(pv, index, 5)\n\n if polydata.numberOfCellPixels:\n pv = bytes(polydata.cellData)\n else:\n pv = bytes([])\n cellData_ptr = ri.set_input_array(pv, index, 6)\n\n polydata_json = {\n \"polyDataType\": asdict(polydata.polyDataType),\n \"name\": polydata.name,\n\n \"numberOfPoints\": polydata.numberOfPoints,\n \"points\": f\"data:application/vnd.itk.address,0:{points_ptr}\",\n\n \"verticesBufferSize\": polydata.verticesBufferSize,\n \"vertices\": f\"data:application/vnd.itk.address,0:{vertices_ptr}\",\n\n \"linesBufferSize\": polydata.linesBufferSize,\n \"lines\": f\"data:application/vnd.itk.address,0:{lines_ptr}\",\n\n \"polygonsBufferSize\": polydata.polygonsBufferSize,\n \"polygons\": f\"data:application/vnd.itk.address,0:{polygons_ptr}\",\n\n \"triangleStripsBufferSize\": polydata.triangleStripsBufferSize,\n \"triangleStrips\": f\"data:application/vnd.itk.address,0:{triangleStrips_ptr}\",\n\n \"numberOfPointPixels\": polydata.numberOfPointPixels,\n \"pointData\": f\"data:application/vnd.itk.address,0:{pointData_ptr}\",\n\n \"numberOfCellPixels\": polydata.numberOfCellPixels,\n \"cellData\": f\"data:application/vnd.itk.address,0:{cellData_ptr}\"\n }\n ri.set_input_json(polydata_json, index)\n elif input_.type == InterfaceTypes.JsonCompatible:\n data_array = json.dumps(input_.data).encode()\n array_ptr = ri.set_input_array(data_array, index, 0)\n data_json = { \"size\": len(data_array), \"data\": f\"data:application/vnd.itk.address,0:{array_ptr}\" }\n ri.set_input_json(data_json, index)\n else:\n raise ValueError(f'Unexpected/not yet supported input.type {input_.type}')\n\n return_code = ri.delayed_start()\n\n populated_outputs: List[PipelineOutput] = []\n if len(outputs) and return_code == 0:\n for index, output in enumerate(outputs):\n output_data = None\n if output.type == InterfaceTypes.TextStream:\n data_ptr = ri.get_output_array_address(0, index, 0)\n data_size = ri.get_output_array_size(0, index, 0)\n data_array = ri.wasmtime_lift(data_ptr, data_size)\n output_data = PipelineOutput(InterfaceTypes.TextStream, TextStream(data_array.decode()))\n elif output.type == InterfaceTypes.BinaryStream:\n data_ptr = ri.get_output_array_address(0, index, 0)\n data_size = ri.get_output_array_size(0, index, 0)\n data_array = ri.wasmtime_lift(data_ptr, data_size)\n output_data = PipelineOutput(InterfaceTypes.BinaryStream, BinaryStream(data_array))\n elif output.type == InterfaceTypes.TextFile:\n output_data = PipelineOutput(InterfaceTypes.TextFile, TextFile(output.data.path))\n elif output.type == InterfaceTypes.BinaryFile:\n output_data = PipelineOutput(InterfaceTypes.BinaryFile, BinaryFile(output.data.path))\n elif output.type == InterfaceTypes.Image:\n image_json = ri.get_output_json(index)\n\n image = Image(**image_json)\n\n data_ptr = ri.get_output_array_address(0, index, 0)\n data_size = ri.get_output_array_size(0, index, 0)\n data_array = _to_numpy_array(image.imageType.componentType, 
ri.wasmtime_lift(data_ptr, data_size))\n shape = list(image.size)[::-1]\n if image.imageType.components > 1:\n shape.append(image.imageType.components)\n image.data = data_array.reshape(tuple(shape))\n\n direction_ptr = ri.get_output_array_address(0, index, 1)\n direction_size = ri.get_output_array_size(0, index, 1)\n direction_array = _to_numpy_array(FloatTypes.Float64, ri.wasmtime_lift(direction_ptr, direction_size))\n dimension = image.imageType.dimension\n direction_array.shape = (dimension, dimension)\n image.direction = direction_array\n\n output_data = PipelineOutput(InterfaceTypes.Image, image)\n elif output.type == InterfaceTypes.Mesh:\n mesh_json = ri.get_output_json(index)\n mesh = Mesh(**mesh_json)\n\n if mesh.numberOfPoints > 0:\n data_ptr = ri.get_output_array_address(0, index, 0)\n data_size = ri.get_output_array_size(0, index, 0)\n mesh.points = _to_numpy_array(mesh.meshType.pointComponentType, ri.wasmtime_lift(data_ptr, data_size))\n else:\n mesh.points = _to_numpy_array(mesh.meshType.pointComponentType, bytes([]))\n\n if mesh.numberOfCells > 0:\n data_ptr = ri.get_output_array_address(0, index, 1)\n data_size = ri.get_output_array_size(0, index, 1)\n mesh.cells = _to_numpy_array(mesh.meshType.cellComponentType, ri.wasmtime_lift(data_ptr, data_size))\n else:\n mesh.cells = _to_numpy_array(mesh.meshType.cellComponentType, bytes([]))\n if mesh.numberOfPointPixels > 0:\n data_ptr = ri.get_output_array_address(0, index, 2)\n data_size = ri.get_output_array_size(0, index, 2)\n mesh.pointData = _to_numpy_array(mesh.meshType.pointPixelComponentType, ri.wasmtime_lift(data_ptr, data_size))\n else:\n mesh.pointData = _to_numpy_array(mesh.meshType.pointPixelComponentType, bytes([]))\n\n if mesh.numberOfCellPixels > 0:\n data_ptr = ri.get_output_array_address(0, index, 3)\n data_size = ri.get_output_array_size(0, index, 3)\n mesh.cellData = _to_numpy_array(mesh.meshType.cellPixelComponentType, ri.wasmtime_lift(data_ptr, data_size))\n else:\n mesh.cellData = _to_numpy_array(mesh.meshType.cellPixelComponentType, bytes([]))\n\n output_data = PipelineOutput(InterfaceTypes.Mesh, mesh)\n elif output.type == InterfaceTypes.PolyData:\n polydata_json = ri.get_output_json(index)\n polydata = PolyData(**polydata_json)\n\n if polydata.numberOfPoints > 0:\n data_ptr = ri.get_output_array_address(0, index, 0)\n data_size = ri.get_output_array_size(0, index, 0)\n polydata.points = _to_numpy_array(FloatTypes.Float32, ri.wasmtime_lift(data_ptr, data_size))\n else:\n polydata.points = _to_numpy_array(FloatTypes.Float32, bytes([]))\n\n if polydata.verticesBufferSize > 0:\n data_ptr = ri.get_output_array_address(0, index, 1)\n data_size = ri.get_output_array_size(0, index, 1)\n polydata.vertices = _to_numpy_array(IntTypes.UInt32, ri.wasmtime_lift(data_ptr, data_size))\n else:\n polydata.vertices = _to_numpy_array(IntTypes.UInt32, bytes([]))\n\n if polydata.linesBufferSize > 0:\n data_ptr = ri.get_output_array_address(0, index, 2)\n data_size = ri.get_output_array_size(0, index, 2)\n polydata.lines = _to_numpy_array(IntTypes.UInt32, ri.wasmtime_lift(data_ptr, data_size))\n else:\n polydata.lines = _to_numpy_array(IntTypes.UInt32, bytes([]))\n\n if polydata.polygonsBufferSize > 0:\n data_ptr = ri.get_output_array_address(0, index, 3)\n data_size = ri.get_output_array_size(0, index, 3)\n polydata.polygons = _to_numpy_array(IntTypes.UInt32, ri.wasmtime_lift(data_ptr, data_size))\n else:\n polydata.polygons = _to_numpy_array(IntTypes.UInt32, bytes([]))\n\n if polydata.triangleStripsBufferSize > 0:\n data_ptr 
= ri.get_output_array_address(0, index, 4)\n data_size = ri.get_output_array_size(0, index, 4)\n polydata.triangleStrips = _to_numpy_array(IntTypes.UInt32, ri.wasmtime_lift(data_ptr, data_size))\n else:\n polydata.triangleStrips = _to_numpy_array(IntTypes.UInt32, bytes([]))\n\n if polydata.numberOfPointPixels > 0:\n data_ptr = ri.get_output_array_address(0, index, 5)\n data_size = ri.get_output_array_size(0, index, 5)\n polydata.pointData = _to_numpy_array(polydata.polyDataType.pointPixelComponentType, ri.wasmtime_lift(data_ptr, data_size))\n else:\n polydata.triangleStrips = _to_numpy_array(polydata.polyDataType.pointPixelComponentType, bytes([]))\n\n if polydata.numberOfCellPixels > 0:\n data_ptr = ri.get_output_array_address(0, index, 6)\n data_size = ri.get_output_array_size(0, index, 6)\n polydata.cellData = _to_numpy_array(polydata.polyDataType.cellPixelComponentType, ri.wasmtime_lift(data_ptr, data_size))\n else:\n polydata.triangleStrips = _to_numpy_array(polydata.polyDataType.cellPixelComponentType, bytes([]))\n\n output_data = PipelineOutput(InterfaceTypes.PolyData, polydata)\n elif output.type == InterfaceTypes.JsonCompatible:\n data_ptr = ri.get_output_array_address(0, index, 0)\n data_size = ri.get_output_array_size(0, index, 0)\n data_array = ri.wasmtime_lift(data_ptr, data_size)\n output_data = PipelineOutput(InterfaceTypes.JsonCompatible, json.loads(data_array.decode()))\n else:\n raise ValueError(f'Unexpected/not yet supported output.type {output.type}')\n\n populated_outputs.append(output_data)\n\n ri.delayed_exit(return_code)\n\n # Should we be returning the return_code?\n return tuple(populated_outputs)"
] | [
"0.6047751",
"0.5246178",
"0.51966566",
"0.5086983",
"0.5083798",
"0.5073814",
"0.505774",
"0.5057473",
"0.5057473",
"0.50435996",
"0.4937564",
"0.49273506",
"0.4898537",
"0.48841074",
"0.48752788",
"0.48730662",
"0.4870236",
"0.4848012",
"0.48013437",
"0.47919822",
"0.4791026",
"0.47591823",
"0.47561613",
"0.47420764",
"0.4739238",
"0.47375524",
"0.4735512",
"0.47185713",
"0.4715364",
"0.471186"
] | 0.8627245 | 0 |
Callback function that is called when the Save Parameters button is clicked. | def on_save_parameters(self):
obj_points = self.get_object_points()
cam_pos = self.get_camera_position()
distortion = self.get_distortion_coeeficients()
d = {
'object positions': obj_points,
'camera positions': cam_pos,
'distortion coefficients': distortion
}
jsn = json.dumps(d)
h = hashlib.sha1(jsn.encode('utf-8')).hexdigest()
fn = f'{h}.json'
with open(fn, 'w') as f:
f.write(jsn)
        self.statusBar().showMessage(f'Parameters have been saved to {fn}.')
self.param_file = fn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def saveParam(self):\n qApp.emit(QtCore.SIGNAL(\"saveMe\"), self._param)",
"def saveParam(self):\n qApp.emit(QtCore.SIGNAL(\"saveMe\"), self._param)",
"def save(self, *args):\n # need to do!!\n pass",
"def init_save_curve_params_button(self):\n def save_params():\n \"\"\"\n function to invoke different save routines\n \"\"\"\n file_name = filedialog.asksaveasfilename(\n filetypes=[\n (\"JSON\", \"*.json\")\n ],\n initialdir=os.getcwd())\n if file_name: # save option not cancelled by user\n self.parent_class.classes[\"fractal\"].curve.store_curve_tofile(\n file_name)\n\n self.buttons[\"btn_save_params\"] = Button(\n self.frame, text=\"Save Parameters\", command=save_params)\n self.buttons[\"btn_save_params\"].grid(row=4, column=1)",
"def save(self, *args, **kwargs):\n pass",
"def save(self, *args, **kwargs) -> Any:\n pass",
"def onSaveNotesButton(self, button):\n pass",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs):\n return",
"def save_buttons_values(self):\r\n pass",
"def cb_save(self, button):\n print(\"Save File callback\")\n\n if self.filename:\n with open(self.filename, \"w\") as fout:\n fout.write(self.main_data)\n else:\n # If self.flename is blank then call the Save_As method.\n self.cb_save_as(button)",
"def save():\n self.SSM_CLIENT.put_parameter(\n Name=self._state_name,\n Description=self._STATE_DESCRIPTION.format(self._app_type, self.function_name),\n Value=param_value,\n Type='SecureString',\n Overwrite=True\n )",
"def save(self):\n # type: () -> None\n setattr(self.fn, self.PARAM_NAME, self)",
"def save():\n pass",
"def callback_save(*args, **kwargs):\n return True",
"def saveButtonMethod(self):\n return AbstractAction(\"Save\")",
"def save(self):\n pass",
"def save(self):\n pass",
"def save():",
"def OnSave(self, e):\n\t\tconvert_to = None\n\t\tif e.Id == 201:\n\t\t\tconvert_to = \"photoabsorption\"\n\t\telif e.Id == 202:\n\t\t\tconvert_to = \"refractive_index\"\n\t\tlogger.info(\"Save\")\n\t\tfd = wx.FileDialog(self, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)\n\t\tif fd.ShowModal()==wx.ID_OK:\n\t\t\tmetadata = {\"Density\": float(self.DensityText.GetValue()), \"Molecular Formula\":self.StoichiometryText.GetValue(),\"Formula Mass\":data.calculate_FormulaMass(self.Stoichiometry)}\n\t\t\tdata.export_data(fd.GetPath(), numpy.transpose(numpy.vstack((self.Full_E,self.KK_Real_Spectrum,data.coeffs_to_ASF(self.Full_E,self.Imaginary_Spectrum)))), header_info=metadata, convert_to=convert_to)",
"def save(self, values):",
"def save_params():\n file_name = filedialog.asksaveasfilename(\n filetypes=[\n (\"JSON\", \"*.json\")\n ],\n initialdir=os.getcwd())\n if file_name: # save option not cancelled by user\n self.parent_class.classes[\"fractal\"].curve.store_curve_tofile(\n file_name)",
"def onSaveMenu(self, item):\n self.dialog = SaveDialog()\n self.dialog.doModal(self.onSaveChosen)\n return 1",
"def save (self):\n pass",
"def on_post(self):\n return \"Ok, the stuff is being saved\"",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass"
] | [
"0.70925754",
"0.70925754",
"0.67913026",
"0.6768073",
"0.6621593",
"0.66042584",
"0.6581914",
"0.6530856",
"0.6530856",
"0.6530856",
"0.6524365",
"0.64923036",
"0.6481401",
"0.64349306",
"0.64142895",
"0.6389021",
"0.63860184",
"0.6382369",
"0.6373126",
"0.6373126",
"0.63344663",
"0.62794954",
"0.6256562",
"0.61834013",
"0.61733353",
"0.6134559",
"0.6131544",
"0.611801",
"0.611801",
"0.611801"
] | 0.7093935 | 0 |
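For context on the positive document above: it serializes a parameter dict to JSON and names the output file after the SHA-1 hash of its contents. Below is a minimal, hedged sketch of that save pattern outside any Qt widget; the getter methods and `statusBar` call from the original are replaced with placeholder values, and `save_parameters` is a hypothetical helper name, not code from the source repository.

```python
import hashlib
import json


def save_parameters(params: dict) -> str:
    """Write params as JSON to a file named by the SHA-1 digest of its contents."""
    jsn = json.dumps(params)
    digest = hashlib.sha1(jsn.encode("utf-8")).hexdigest()
    filename = f"{digest}.json"
    with open(filename, "w") as f:
        f.write(jsn)
    return filename


if __name__ == "__main__":
    # Placeholder values standing in for the GUI getters in the original snippet.
    fn = save_parameters({
        "object positions": [{"x": 10, "y": 20, "lat": 0.0, "lon": 0.0, "alt": 0.0}],
        "camera positions": {"lat": 0.0, "lon": 0.0, "alt": 0.0, "cx": 0.0, "cy": 0.0,
                             "phi": 0.0, "theta": 0.0, "psi": 0.0},
        "distortion coefficients": {"k1": 0.0, "k2": 0.0, "k3": 0.0, "p1": 0.0, "p2": 0.0},
    })
    print(f"Parameters have been saved to {fn}.")
```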
Callback function that is called when the "Load Parameters" button is clicked. | def on_load_parameters(self, filename=None):
if filename is None:
path, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Choose a parameter file.", "", "JSON Files (*.json)")
else:
path = filename
if path == '' or path is None:
return
self.param_file = path
with open(self.param_file, 'r') as f:
params = json.loads(f.read())
obj_points = params['object positions']
cam_pos = params['camera positions']
dist_coeff = params['distortion coefficients']
for p in obj_points:
x, y = p['x'], p['y']
lat, lon, alt = p['lat'], p['lon'], p['alt']
self.add_known_image_points((x, y), latlonalt=(lat, lon, alt))
self.camera_lat_line.setValue(float(cam_pos['lat']))
self.camera_lon_line.setValue(float(cam_pos['lon']))
self.camera_alt_line.setValue(float(cam_pos['alt']))
self.cx_line.setValue(float(cam_pos['cx']))
self.cy_line.setValue(float(cam_pos['cy']))
self.phi_line.setValue(float(cam_pos['phi']))
self.theta_line.setValue(float(cam_pos['theta']))
self.psi_line.setValue(float(cam_pos['psi']))
self.k1_line.setValue(float(dist_coeff['k1']))
self.k2_line.setValue(float(dist_coeff['k2']))
self.k3_line.setValue(float(dist_coeff['k3']))
self.p1_line.setValue(float(dist_coeff['p1']))
self.p2_line.setValue(float(dist_coeff['p2']))
self.statusBar().showMessage(f'Loaded parameters from {self.param_file}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_parameter(self):",
"def init_load_params_button(self):\n def load_params():\n \"\"\"\n load parameters from the Curve class to the fastfractal\n \"\"\"\n file_name = filedialog.askopenfilename(\n filetypes=[(\"JSON\", \"*.json\")])\n if file_name:\n self.parent_class.classes[\"fractal\"].curve.load_from_file(\n file_name)\n self.parent_class.classes[\"fractal\"].curve.set_parent_parameters(\n )\n self.rules_frame_class.fill_entries_from_rules(\n self.parent_class.classes[\"fractal\"].rules)\n # fill the entries in rules input on load\n self.set_recursion_depth_entry(\n self.parent_class.classes[\"fractal\"].recursion_depth)\n self.set_base_length_entry(\n self.parent_class.classes[\"fractal\"].base_length)\n self.rules_frame_class.render_preview()\n\n self.buttons[\"btn_load_params\"] = Button(\n self.frame, text=\"Load Parameters\", command=load_params)\n self.buttons[\"btn_load_params\"].grid(row=4, column=0)",
"def enterParameters(self,**kwargs):\n\n members = self.bl.getAllParameters().keys() \n entries={}\n\n for param in members:\n entries[param] = getattr(self.bl, 'paramSelection') # save param names in entries\n entries['view selection'] = [getattr(self.bl, 'displayText'), str(self.bl.getAllParameters())]\n entries['reset selection'] = getattr(self.bl, 'paramReset')\n self.mm.addGenericMenu(\"param\",self.mm.cur_page,\"Select your desired params for this operation\", entries)\n self.mm.loadMenu(\"param\")",
"def getParameters(self): #$NON-NLS-1$\r",
"def init_params(self):\n blah",
"def get_params(self):",
"def load():\n\n global R, P, NP, update, update_available, region_dict\n\n loader = GoSmartParameterLoader(gosmart._prefix)\n loader.initiate()\n\n R = loader.get_regions()\n P, NP = loader.get_parameters()\n\n region_dict = loader.get_region_dict()\n\n update = gosmart.status.StatusUpdater()\n update_available = update.connect()",
"def parameters(self):",
"def define_parameters(self):",
"def load_params(self):\n return self.params",
"def parameters_changed(self):\n pass",
"def load_params():\n file_name = filedialog.askopenfilename(\n filetypes=[(\"JSON\", \"*.json\")])\n if file_name:\n self.parent_class.classes[\"fractal\"].curve.load_from_file(\n file_name)\n self.parent_class.classes[\"fractal\"].curve.set_parent_parameters(\n )\n self.rules_frame_class.fill_entries_from_rules(\n self.parent_class.classes[\"fractal\"].rules)\n # fill the entries in rules input on load\n self.set_recursion_depth_entry(\n self.parent_class.classes[\"fractal\"].recursion_depth)\n self.set_base_length_entry(\n self.parent_class.classes[\"fractal\"].base_length)\n self.rules_frame_class.render_preview()",
"def load_parameters(self):\n json_data = open(\"param.json\")\n data = json.load(json_data)\n self.items = data[\"items\"]\n self.pollInterval = self.items[0]['poll_interval']",
"def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n self._data = load_dict[\"data\"][\"data\"][0]\n self._default = self._data\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n if \"explored_data\" in load_dict:\n self._explored_range = [\n x for x in load_dict[\"explored_data\"][\"data\"].tolist()\n ]\n self._explored = True\n\n self._locked = True",
"def load_args(self):\n self.args_loading = True\n\n idx = self.stepsListWidget.currentRow()\n args = self.mgr.obj.steps[idx].args\n\n load_table_from_dict(args, self.argsTableWidget)\n\n self.args_loading = False",
"def set_params(self):\r\n pass",
"def onResetParameters(self):\r\n # productive #button\r\n profprint()\r\n fileName = pathToScene = slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\", \"Config/default.cfg\")\r\n self.logic.loadParameters(fileName)",
"def parameters(self):\n pass",
"def get_params(self):\n pass",
"def _get_parameters(self):\n return None",
"def updateParameters(self):\n\n return",
"def params(self):\n pass",
"def load_params():\r\n return pickle.load(open('params.p', mode='rb'))",
"def load_parameters():\n\n retval = RP_LIB.rp_LoadLockboxConfig()\n if retval != 0:\n LOG.error(\"Failed to load parameters. Error code: %s\", ERROR_CODES[retval])",
"def loadParameters (self, filePath):\r\n # productive #onButton\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n config = ConfigParser.RawConfigParser()\r\n config.read(filePath)\r\n\r\n autoCorrectTip = config.getboolean('BooleanSection', 'autoCorrectTip')\r\n invertedContrast = config.getboolean('BooleanSection', 'invertedContrast')\r\n gradient = config.getboolean('BooleanSection', 'gradient')\r\n filterControlPoints = config.getboolean('BooleanSection', 'filterControlPoints')\r\n drawFiducialPoints = config.getboolean('BooleanSection', 'drawFiducialPoints')\r\n autoStopTip = config.getboolean('BooleanSection', 'autoStopTip')\r\n extendNeedle = config.getboolean('BooleanSection', 'extendNeedle')\r\n maxLength = config.getboolean('BooleanSection', 'maxLength')\r\n gaussianAttenuationButton = config.getboolean('BooleanSection', 'gaussianAttenuationButton')\r\n\r\n realNeedleLength = config.getint('IntegerSection', 'realNeedleLength')\r\n sigmaValue = config.getint('IntegerSection', 'sigmaValue')\r\n gradientPonderation = config.getint('IntegerSection', 'gradientPonderation')\r\n exponent = config.getint('IntegerSection', 'exponent')\r\n try:\r\n radiusMax = config.getint('IntegerSection', 'distanceMax') # try deprecated parameter name (old parameter files)\r\n except:\r\n radiusMax = config.getint('IntegerSection', 'radiusMax')\r\n nbRotatingIterations = config.getint('IntegerSection', 'nbRotatingIterations')\r\n numberOfPointsPerNeedle = config.getint('IntegerSection', 'numberOfPointsPerNeedle')\r\n lenghtNeedleParameter = config.getint('IntegerSection', 'lenghtNeedleParameter')\r\n radiusNeedleParameter = config.getint('IntegerSection', 'radiusNeedleParameter')\r\n algoVersParameter = config.getint('IntegerSection', 'algoVersParameter')\r\n\r\n widget.autoCorrectTip.checked = autoCorrectTip\r\n widget.invertedContrast.checked = invertedContrast\r\n widget.gradient.checked = gradient\r\n widget.filterControlPoints.checked = filterControlPoints\r\n widget.drawFiducialPoints.checked = drawFiducialPoints\r\n widget.autoStopTip.checked = autoStopTip\r\n widget.extendNeedle.checked = extendNeedle\r\n widget.maxLength.checked = maxLength\r\n widget.gaussianAttenuationButton.checked = gaussianAttenuationButton\r\n\r\n widget.realNeedleLength.value = realNeedleLength\r\n widget.sigmaValue.value = sigmaValue\r\n widget.gradientPonderation.value = gradientPonderation\r\n widget.exponent.value = exponent\r\n widget.radiusMax.value = radiusMax\r\n widget.nbRotatingIterations.value = nbRotatingIterations\r\n widget.numberOfPointsPerNeedle.value = numberOfPointsPerNeedle\r\n widget.lenghtNeedleParameter.value = lenghtNeedleParameter\r\n widget.radiusNeedleParameter.value = radiusNeedleParameter\r\n widget.algoVersParameter.value = algoVersParameter\r\n print \"#############\"\r\n print \"algoVers: \", algoVersParameter\r\n print \"Parameters successfully loaded!\"",
"def get_parameters(self):\r\n raise Exception(\"Not implemented (server-side parameter initialization)\")",
"def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['functionType'] = self.functionType\n paramDict['dataFilename'] = self.dataFilename\n paramDict['workingDir'] = self.workingDir\n return paramDict",
"def initialize_params(self, params):\n pass",
"def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n try:\n self._data = load_dict[\"data\" + ArrayParameter.IDENTIFIER]\n\n if \"explored_data\" + ArrayParameter.IDENTIFIER in load_dict:\n explore_table = load_dict[\"explored_data\" + ArrayParameter.IDENTIFIER]\n\n idx = explore_table[\"idx\"]\n\n explore_list = []\n\n # Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'\n for name_idx in idx:\n arrayname = self._build_name(name_idx)\n explore_list.append(load_dict[arrayname])\n\n self._explored_range = [x for x in explore_list]\n self._explored = True\n\n except KeyError:\n super(ArrayParameter, self)._load(load_dict)\n\n self._default = self._data\n self._locked = True",
"def _update_params(self):\n raise NotImplementedException()"
] | [
"0.7636233",
"0.66088814",
"0.6579747",
"0.6318373",
"0.62812096",
"0.62741035",
"0.6250368",
"0.6236836",
"0.62086946",
"0.618174",
"0.6136972",
"0.611286",
"0.60765636",
"0.6062588",
"0.59959674",
"0.5963001",
"0.5931317",
"0.59206915",
"0.58839107",
"0.5883892",
"0.5875783",
"0.58747715",
"0.58148944",
"0.57780725",
"0.5777848",
"0.5775407",
"0.57485116",
"0.5745445",
"0.5722917",
"0.56981564"
] | 0.6689701 | 1 |
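Tying the save and load records together: a small self-contained sketch (a hypothetical helper, not taken from either repository) that reads a hash-named parameter file back and checks that the filename still matches the digest of its contents.

```python
import hashlib
import json
import os


def load_and_verify(path: str) -> dict:
    """Load a parameter JSON file and confirm its name matches the SHA-1 of its contents."""
    with open(path, "r") as f:
        text = f.read()
    expected = hashlib.sha1(text.encode("utf-8")).hexdigest() + ".json"
    if os.path.basename(path) != expected:
        raise ValueError(f"{path} does not match its content hash ({expected})")
    return json.loads(text)
```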
This function draws points onto the visible image. All of the drawing happens on a copy of the image so the original is maintained. | def draw_known_points(self):
if self.tracking:
            if self.current_tracked_point is None:
return
p = self.current_tracked_point
x, y = (int(u) for u in p)
cv2.circle(self.altered_image, (x, y), 10, (0, 0, 255), 1, cv2.LINE_AA)
cv2.putText(self.altered_image, f'az:{x:.3f} alt:{y:.3f}', (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
self.statusBar().showMessage(f'az: {x:.3f} alt: {y:.3f}')
else:
for p in self.known_image_points:
x, y = (int(u) for u in p)
cv2.circle(self.altered_image, (x, y), 5, (0, 0, 255), 1, cv2.LINE_AA)
cv2.putText(self.altered_image, f'x:{x} y:{y}', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_points(self, imsize):\n mask = np.zeros(imsize)\n for t in self.points:\n mask[t] = 1\n toimage(mask).show()",
"def drawPointSet (self, points, colour):\r\n\r\n w = self.bih_vals [bih_Width]\r\n\r\n for pt in points:\r\n self.image [pt [1]][pt [0]] = colour",
"def draw_birdseye_points(self, image, points):\n point_image = np.copy(image)\n for point in points:\n cv2.circle(point_image, point, 5, [0, 0, 255], cv2.FILLED)\n\n cv2.imshow('image1', point_image)\n cv2.waitKey(1)",
"def draw_points(in_img, points, colour=(255, 0, 0)):\n img = in_img.copy()\n\n radius = int(max(img.shape) / 100)\n\n img = convert_when_colour(colour, img)\n\n for point in points:\n img = cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1)\n\n return img",
"def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis",
"def tool_draw_point(self,img,point,color=[0,0,0]):\n def s(pos):\n return int((pos + 1) / 2 * 128)\n if point is None:\n print(\"Warn: tool_draw_point Fail => point is None\")\n return img\n x, y = s(point[0]), s(point[1])\n img = cv2.rectangle(img, (x, y), (x, y), color, 5)\n return img",
"def draw_features(self, image):\n \n for x,y in self.new_points.reshape(-1,2):\n cv2.circle(image, (x,y), 2, (255,0,255), 2)\n return image",
"def draw(self):\r\n\r\n\r\n\t\tself.predict()\r\n\t\t#print np.shape(self.gray)\r\n\t\t#cv2.rectangle(self.gray, (self.bb[0], self.bb[1]), (self.bb[0] + self.bb[2], self.bb[1] + self.bb[3]))\r\n\r\n\t\t# draw points as green circles\r\n\t\tfor point in self.features:\r\n\t\t\tcv2.circle(self.gray,(int(point[0][0]),int(point[0][1])),3,(255),-1)\r\n\t\t\t\r\n\t\tcv2.imshow('image',self.gray)\r\n\t\tcv2.waitKey(1)",
"def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")",
"def drawSet(self, points, color):\n for coord in points:\n try:\n self.im.put(color, coord)\n except TclError:\n continue\n self.zoomMap(self.scale)",
"def draw_points(self, pic_path, points_data):\n # Pupil Finding here\n pupils = get_eye_locations_in_image(pic_path)\n img = cv2.imread(pic_path)\n frame_number = int(re.findall(r'\\d+', pic_path.split('/')[-1])[0])\n dets = detector(img)\n shape = None\n height, width, channels = img.shape\n\n for k, d in enumerate(dets):\n shape = predictor(img, d)\n\n if(not shape):\n return\n\n pointList = []\n c = 0\n for b in range(68):\n # sanitizing input points\n point = Point(shape.part(b).x, shape.part(b).y)\n points_data[c] = [point.x, point.y]\n c = c + 1\n # some points might be out of bound\n # so, move them to the closest boundary\n if(point.x < 0):\n point.x = 0\n elif(point.x >= width):\n point.x = width - 1\n if(point.y < 0):\n point.y = 0\n elif(point.y >= height):\n point.y = height - 1\n\n pointList.append(point)\n\n roll = findRoll(pointList)\n #print(\"roll is \" + str(roll) + ' angles')\n yaw = findYaw(pointList)\n #print(\"yaw is \" + str(yaw) + ' angles')\n pitch = findPitch(pointList)\n #print(\"pitch is \" + str(pitch) + ' angles')\n self.data[frame_number] = [roll, yaw, pitch]\n counter = 0\n for point in pointList:\n cv2.circle(img, (point.x, point.y), ImageProcessor.POINT_SIZE, ImageProcessor.POINT_COLOR, -1)\n counter = counter + 1\n\n self.draw_triangles(img, pointList)\n \n for pupil in pupils:\n cv2.circle(img, (pupil.left.x, pupil.left.y), 5, (0,0,255), -1)\n cv2.circle(img, (pupil.right.x, pupil.right.y), 5, (0,0,255), -1)\n points_data[-1] = [pupil.left.x, pupil.left.y]\n points_data[-2] = [pupil.right.x, pupil.right.y]\n #print(pupil.left.x, \", \", pupil.left.y)\n #print(pupil.right.x, \", \", pupil.right.y)\n\n cv2.imwrite(pic_path, img)",
"def show_keypoints(image, key_pts):\n plt.imshow(image)\n plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')",
"def drawClippedPointSet (self, points, colour):\r\n\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n \r\n for pt in points:\r\n if pt [0] >= 0 and pt [0] < w and pt [1] >= 0 and pt [1] < h:\r\n self.image [pt [1]][pt [0]] = colour",
"def Draw(self):\n\t\tGameImage.Draw(self, self.coords)",
"def draw(self, surface):\r\n if self.visible:\r\n surface.blit(self.image, (self.x, self.y))",
"def drawPoints(self, points, color):\n for p in points:\n Point\n p.color = color\n p.radius = self.points_radius\n p.conversion = False\n p.show(self.context)",
"def draw(self):\n self.write_image()\n self.update()",
"def drawOnCanvas(myPoints, paintColors):\n for point in myPoints:\n cv2.circle(imgResult, center=(point[0], point[1]), radius=10, \n color=paintColors[point[2]], thickness=cv2.FILLED)",
"def draw_pion(self, p):\n self.board_mat[p.x][p.y].set_data(p.img)",
"def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)",
"def draw(self):\n self.screen.blit(self.image, (self.x_pos1, self.y_pos))\n self.screen.blit(self.image, (self.x_pos2, self.y_pos))",
"def draw_keypoints(img_to_plot, keypoints):\n cv2.drawKeypoints(img_to_plot, keypoints, img_to_plot, flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)",
"def draw_point_gt(img,point_gt,width=3):\n point_img=copy.deepcopy(img)\n\n for i in range(point_gt.shape[0]):\n x=int(point_gt[i,0])\n y=int(point_gt[i,1])\n point_img=cv2.circle(point_img,center=(x,y),radius=width//2+1,color=(0,0,255),thickness=width)\n\n return point_img",
"def draw_2d_points(self, pts_2d, color):\n assert pts_2d.shape[1] == 2, pts_2d.shape\n\n image = self.frame.copy()\n pts_2d = pts_2d.astype(np.int32).tolist()\n\n for (x,y),c in zip(pts_2d,color):\n cv2.circle(image, (x, y), 2, c, -1)\n \n return image",
"def drawFast(img, nonmaxSuppression=True, color=(255,0,0)):\n\ttmp = img.copy()\n\tkp = fast(tmp, nonmaxSuppression=nonmaxSuppression)\n\ttmp = cv2.drawKeypoints(tmp, kp, color=color)\n\treturn tmp",
"def draw_point(x, y):\n map_image = Image.open('map.png')\n map_image.putpixel((x, y), (0, 255, 0))\n map_image.save('map.png')\n map_image.show('map.png')",
"def draw_point(image, x, y, radius = 1):\n for dx in range(-radius, radius + 1):\n for dy in range(-radius, radius + 1):\n image.putpixel((x+dx, y+dy), 255)",
"def draw(self):\n\n self.updateLazyImageLoading()\n\n image(self.baseMap, 0, 0)\n\n for layer in self.layers:\n layer.draw()\n\n for marker in self.markers:\n marker.draw()",
"def draw(self):\r\n self.canvas.delete(tk.ALL)\r\n\r\n # Draw the points.\r\n radius = 2\r\n for point in self.points:\r\n x0 = point[0] - radius\r\n y0 = point[1] - radius\r\n x1 = x0 + 2 * radius\r\n y1 = y0 + 2 * radius\r\n self.canvas.create_oval(x0, y0, x1, y1, fill=\"red\", outline=\"red\")\r\n\r\n # If we have a solution, draw it.\r\n if self.solved:\r\n curve = []\r\n for x in range(self.canvas.winfo_width()):\r\n curve.append((x, F(self.a_values, x)))\r\n self.canvas.create_line(curve, fill=\"blue\")",
"def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)"
] | [
"0.6897224",
"0.66766715",
"0.6621769",
"0.65924174",
"0.65777516",
"0.6547684",
"0.6543244",
"0.6404244",
"0.63991845",
"0.6369983",
"0.635793",
"0.6353487",
"0.6333849",
"0.6314204",
"0.62561333",
"0.6206732",
"0.61894614",
"0.6156099",
"0.6151059",
"0.6147598",
"0.6144985",
"0.6129776",
"0.6109087",
"0.6093573",
"0.60298616",
"0.6025694",
"0.60115063",
"0.5970446",
"0.5963571",
"0.5948622"
] | 0.7168063 | 0 |
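The drawing pattern in the record above (annotate a working copy with `cv2.circle` and `cv2.putText` so the original frame stays untouched) can be reproduced in a few lines. This is an illustrative sketch only; the function name, point list, and image size are made up for the example.

```python
import cv2
import numpy as np


def annotate_points(image: np.ndarray, points) -> np.ndarray:
    """Return a copy of `image` with each (x, y) point circled and labelled."""
    canvas = image.copy()  # draw on a copy so the original is maintained
    for x, y in points:
        cv2.circle(canvas, (int(x), int(y)), 5, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.putText(canvas, f"x:{int(x)} y:{int(y)}", (int(x), int(y)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
    return canvas


if __name__ == "__main__":
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy frame for illustration
    annotated = annotate_points(frame, [(100, 120), (320, 240)])
    print(annotated.shape)
```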
This function is called when we need to update the scene in tracking mode. If we don't have any points to track we just return, because there is nothing to do. If the tracker isn't initialized then we initialize it. If the tracker is initialized we update it and then draw the box on the scene. | def update_frame_tracking(self):
if not self.current_tracked_point:
return
if not self.tracker.initialized:
x, y = self.current_tracked_point
roi = (x, y, 50, 50)
self.tracker.initialize(self.original_image, roi)
return
self.roi = self.tracker.update(self.original_image)
x, y, w, h = self.roi
self.send_pointing_message(az=int(x), el=int(y))
cv2.circle(self.altered_image, (int(x), int(y)), 10, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.putText(self.altered_image, f'az:{x:.3f} alt:{y:.3f}', (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tracking(self) -> None:\n dist, delta_angle, timestamp = self.vision.get_vision_data()\n # collect data only once per loop\n if timestamp is None:\n # self.next_state(\"searching\")\n # print(f\"tracking -> searching {self.vision.get_vision_data()}\")\n self.state = self.searching\n else:\n if abs(delta_angle) > self.find_allowable_angle(dist):\n # print(f\"Telling turret to slew by {delta_angle}\")\n self.turret.slew(delta_angle)\n if self.ready_to_spin():\n # self.next_state(\"firing\")\n # print(f\"tracking -> spining_up {self.vision.get_vision_data()}\")\n self.distance = dist\n self.state = self.spining_up",
"def __init__(self, frame, track_window):\n # set up the roi\n global tracking_state\n self.tracker = cv2.TrackerKCF_create()\n ok = self.tracker.init(frame, track_window)\n if ok is True:\n tracking_state = True\n else:\n tracking_state = False\n cv2.putText(frame, \"Tracking init failed\", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)",
"def track(self, image):\r\n \r\n # if the object was initialized correctrly\r\n if self.well_initialized:\r\n ok, self.object_bound_rect = self.tracker.update(image)\r\n \r\n return ok, self.object_bound_rect",
"def draw_known_points(self):\n if self.tracking:\n if self.current_tracked_point == None:\n return\n p = self.current_tracked_point\n x, y = (int(u) for u in p)\n cv2.circle(self.altered_image, (x, y), 10, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.putText(self.altered_image, f'az:{x:.3f} alt:{y:.3f}', (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))\n self.statusBar().showMessage(f'az: {x:.3f} alt: {y:.3f}')\n else:\n for p in self.known_image_points:\n x, y = (int(u) for u in p)\n\n cv2.circle(self.altered_image, (x, y), 5, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.putText(self.altered_image, f'x:{x} y:{y}', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))",
"def update_display(self):\n \n # check availability of display queue of the wide camera\n# if not hasattr(self,'wide_disp_queue'):\n# pass\n# elif self.wide_disp_queue.empty():\n# pass\n# else:\n# try:\n# wide_disp_image = self.wide_disp_queue.get()\n# \n# self.wide_disp_counter += 1\n# self.wide_disp_counter %= 2\n# if self.wide_disp_counter == 0:\n# if type(wide_disp_image) == np.ndarray:\n# if wide_disp_image.shape == (self.wide_cam.settings.height.value(),self.wide_cam.settings.width.value()):\n# try:\n# self.wide_cam_image.setImage(wide_disp_image)\n# except Exception as ex:\n# print('Error: %s' % ex)\n# except Exception as ex:\n# print(\"Error: %s\" % ex)\n \n # check availability of display queue of the track camera \n if not hasattr(self,'track_disp_queue'):\n pass\n elif self.track_disp_queue.empty():\n pass\n else:\n try:\n track_disp_image = self.track_disp_queue.get()\n self.track_disp_counter += 1\n self.track_disp_counter %= 4\n if self.track_disp_counter == 0:\n if type(track_disp_image) == np.ndarray:\n if track_disp_image.shape == (self.track_cam.settings.height.value(),self.track_cam.settings.width.value()):\n try:\n self.track_cam_image.setImage(track_disp_image)\n except Exception as ex:\n print('Error: %s' % ex)\n \n x = int(self.settings.x.value())\n y = int(self.settings.y.value())\n self.tracker_data[:] = 0\n self.tracker_data[x,y] = 1\n self.tracker_image.setImage(np.copy(self.tracker_data))\n except Exception as ex:\n print(\"Error: %s\" % ex)",
"def update_visualizer(self):\n if self.visualizer:\n if self.frame_count == 2:\n self.visualizer.add_geometry(self.vis_points)\n self.visualizer.update_geometry(self.vis_points)\n self.visualizer.poll_events()\n self.visualizer.update_renderer()\n time.sleep(0.001)\n self.frame_count += 1",
"def before_flip(self):\n from klibs.KLGraphics import blit\n\n if P.show_gaze_dot and self.el.recording:\n try:\n blit(self.tracker_dot, 5, self.el.gaze())\n except RuntimeError:\n pass",
"def run(self):\n self.track_len = []\n # debug\n while self._segment_index <= self._segment_cnt:\n if self._segment_index < 0: # Uncomment this block to debug specific segment\n self._segment_index += 1\n continue\n # run association\n print \"[Tracking]\\tSegment index:\\t{} Total segment num:\\t{}\".format(self._segment_index, self._segment_cnt)\n start = cv2.getTickCount()\n \n self._run_segment()\n print \"[Tracking]\\tSegment start:\\t{} Segment end\\t{}\".format(self._segment_start_fid,\n self._segment_end_fid)\n # dump into file\n \"\"\"\n seg_name = 'segment_{}.track'.format(self._segment_index)\n seg_file = os.path.join(self._segment_dir, seg_name)\n self._segments_path.append(seg_file)\n Track.dump_to_track_file(self._high_level_tracks, save_name=seg_file)\n print \"Track contains {} high level tracks\".format(len(self._high_level_tracks))\n \"\"\"\n self._segment_index += 1\n end = cv2.getTickCount()\n print \"[Tracking]\\tTime:\\t{} seconds\".format(float(end - start) / cv2.getTickFrequency())\n if P['debug']:\n pos_feature_num = self.pos_arr.shape[0]\n neg_feature_num = self.neg_arr.shape[0]\n pos_arr = np.hstack((self.pos_arr, np.ones(shape=(pos_feature_num, 1))))\n neg_arr = np.hstack((self.neg_arr, np.zeros(shape=(neg_feature_num, 1))))\n np.savetxt(os.path.join(\"../feature_classifier/\", \"{}_pos_feature.txt\".format(self._video_name)), pos_arr)\n np.savetxt(os.path.join(\"../feature_classifier/\", \"{}_neg_feature.txt\".format(self._video_name)), neg_arr)\n\n final_track_save_file = os.path.join(self._save_dir, self._video_name + \"_final_merged.track\")\n mot_track_save_file = os.path.join(self._save_dir, self._video_name + \".txt\")\n Track.dump_to_track_file_no_feature(self._final_tracks, final_track_save_file, self._calib_w, self._calib_h)\n Track.dump_track_with_mot_format(self._final_tracks, mot_track_save_file,)\n print(\"there are {} tracklet in final merged track\".format(len(self._final_tracks)))",
"def update(self, geometry=None, accuracy=None):\r\n update_track(self.project, self, geometry, accuracy)",
"def trackFace():\n\n\t\t# start face tracker\n\t\tself.track.setWholeBodyOn(False)\n\t\tself.track.startTracker()",
"def trackFace(self):\n\n\t\t# start face tracker\n\t\tself.track.setWholeBodyOn(False)\n\t\tself.track.startTracker()",
"def initialize_tracking(wander,yr):\n\n global BTRACK, GSTRUC\n\n # How many steps are we tracking\n ntrack = 0\n if wander==False:\n ntrack = yr[1]*3\n n = ntrack\n else:\n ntrack = 0\n n = 100000\n \n # Tracking lists\n BTRACK = {'data':[],'count':0,'wander':wander,'ntrack':ntrack,'x':np.zeros(n,int)-1,'y':np.zeros(n,int)-1}\n GSTRUC = {'data':[],'count':0,'ngauss':0,'x':np.zeros(n,int)-1,'y':np.zeros(n,int)-1}",
"def track_features(self):\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Compute a rough relative rotation which takes a vector \r\n # from the previous frame to the current frame.\r\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\r\n\r\n # Organize the features in the previous image.\r\n prev_ids = []\r\n prev_lifetime = []\r\n prev_cam0_points = []\r\n prev_cam1_points = []\r\n\r\n for feature in chain.from_iterable(self.prev_features):\r\n prev_ids.append(feature.id)\r\n prev_lifetime.append(feature.lifetime)\r\n prev_cam0_points.append(feature.cam0_point)\r\n prev_cam1_points.append(feature.cam1_point)\r\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\r\n\r\n # Number of the features before tracking.\r\n self.num_features['before_tracking'] = len(prev_cam0_points)\r\n\r\n # Abort tracking if there is no features in the previous frame.\r\n if len(prev_cam0_points) == 0:\r\n return\r\n\r\n # Track features using LK optical flow method.\r\n curr_cam0_points = self.predict_feature_tracking(\r\n prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)\r\n\r\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.prev_cam0_pyramid, self.curr_cam0_pyramid,\r\n prev_cam0_points.astype(np.float32), \r\n curr_cam0_points.astype(np.float32), \r\n **self.config.lk_params)\r\n \r\n # Mark those tracked points out of the image region as untracked.\r\n for i, point in enumerate(curr_cam0_points):\r\n if not track_inliers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n track_inliers[i] = 0\r\n\r\n # Collect the tracked points.\r\n prev_tracked_ids = select(prev_ids, track_inliers)\r\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\r\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\r\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\r\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\r\n\r\n # Number of features left after tracking.\r\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\r\n\r\n # Outlier removal involves three steps, which forms a close\r\n # loop between the previous and current frames of cam0 (left)\r\n # and cam1 (right). 
Assuming the stereo matching between the\r\n # previous cam0 and cam1 images are correct, the three steps are:\r\n #\r\n # prev frames cam0 ----------> cam1\r\n # | |\r\n # |ransac |ransac\r\n # | stereo match |\r\n # curr frames cam0 ----------> cam1\r\n #\r\n # 1) Stereo matching between current images of cam0 and cam1.\r\n # 2) RANSAC between previous and current images of cam0.\r\n # 3) RANSAC between previous and current images of cam1.\r\n #\r\n # For Step 3, tracking between the images is no longer needed.\r\n # The stereo matching results are directly used in the RANSAC.\r\n\r\n # Step 1: stereo matching.\r\n curr_cam1_points, match_inliers = self.stereo_match(\r\n curr_tracked_cam0_points)\r\n\r\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\r\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\r\n prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)\r\n prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)\r\n curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)\r\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\r\n\r\n # Number of features left after stereo matching.\r\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\r\n\r\n # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.\r\n # cam0_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam0_points, curr_matched_cam0_points,\r\n # cam0_R_p_c, self.cam0_intrinsics, \r\n # self.cam0_distortion_model, self.cam0_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n\r\n # cam1_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam1_points, curr_matched_cam1_points,\r\n # cam1_R_p_c, self.cam1_intrinsics, \r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\r\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\r\n\r\n # Number of features after ransac.\r\n after_ransac = 0\r\n for i in range(len(cam0_ransac_inliers)):\r\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\r\n continue \r\n row = int(curr_matched_cam0_points[i][1] / grid_height)\r\n col = int(curr_matched_cam0_points[i][0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n\r\n grid_new_feature = FeatureMetaData()\r\n grid_new_feature.id = prev_matched_ids[i]\r\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\r\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\r\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\r\n prev_matched_lifetime[i] += 1\r\n\r\n self.curr_features[code].append(grid_new_feature)\r\n after_ransac += 1\r\n self.num_features['after_ransac'] = after_ransac\r\n\r\n # Compute the tracking rate.\r\n # prev_feature_num = sum([len(x) for x in self.prev_features])\r\n # curr_feature_num = sum([len(x) for x in self.curr_features])\r",
"def show_tracked_people(self):\r\n\r\n # Check existence of tracking results\r\n\r\n key_frames_path = os.path.join(\r\n self.track_path, c.FACE_RECOGNITION_KEY_FRAMES_DIR)\r\n\r\n if not(os.path.exists(key_frames_path)):\r\n\r\n os.makedirs(key_frames_path)\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n\r\n with open(self.track_file_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n\r\n else:\r\n\r\n print 'Warning! No tracking results found!'\r\n\r\n return\r\n\r\n p_counter = 0\r\n\r\n for segment_dict in self.tracked_faces:\r\n\r\n frame_list = segment_dict[c.FRAMES_KEY]\r\n\r\n # Choose central frame in segment\r\n frames_nr = len(frame_list)\r\n\r\n if frames_nr >= 1:\r\n\r\n middle_idx = int(math.ceil(frames_nr/2.0) - 1)\r\n\r\n middle_frame_dict = frame_list[middle_idx]\r\n\r\n frame_name = middle_frame_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n frame_path = os.path.join(self.frames_path, frame_name)\r\n\r\n image = cv2.imread(frame_path, cv2.IMREAD_COLOR)\r\n\r\n # Add tracking window to image as red rectangle\r\n track_bbox = middle_frame_dict[c.TRACKING_BBOX_KEY]\r\n\r\n x0 = track_bbox[0]\r\n x1 = x0 + track_bbox[2]\r\n y0 = track_bbox[1]\r\n y1 = y0 + track_bbox[3]\r\n\r\n cv2.rectangle(\r\n image, (x0, y0), (x1, y1), (0, 0, 255), 3, 8, 0)\r\n\r\n # Save image\r\n fr_name = '%07d.png' % p_counter\r\n\r\n fr_path = os.path.join(key_frames_path, fr_name)\r\n\r\n cv2.imwrite(\r\n fr_path, image, [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n del image\r\n\r\n p_counter += 1",
"def __init__(self, ini_image, object_bound_rect = (0,0,0,0)):\r\n \r\n self.ini_image = ini_image\r\n \r\n # if the bounding rectangle of the object is not set, than chose it manually\r\n if (object_bound_rect == (0,0,0,0)):\r\n print('Select object to track on the image')\r\n object_bound_rect = cv2.selectROI(self.ini_image)\r\n \r\n self.tracker = cv2.legacy.TrackerCSRT_create()\r\n ok = self.tracker.init(self.ini_image, object_bound_rect)\r\n \r\n if ok:\r\n print('Tracker initiated')\r\n self.well_initialized = True\r\n else:\r\n print('Tracker not initiated')\r\n self.well_initialized = False\r\n cv2.destroyAllWindows()",
"def _setup(self):\n super(TrackingGridCMFDTestHarness, self)._create_geometry()\n super(TrackingGridCMFDTestHarness, self)._create_trackgenerator()\n\n # Initialize track objects\n self.tracks['Diagonal Track'] = openmoc.Track()\n self.tracks['Nudged Diagonal Track'] = openmoc.Track()\n self.tracks['Horizontal Track'] = openmoc.Track()\n self.tracks['Vertical Track'] = openmoc.Track()\n self.tracks['Reverse Diagonal Track'] = openmoc.Track()\n\n # Set track trajectories and locations\n self.tracks['Diagonal Track'].setValues(-3, -3, 3, 3, math.atan(1))\n nudge = 1e-5\n self.tracks['Nudged Diagonal Track'].setValues(-3+nudge, -3, 3,\\\n 3-nudge, math.atan(1))\n self.tracks['Horizontal Track'].setValues(-3, 0, 3, 0, 0)\n self.tracks['Vertical Track'].setValues(0, -3, 0, 3, math.pi/2)\n self.tracks['Reverse Diagonal Track'].setValues(3, 3, -3, -3,\\\n math.pi + math.atan(1))",
"def updateComplete(self):\n self.livesScreen()\n if self.getWave().getLives() == 0:\n self.deathScreen()\n else:\n self.winScreen()",
"def display_tracking_window(tracker, traceable_object_list, exitKey=\"q\"):\n\n while(True):\n tracker.track_objects(traceable_object_list)\n cv2.waitKey(1)\n\n if cv2.waitKey(1) & 0xFF == ord(exitKey):\n break\n\n cv2.destroyAllWindows()",
"def run(self):\n while not glfw.window_should_close(self.win):\n # clear draw buffer and depth buffer (<-TP2)\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n winsize = glfw.get_window_size(self.win)\n view = self.trackball.view_matrix()\n projection = self.trackball.projection_matrix(winsize)\n\n # draw our scene objects\n for drawable in self.drawables:\n drawable.draw(projection, view, identity(), win = self.win,\n color_shader=self.color_shader, normal_mapping = self.normal_mapping)\n\n # flush render commands, and swap draw buffers\n glfw.swap_buffers(self.win)\n\n # Poll for and process events\n glfw.poll_events()",
"def locate_tracker(self, debug):\n\n # tmp_image =\n # tmp_image = cv2.GaussianBlur(self.frame, (11, 11), 0) # Experiment with this\n\n hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) # Convert to HSV Color Space. This is temporary for testing using colored objects)\n\n mask = cv2.inRange(hsv, self.hueLower, self.hueUpper)\n\n try:\n mask = cv2.inRange(hsv, self.hueLower2, self.hueUpper2) + mask\n except AttributeError:\n pass\n\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n if debug:\n tmpMask = imutils.resize(mask, width=1000, height=1000)\n cv2.imshow(\"mask\", tmpMask)\n\n\n # find contours in the mask and initialize the current (x, y) center of the object\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n # if radius > 10:\n # # draw the circle and centroid on the frame,\n # # then update the list of tracked points\n # cv2.circle(frame, (int(x), int(y)), int(radius),\n # (0, 255, 255), 2)\n # cv2.circle(frame, center, 5, (0, 0, 255), -1)\n if debug:\n cv2.drawContours(self.frame, c, -1, (0, 255, 0), 20)\n return center, radius\n # update the points queue\n cv2.imshow(\"mask\", imutils.resize(mask, width=1000, height=1000))\n cv2.imshow(\"frame\", imutils.resize(self.frame, width=1000, height=1000))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n raise OpenCVError(\"Could not find tracker!\")\n\n # return (1, 1), 1",
"def start(self):\n self.start_pre()\n\n #scn = bpy.context.scene\n #bpy.ops.ed.undo_push() # push current state to undo\n\n #self.header_text_set(\"PointsPicker\")\n #self.cursor_modal_set(\"CROSSHAIR\")\n #self.manipulator_hide()\n #self.b_pts = list() # list of 'Point' objects (see /lib/point.py)\n self.b_pts = []\n \n \n self.points_shader = None\n self.points_batch = None \n default_keymap = {\n \"add\": {\"LEFTMOUSE\"},\n \"grab\": {\"LEFTMOUSE\"},\n \"remove\": {\"ALT+LEFTMOUSE\", \"RIGHTMOUSE\"},\n \"commit\": {\"RET\"},\n \"cancel\": {\"ESC\"},\n \"done\": {'ENTER', 'UP_ARROW'}\n }\n\n self.actions = ActionHandler(self.context, default_keymap)\n #self.reload_stylings()\n \n \n self.variable_1 = BoundFloat('''options['variable_1']''', min_value =0.5, max_value = 15.5)\n self.variable_2 = BoundInt('''self.variable_2_gs''', min_value = 0, max_value = 10)\n self.variable_3 = BoundBool('''options['variable_3']''')\n \n self.ui_setup()\n self.ui_setup_post()\n\n self.snap_type = \"SCENE\" #'SCENE' 'OBJECT'\n self.snap_ob = None #bpy.context.object\n \n self.started = False\n \n \n self.selected = -1\n self.hovered = [None, -1]\n\n self.grab_undo_loc = None\n self.grab_undo_no = None\n self.mouse = (None, None)\n\n self.xform = XForm(Matrix.Identity(4))\n\n self.d3_points_render = D3PointsRender(self, render_opts)\n \n self.start_post()\n self.update_ui()",
"def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)",
"def detect_points(self):\r\n\r\n\t\r\n\r\n\t\tfeature_mask = np.zeros_like(self.gray) ## Create a mask so we only look for template features in the ROI\r\n\t\t\r\n\t\tfeature_mask[max(0,self.bb[1]):min(360,self.bb[1] + self.bb[3]),max(0,self.bb[0]):min(640,self.bb[0] + self.bb[2])] = 255\r\n\r\n\t\t# search for good points\r\n\t\tfeatures = cv2.goodFeaturesToTrack(self.gray, mask = feature_mask, **feature_params)\r\n\t\t# refine the corner locations\r\n\t\tcv2.cornerSubPix(self.gray,features, **subpix_params)\r\n\r\n\t\tself.features = features\r\n\r\n\t\tself.tracks = [[p] for p in features.reshape((-1,2))]\r\n\r\n\t\tself.prev_gray = self.gray",
"def draw_tracking_object_bounding_box(seat, img):\n if seat.chair_tracker_status:\n draw_box_and_text(img, \"Tracking chair\", seat.chair_tracker_bb, CvColor.BLACK)\n for i, tracker in enumerate(seat.trackers):\n if seat.trackers_status[i]:\n draw_box_and_text(img, \"Tracking object {}\".format(i), seat.trackers_bb[i], CvColor.BLACK)",
"def update(self):\n\t\t\n\t\tframe = self.cam.get_frame()\n\t\tframe_time = time.time()\n\t\t\n\t\tif self.face_tracker is None:\n\t\t\t# No face known\n\t\t\tself.find_face(frame)\n\t\telse:\n\t\t\t# Track the face\n\t\t\tself.face_tracker.update(frame_time, frame)\n\t\t\tself.annotator.set_face(self.face_tracker.get_face())\n\t\t\tself.annotator.set_forehead(self.face_tracker.get_forehead())\n\t\t\t\n\t\t\t# Update the heart monitor\n\t\t\tself.heart_monitor.add_sample(frame_time, self.sample_frame(frame))\n\t\t\tself.annotator.set_busy(not self.heart_monitor.buf_full)\n\t\t\t\n\t\t\tif self.heart_monitor.ready:\n\t\t\t\tbpm, phase, fft_data = self.heart_monitor.get_bpm()\n\t\t\t\t\n\t\t\t\t# Draw the OSD\n\t\t\t\tif fft_data and self.show_fft:\n\t\t\t\t\tself.annotator.draw_fft(frame, fft_data,\n\t\t\t\t\t self.heart_monitor.min_bpm,\n\t\t\t\t\t self.heart_monitor.max_bpm)\n\t\t\t\t\n\t\t\t\tif self.show_face:\n\t\t\t\t\tself.annotator.draw_face(frame)\n\t\t\t\t\n\t\t\t\tif self.show_forehead:\n\t\t\t\t\tself.annotator.draw_forehead(frame)\n\t\t\t\t\n\t\t\t\tif self.show_bpm:\n\t\t\t\t\tself.annotator.draw_bpm(frame, bpm)\n\t\t\t\t\tself.annotator.draw_phase(frame, phase)\n\t\t\n\t\t# Display the (possibly annotated) frame\n\t\tcv.ShowImage(self.window, frame)\n\t\t\n\t\t# Handle keypresses\n\t\tkey = cv.WaitKey(10) & 255\n\t\tif key == 27: # Escape\n\t\t\t# Exit\n\t\t\treturn False\n\t\telif key == ord(\"r\"):\n\t\t\t# Reset the heart monitor and face tracker\n\t\t\tself.face_tracker = None\n\t\t\tself.heart_monitor.reset()\n\t\telif key == ord(\" \"):\n\t\t\t# Re-find the face\n\t\t\tself.face_tracker = None\n\t\telif key == ord(\"1\"):\n\t\t\tself.show_face = not self.show_face\n\t\telif key == ord(\"2\"):\n\t\t\tself.show_forehead = not self.show_forehead\n\t\telif key == ord(\"3\"):\n\t\t\tself.show_fft = not self.show_fft\n\t\telif key == ord(\"4\"):\n\t\t\tself.show_bpm = not self.show_bpm\n\t\t\n\t\treturn True",
"def firing(self) -> None:\n self.shooter.fire()\n # self.next_state(\"tracking\")\n self.state = self.tracking",
"def track(self, frame, init_pos, frame_ctr=1):\n distance_list = []\n self.tracker.init(frame, tuple(init_pos))\n init_center_pos = int((init_pos[3] / 2) + init_pos[1])\n\n if self.debug_mode:\n # Created window to display video\n cv2.namedWindow('Barbell_Tracker', cv2.WINDOW_NORMAL)\n\n while(self.video.isOpened()):\n \"\"\"\n Based off: https://pythonprogramming.net/haar-cascade-object-detection-python-opencv-tutorial/\n Using tracker instead of constant detection\n \"\"\"\n # Read a frame\n retval, frame = self.video.read()\n if not retval:\n break\n frame_ctr += 1\n\n # Update the tracker\n retval, current_pos = self.tracker.update(frame)\n if not retval:\n raise Exception('Could not update tracker')\n\n # Find center of current position\n current_center_pos = int((current_pos[3] / 2) + current_pos[1])\n\n # Log distance moved\n distance_moved = init_center_pos - current_center_pos\n distance_list.append(distance_moved)\n logging.info('Frame {}. Distance moved: {}'.format(frame_ctr, distance_moved))\n\n if self.debug_mode:\n # Draw inital position\n cv2.rectangle(frame, (int(init_pos[0]), int(init_pos[1])),\n (int(init_pos[0] + init_pos[2]), int(init_pos[1] + init_pos[3])),\n (0, 255, 0), 1)\n # Draw rectangle over tracker position\n cv2.rectangle(frame, (int(current_pos[0]), int(current_pos[1])),\n (int(current_pos[0] + current_pos[2]), int(current_pos[1] + current_pos[3])),\n (0, 0, 255), 2)\n # Display distance moved\n cv2.putText(frame, 'Distance moved: {}'.format(distance_moved),\n (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)\n # Show frame\n cv2.imshow('Barbell_Tracker', frame)\n # Exit if q pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Stop video when finished\n self.video.release()\n return distance_list",
"def on_update(self):\n \n # update physics engine\n \n \n # use code from pick up coins lab to pick up coins\n # you don't need all of the code from that lab(no gameover or reset)",
"def test_set_tracking(self):\n self.server_widget.tracking = True\n assert self.client_widget.tracking == self.server_widget.tracking",
"def startup(self):\n self.prev_gray = None\n self.frame_idx = 1\n self.tracks = []\n self.fps = []\n self.vid_info = None\n self.track_new_points_count = 0"
] | [
"0.5731918",
"0.57305074",
"0.56346434",
"0.5629391",
"0.55517864",
"0.5453429",
"0.5344364",
"0.5320971",
"0.529912",
"0.5293814",
"0.52543193",
"0.5253417",
"0.52365303",
"0.5204984",
"0.51992756",
"0.51680225",
"0.51443803",
"0.5124607",
"0.51197994",
"0.50978005",
"0.50972706",
"0.5082406",
"0.50666517",
"0.5063294",
"0.5060331",
"0.5029275",
"0.5020398",
"0.5016898",
"0.50119656",
"0.4997761"
] | 0.6090624 | 0 |
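The top-ranked negative above (track_features) walks through a stereo feature-tracking pipeline: predict point motion from an IMU rotation prior, track with pyramidal Lucas-Kanade, then discard points that fail or drift outside the image. A minimal sketch of just that track-and-filter step, assuming OpenCV and NumPy; the function name and LK parameters are illustrative and not taken from any record:

```python
import cv2
import numpy as np

def track_lk(prev_img, curr_img, prev_pts):
    """Track prev_pts from prev_img to curr_img (grayscale uint8 frames)
    with pyramidal LK and drop points that fail or leave the image."""
    prev_pts = np.asarray(prev_pts, dtype=np.float32).reshape(-1, 1, 2)
    curr_pts, status, _err = cv2.calcOpticalFlowPyrLK(
        prev_img, curr_img, prev_pts, None,
        winSize=(21, 21), maxLevel=3,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))
    h, w = curr_img.shape[:2]
    status = status.reshape(-1).astype(bool)
    curr_pts = curr_pts.reshape(-1, 2)
    # Mark tracked points that land outside the image as lost.
    inside = ((curr_pts[:, 0] >= 0) & (curr_pts[:, 0] <= w - 1) &
              (curr_pts[:, 1] >= 0) & (curr_pts[:, 1] <= h - 1))
    keep = status & inside
    return prev_pts.reshape(-1, 2)[keep], curr_pts[keep]
```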
Save the current image to a FITS file. This should prompt the user to create a JSON file containing a FITS header if one isn't already in use. This FITS header will be reused during the current session. | def save_fits(self):
hdu = fits.PrimaryHDU()
hdu.data = self.original_image.astype('float32')
hdr = hdu.header
if not self.metadata_filename:
# Let the user choose a JSON file containing fits header.
self.metadata_filename = self.get_fits_metadata()
if self.metadata_filename == None:
# The user didn't select a file so they must create one.
self.metadata_filename = self.user_create_fits_header()
if self.metadata_filename:
# A metadata file exists so load it into a dict that will become the header.
with open(self.metadata_filename, 'r') as f:
d = json.load(f)
for k, v in d.items():
hdr[k] = v
if not self.save_directory:
self.save_directory = QtWidgets.QFileDialog.getExistingDirectory(
self,
"Select a directory",
options=QtWidgets.QFileDialog.DontUseNativeDialog
)
directory = os.path.join(self.save_directory, self.starting_time)
if not os.path.exists(directory):
os.makedirs(directory)
path = os.path.join(directory, f'{self._image_counter}.fits')
hdu.writeto(path)
self.statusBar().showMessage(f'Saved to {path}.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fitswrite(img, imgname, **kwargs):\n try:\n if kwargs.has_key('header'):\n hdu = pyfits.PrimaryHDU(img, header = kwargs['header'])\n else:\n hdu = pyfits.PrimaryHDU(img)\n hdu.writeto(imgname)\n except IOError:\n print \"FITSWRITE: Unable to write FITS image %s. Stopping.\" %imgname\n \n return",
"def save_as_fits(self, filename):",
"def create_jpg (filename):\n\n try:\n\n image_jpg = '{}.jpg'.format(filename.split('.fits')[0])\n\n if not isfile(image_jpg):\n\n log.info ('saving {} to {}'.format(filename, image_jpg))\n\n # read input image\n data, header = read_hdulist(filename, get_header=True)\n\n imgtype = header['IMAGETYP'].lower()\n file_str = image_jpg.split('/')[-1].split('.jpg')[0]\n if imgtype == 'object':\n title = ('file:{} object:{} filter:{} exptime:{:.1f}s '\n 'QC flag:{}'.format(file_str, header['OBJECT'],\n header['FILTER'], header['EXPTIME'],\n header['QC-FLAG']))\n else:\n title = ('file:{} imgtype:{} filter:{} QC flag:{}'\n .format(file_str, header['IMAGETYP'], header['FILTER'],\n header['QC-FLAG']))\n\n pixelcoords = True\n if pixelcoords:\n f = aplpy.FITSFigure(data)\n else:\n f = aplpy.FITSFigure(filename)\n\n vmin, vmax = zscale().get_limits(data)\n f.show_colorscale(cmap='gray', vmin=vmin, vmax=vmax)\n f.add_colorbar()\n f.set_title(title)\n #f.add_grid()\n #f.set_theme('pretty')\n f.save(image_jpg, adjust_bbox=False)\n f.close()\n\n\n except Exception as e:\n #log.exception (traceback.format_exc())\n log.exception ('exception was raised in creating jpg of image {}: {}'\n .format(filename,e))\n\n\n return image_jpg",
"def write_fits(self, name=None, output_path=None):\n pass",
"def saveFits(self, filename):\n \n if isinstance(self.res, type(None)):\n raise Exception('Result is not yet aviable.')\n \n header = fits.Header()\n header['NAXIS1'] = self.naxis\n header['NAXIS2'] = self.naxis\n header['CTYPE1'] = 'RA---SIN'\n header['CTYPE2'] = 'DEC--SIN'\n header['CDELT1'] = - self.fov/(np.pi/180 * self.naxis)\n header['CDELT2'] = self.fov/(np.pi/180 * self.naxis)\n header['BUNIT'] = 'JY/PIXEL'\n \n hdu = fits.PrimaryHDU(self.res, header=header)\n hdulist = fits.HDUList([hdu])\n hdulist.writeto(filename, overwrite=True)\n \n print(\"Saved as '%s'.\" %(filename))",
"def to_fits(self, imagename, fitsname=None, script='to_fits', del_script=True, overwrite=False):\n input_image = imagename\n ct.exportfits(input_image, fitsname=fitsname, script=script, overwrite=overwrite)",
"def save_to_fits(self, fits_file_name, data='image', overwrite=True):\n if data == 'image':\n data_use = self.image\n elif data == 'mask':\n data_use = self.mask\n else:\n raise ValueError('Data can only be \"image\" or \"mask\".')\n img_hdu = fits.PrimaryHDU(data_use)\n\n if self.header is not None:\n img_hdu.header = self.header\n if self.wcs is not None:\n wcs_header = self.wcs.to_header()\n import fnmatch\n for i in wcs_header:\n if i in self.header:\n self.header[i] = wcs_header[i]\n if fnmatch.fnmatch(i, 'PC?_?'):\n self.header['CD' + i.lstrip(\"PC\")] = wcs_header[i]\n img_hdu.header = self.header\n elif self.wcs is not None:\n wcs_header = self.wcs.to_header()\n img_hdu.header = wcs_header\n else:\n img_hdu = fits.PrimaryHDU(data_use)\n\n if os.path.islink(fits_file_name):\n os.unlink(fits_file_name)\n\n img_hdu.writeto(fits_file_name, overwrite=overwrite)\n return img_hdu",
"def save(self, path, filename=None, overwrite=False):\n \n if filename is None and self.metadata is None:\n raise ValueError(\"If the image has no 'metadata', you must specify a filename\")\n elif filename is not None:\n pass\n elif filename is None and self.metadata is not None:\n filename = os.path.basename(self.metadata[\"pfilename\"])\n \n full_image_path = os.path.join(path, filename)\n \n if overwrite and os.path.exists(full_image_path):\n os.remove(full_image_path)\n \n self.fits.writeto(full_image_path)",
"def write_fits(self):\n \n import time\n import getpass\n \n formats = {}\n formats['bool'] = 'L'\n formats['int16'] = 'I'\n formats['int32'] = 'J'\n formats['int64'] = 'K'\n formats['float32'] = 'E'\n formats['float64'] = 'D'\n \n formats['>i8'] = 'K'\n formats['>f8'] = 'D'\n \n #### Make the table columns, translating numpy data types to \"TFORM\"\n coldefs = []\n dt = str(np.array(self.images).dtype)\n if 'S' in dt:\n TFORM = 'A'+dt.split('S')[1]\n elif 'U' in dt:\n TFORM = 'A'+dt.split('U')[1]\n \n print(TFORM)\n \n coldefs.append(pyfits.Column(name='images', array=np.array(self.images), format=TFORM))\n \n for column in self.params.keys():\n if column == 'comment':\n coldata = np.array(self.params['comment'])\n else:\n coldata = self.params[column]\n #\n dtype = str(coldata.dtype)\n #print column, dtype\n if dtype in formats.keys():\n TFORM=formats[dtype]\n else:\n if ('S' not in dtype) & ('U' not in dtype):\n print('Unrecognized data type in: %s' %(dtype))\n return False\n #\n if 'S' in dtype:\n TFORM = 'A'+dtype.split('S')[1]\n elif 'U' in dtype:\n TFORM = 'A'+dtype.split('U')[1]\n #\n #data = self.params[column]\n if '>' in dtype:\n cast_types = {'>i8':np.int64, '>f8':np.float64}\n coldata = np.cast[cast_types[dtype]](coldata)\n #\n coldefs.append(pyfits.Column(name=column, array=coldata, format=TFORM))\n \n #### Done, now make the binary table\n tbhdu = pyfits.BinTableHDU().from_columns(coldefs)\n \n linehdu = pyfits.ImageHDU(data=self.marked_reads, name='FLAGGED')\n \n #### Primary HDU\n hdu = pyfits.PrimaryHDU()\n thdulist = pyfits.HDUList([hdu, tbhdu, linehdu])\n\n #### Add modification time of \"infile\" to FITS header\n infile_mod_time = time.strftime(\"%m/%d/%Y %I:%M:%S %p\",\n time.localtime()) # os.path.getmtime(self.filename)))\n \n thdulist[0].header['MODTIME'] = infile_mod_time\n thdulist[0].header['USER'] = getpass.getuser()\n \n thdulist.writeto(self.logfile, clobber=True)\n \n print('Log to file %s' %(self.logfile))",
"def save_fits(self, name: str, hdu):\r\n hdu.writeto(self._path_for_fits(name), overwrite=True)",
"def export_fits(self, mask=None, **kwargs):\n \n ## Check key word arguments\n save_file = kwargs.pop('save_file', 'image.fits')\n fill_value = kwargs.pop('fill_value',0.)\n \n ## Check if mask provided matches data shape\n if self.is_valid_mask(mask):\n masked_data = np.ma.MasedArray()",
"def save_fits(df, fname):\n df = df.reset_index()\n outtable = Table.from_pandas(df)\n Path(fname).parent.mkdir(parents=True, exist_ok=True)\n outtable.write(fname, format='fits', overwrite=True)",
"def write(self, image):\n raise NotImplementedError()",
"def write_image(self, img, extname=None, extver=None,\n compress=None, tile_dims=None, header=None):\n\n self.create_image_hdu(img,\n header=header,\n extname=extname, extver=extver,\n compress=compress, tile_dims=tile_dims)\n\n if header is not None:\n self[-1].write_keys(header)\n self[-1]._update_info()\n\n # if img is not None:\n # self[-1].write(img)",
"def make_fake_image(header, output='direct.fits', background=None, exptime=1.e4, nexp=10):\n hdu = pyfits.HDUList()\n \n header['EXPTIME'] = exptime\n header['NEXP'] = nexp\n header['BUNIT'] = 'ELECTRONS/S'\n \n hdu.append(pyfits.PrimaryHDU(header=header))\n \n naxis = (header['NAXIS1'], header['NAXIS2'])\n \n for name, dtype in zip(['SCI', 'ERR', 'DQ'], \n [np.float32, np.float32, np.int32]):\n hdu.append(pyfits.ImageHDU(header=header, \n data=np.zeros(np.array(naxis).T, \n dtype=dtype), name=name))\n \n if background == None:\n background = header['BACKGR']\n \n header['BACKGR'] = background\n \n ### Simple error model of read noise and sky background\n var = nexp*header['READN'] + background*exptime\n \n ### electrons / s\n rms = np.sqrt(var)/exptime\n hdu['ERR'].data += rms\n hdu['SCI'].data = np.random.normal(size=np.array(naxis).T)*rms\n \n hdu.writeto(output, clobber=True, output_verify='fix')",
"def save_image(image, file_name):\n io.imsave(file_name,image)",
"def write_image(self, image_name, image):\n raise NotImplementedError",
"def save_image(img: Image, filename: str) -> None:\r\n img.save(filename)",
"def save_image(self, file_obj):\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)",
"def save_fits(data, fname):\n\tcols = fits.ColDefs(np.copy(data)) # This is somehow necessary.\n\ttbhdu = fits.BinTableHDU.from_columns(cols)\n\ttbhdu.writeto(fname, clobber=True)\n\t\n\treturn",
"def _save(self):\n\n out_dict = {}\n out_dict[\"version\"] = pyfx.__version__\n out_dict[\"name\"] = self._name\n out_dict[\"src\"] = self._src\n\n # Write out the background file as an image\n bg_file = os.path.join(self._name,\"master_bg_image.png\")\n pyfx.util.to_file(self._bg_frame,bg_file)\n out_dict[\"bg_frame\"] = bg_file\n\n f = open(os.path.join(self._name,\"pyfx.json\"),\"w\")\n json.dump(out_dict,f)\n f.close()",
"def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)",
"def tofits(outfilename, pixelarray, hdr=None, verbose=True):\n # print \"LOGX:: Entering `tofits` method/function in %(__file__)s\" %\n # globals()\n pixelarrayshape = pixelarray.shape\n if verbose:\n print(\"FITS export shape : (%i, %i)\" % (pixelarrayshape[0], pixelarrayshape[1]))\n\n if pixelarray.dtype.name == \"bool\":\n pixelarray = np.cast[\"uint8\"](pixelarray)\n\n if os.path.isfile(outfilename):\n os.remove(outfilename)\n\n if hdr == None: # then a minimal header will be created\n hdu = pyfits.PrimaryHDU(pixelarray.transpose())\n else: # this if else is probably not needed but anyway ...\n hdu = pyfits.PrimaryHDU(pixelarray.transpose(), hdr)\n\n hdu.writeto(outfilename, output_verify='ignore')\n\n if verbose:\n print(\"Wrote %s\" % outfilename)",
"def save_image(self, filename):\n raster.save_image(filename, self.image, self.metadata)",
"def writeFitsImage( rs, cpuIndex, grid, projection):\n\n# print(\"Image: \", imageData)\n \n imageData = grid.image\n size = imageData.shape\n imageCopy = copy.deepcopy( imageData)\n nx = size[1]\n ny = size[0]\n\n # now flip the Y axis of the image to match the FITS Convention\n iy = ny - 1\n for iii in range(ny):\n imageCopy[iii][:] = imageData[iy][:]\n iy = iy - 1\n\n pixcrd = np.array([[0, 0], [24, 38]], dtype=np.float64)\n\n # Create a new WCS object. The number of axes must be set\n # from the start\n w = wcs.WCS(naxis=2)\n\n gridtype = grid.gridtype.upper()\n print(\"Grid Type: %s %d\" % (gridtype, gridtype.find('RA')))\n# gridtype = \"RA\"\n if gridtype.find('RA') > -1:\n maptype = 'RA'\n XTYPE = 'RA--'\n YTYPE = 'DEC-'\n else:\n maptype = 'GAL'\n XTYPE = 'GLON'\n YTYPE = 'GLAT'\n xstart = 360.\n ystart = 90.\n\n# select the projection here:\n# projection = \"-CYP\"\n# projection = \"-CAR\"\n\n crval1 = grid.crval1\n crval2 = grid.crval2\n crpix1 = grid.crpix1\n crpix2 = grid.crpix2\n cdelt1 = grid.cdelt1\n cdelt2 = grid.cdelt2\n print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))\n\n hdu = fits.PrimaryHDU()\n header = hdu.header\n\n dateobs = \"%s\" % (rs.utc)\n dateobs = dateobs.replace(\" \",\"T\")\n mydate = datetime.datetime.now()\n mydate = \"%s\" % (mydate)\n mydate = mydate[2:10]\n mydate.replace('-','/')\n\n header['NAXIS1'] = int(nx)\n header['NAXIS2'] = int(ny)\n header['BUNIT'] = 'K-km/s/BEAM'\n maptype = \"RA\"\n if maptype[0:2] == \"RA\":\n maptype = \"RA\"\n header['CTYPE1'] = 'RA---CAR'\n else:\n maptype = \"GAL\"\n header['CTYPE1'] = 'GLON-CAR'\n\n # create a cartesian x centered iamge \n header['CRPIX1'] = nx/2.\n header['CRVAL1'] = 180.\n grid.crval1 = header['CRVAL1']\n header['CDELT1'] = cdelt1\n header['CUNIT1'] = 'deg'\n header['CRVAL2'] = (grid.ymax+grid.ymin)/2.\n grid.crval2 = header['CRVAL2']\n header['CRPIX2'] = ny/2.\n header['CDELT2'] = cdelt2\n header['CUNIT2'] = 'deg'\n\n grid.gridtype = maptype\n if maptype[0:2] == \"RA\":\n print(\"RA: writeFits: %s\" % (maptype))\n header['CTYPE2'] = 'DEC--CAR'\n else:\n print(\"GAL: writeFits: %s\" % (maptype))\n header['CTYPE2'] = 'GLAT-CAR'\n\n header['WCAXES'] = 2\n header['RADESYS'] ='FK5'\n\n# temporarily replace ref coordinate iwth zero\n crval2 = header['CRVAL2']\n crpix2 = header['CRPIX2']\n# redefine the reference for the best cartisian format \n referencevalue = 0.\n dpix = (referencevalue - crval2)/cdelt2\n crpix2 = crpix2 + dpix\n# change x axis\n header['CRVAL2'] = referencevalue\n header['CRPIX2'] = crpix2\n\n header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates\n header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm\n header['BMIN'] = 18.1 # Beam minor axis in degrees\n header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees\n header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz\n header['RESTWAV'] = 0.211061140551 # Line wavelength (m)\n header['DATE-OBS'] = dateobs\n header['DATE'] = mydate\n header['OBSERVER'] = 'Science Aficionado'\n header['OBJECT'] = 'Milky Way'\n header['TELESCOP'] = 'Aficionado Horn'\n header['HISTORY'] = \"GridSave.py -- Glen Langston -- 20 May 13\"\n header['HISTORY'] = \"Observations in March + April 2020\"\n\n# while len(header) < (36 * 4 - 1):\n# header.append() # Adds a blank card to the end\n# header.delval(\"EXTEND\")\n header.update()\n\n# hdu = fits.PrimaryHDU(header=header, data=imageData)\n hdu = fits.PrimaryHDU(header=header, data=imageCopy)\n\n # As file at 
filePath is deleted now, so we should check if file exists or not not before deleting them\n outname = (\"Aficionado_T%d\" % (cpuIndex)) + \"-\" + maptype + projection + \".fit\"\n if os.path.exists(outname):\n os.remove(outname)\n hdu.writeto(outname)\n\n# create a second file with new projection\n fixImageCoordinates( outname, projection)\n\n return",
"def save_to_fits(self, filename, comment=None, overwrite = False):\n\n\n hdu = fits.PrimaryHDU(self.flux)\n hdu.header = self.header\n\n # Update header information\n crval = self.dispersion[0]\n cd = self.dispersion[1]-self.dispersion[0]\n crpix = 1\n\n hdu.header['CRVAL1'] = crval\n hdu.header['CD1_1'] = cd\n hdu.header['CDELT1'] = cd\n hdu.header['CRPIX1'] = crpix\n\n hdu.header['HISTORY'] = '1D spectrum generated with SpecOneD'\n\n if comment:\n hdu.header['HISTORY'] = comment\n\n hdul = fits.HDUList([hdu])\n\n try:\n hdul.writeto(filename, overwrite = overwrite)\n except:\n raise ValueError(\"Spectrum could not be saved. Maybe a file with the same name already exists and overwrite is False\")",
"def write_to_file(data, filename):\n fimg = fits.HDUList()\n fimghdu = fits.PrimaryHDU()\n fimghdu.data = data\n fimg.append(fimghdu)\n fimg.writeto(filename, overwrite=True)\n print(' wrote output data to: ', filename)",
"def register_image_file(self, image):\n save_path = os.path.join(self.session_dir, 'image.jpg')\n image.save(save_path)\n self.image = np.array(Image.open(save_path))",
"def write(img, path):\n create_directories_for_file_name(path)\n writer = sitk.ImageFileWriter()\n writer.Execute(img, path, True)",
"def wfits(self, filename=None):\n with self.lock:\n dark = self.dark\n if not filename:\n if dark != 0:\n filename = self.getNextFilename(\"dark\")\n else:\n filename = self.getNextFilename(\"object\")\n with self.lock:\n if(self.data.size == 0):\n raise FliError(\"No image available\")\n hdu = pyfits.PrimaryHDU(self.data)\n hdr = hdu.header\n with self.lock:\n hdr.set('DATE', self.timestamp, 'exposure begin date')\n hdr.set('INSTRUME', self.devname, 'this instrument')\n hdr.set('SERIAL', self.devsn, 'serial number')\n hdr.set('EXPTIME', self.exptime, 'exposure time (ms)')\n hdr.set('VBIN', self.vbin, 'vertical binning')\n hdr.set('HBIN', self.hbin, 'horizontal binning')\n hdr.set('CCD-TEMP', self.temp, 'CCD temperature')\n if dark != 0:\n hdr.set('SHUTTER', 'CLOSE', 'shutter status')\n else:\n hdr.set('SHUTTER', 'OPEN', 'shutter status')\n hdr.set('CCDAREA', '[%d:%d,%d:%d]' % self.expArea, 'image area')\n hdu.writeto(filename, overwrite=True, checksum=True)\n with self.lock:\n self.filename = filename"
] | [
"0.7054182",
"0.6930849",
"0.63592213",
"0.6270393",
"0.61625844",
"0.6130224",
"0.6086769",
"0.59388566",
"0.5841737",
"0.58076614",
"0.5787606",
"0.5749618",
"0.5720291",
"0.57192296",
"0.5700703",
"0.56808454",
"0.5671213",
"0.5667887",
"0.565113",
"0.56492496",
"0.5648957",
"0.56371444",
"0.5634331",
"0.56111556",
"0.55781424",
"0.55690354",
"0.5545587",
"0.552696",
"0.5522178",
"0.5489123"
] | 0.77152437 | 0 |
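The record above matches a "save the current image to FITS with a JSON-backed header" query to a Qt-based implementation. A minimal, GUI-free sketch of the same pattern, assuming astropy is available; the file names and header keys are placeholders:

```python
import json
import os
from astropy.io import fits

def save_fits(image, header_json, out_dir, index):
    """Write a float32 FITS image, copying key/value pairs from a JSON
    file (if given) into the primary header."""
    hdu = fits.PrimaryHDU(data=image.astype('float32'))
    if header_json and os.path.exists(header_json):
        with open(header_json) as f:
            # e.g. {"OBSERVER": "me", "EXPTIME": 1.5}
            for key, value in json.load(f).items():
                hdu.header[key] = value
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, f'{index}.fits')
    hdu.writeto(path, overwrite=True)
    return path
```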
Removes all results records such as wins, losses and match IDs from the database. | def deleteMatches():
db = connect()
c = db.cursor()
query = ("DELETE FROM results;")
c.execute(query)
db.commit()
db.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleteMatches():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM results\")\n conn.commit()\n conn.close()",
"def deleteMatches():\n # gets connection to tournament database in conn object\n conn = connect()\n # gets the cursor to execute queries\n c = conn.cursor()\n # executes delete query to delete all records in MATCH table\n c.execute(\"DELETE FROM MATCH;\")\n # commits the changes perform on MATCH table after delete statement executes\n conn.commit()\n # closes the connection to tournament database\n conn.close()",
"def deleteMatches():\n conn = connect()\n c = conn.cursor()\n # Clears the \"matches\" table, but does not get rid of the table.\n c.execute(\"delete from matches;\")\n conn.commit()\n conn.close()",
"def deleteMatches():\n\n query = (\"DELETE FROM matches;\")\n results = executeQuery({'dbname': 'tournament', 'query' : query, 'type' : 'delete'})",
"def deleteMatches():\n #deletes the contents of table matches\n DB().execute(\"DELETE FROM matches\", True)",
"def clear_results(self):\n for res in self.results:\n self.results[res] = None",
"def deleteMatches():\n DB = dbc()\n DB.cursor().execute('DELETE FROM matches')\n DB.commit()\n DB.close()",
"def deleteMatches():\n conn, cur = connect()\n cur.execute(\"DELETE FROM MATCHES;\")\n conn.commit()\n conn.close()",
"def deleteMatches():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM matches\")\n DB.commit()\n DB.close()",
"def delete_matches():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM matches\")\n DB.commit()\n DB.close()",
"def deleteMatches():\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n dbcursor.execute(\"DELETE FROM matches\")\n dbconnection.commit()\n dbconnection.close()",
"def deleteMatches():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"delete from matches;\")\n db_conn.commit()\n db_conn.close()",
"def deleteMatches():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"DELETE FROM match\")\n dbConn.commit()\n dbConn.close()",
"def deleteMatches():\n db, cursor = connect()\n cursor.execute(\"DELETE FROM matches\")\n db.commit()\n db.close()",
"def deleteMatches():\n\n conn, c = main.connect()\n\n c.execute(\"TRUNCATE tournamentMatch\")\n\n conn.commit()\n conn.close()",
"def deleteMatches():\n conn, c = connect()\n c.execute(\"DELETE FROM matches;\")\n conn.commit()\n conn.close()",
"def deleteAll(self):\n self.db.execute(\"DELETE FROM MATCH;\", ())",
"def deleteMatches():\n db = connect()\n db_cursor = db.cursor()\n query = \"DELETE FROM matches\"\n db_cursor.execute(query)\n db.commit()\n db.close()",
"def deleteMatches():\n c.execute(\"DELETE FROM matchup\");\n print \"All matches have been successfully deleted\"\n return",
"def data_cleaning():\n conn = get_connect()\n conn.execute(\"DELETE FROM championMatchData WHERE kills < 2 AND deaths < 2 AND assists < 2\")\n conn.commit()\n conn.close()\n return",
"def deleteMatches():\n conn, c = connect()\n\n q = \"DELETE FROM MATCHES;\"\n c.execute(q)\n c.close()\n conn.commit()\n conn.close()",
"def clear(self):\n self.results.clear()",
"def deleteMatches():\n cursor.execute(\"\"\"delete from matches\"\"\")",
"def clear_old_results(self):\n logger.info('Clearing old job results')\n self.db.alter(clear_iso_image_sql, self.job_id)\n self.db.alter(clear_iso_image_metrics_sql, self.job_id)",
"def erase_scores(self):\n self.database.erase_scores(self.difficulty)",
"def deleteMatches():\n cur3 = conn.cursor()\n query = \"DELETE from MATCHES;\"\n cur3.execute(query)\n cur3.execute(\"commit;\")\n print \"\\t\\t\\tMatches Table DELETED\\n\"",
"def deleteMatches():\n executeNonQuery(\"truncate table matches;\")",
"def clear_result(self):\n self.results = []",
"def deleteMatches():\n conn = psycopg2.connect(\"dbname=tournament\")\n c = conn.cursor()\n c.execute(\"TRUNCATE match\")\n conn.commit()\n conn.close()",
"def remove_all_recs(self):\n return self.storage.clear()"
] | [
"0.7346613",
"0.69081885",
"0.68491256",
"0.6731901",
"0.6683167",
"0.6651814",
"0.66495967",
"0.6608778",
"0.6582605",
"0.6582482",
"0.6555129",
"0.6554785",
"0.6542801",
"0.65361875",
"0.6525654",
"0.6517774",
"0.6511976",
"0.6508967",
"0.6475777",
"0.64202684",
"0.63511294",
"0.63454634",
"0.63395196",
"0.63390654",
"0.6311498",
"0.63084173",
"0.6293246",
"0.62830585",
"0.62785447",
"0.6277718"
] | 0.7342317 | 1 |
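The record above clears a results table through a raw psycopg2 connection (connect, execute DELETE, commit, close). A sketch of the same operation with the connection and cursor used as context managers, assuming the tournament database and results table named in the record:

```python
import psycopg2

def delete_matches():
    """Remove every row from the results table."""
    conn = psycopg2.connect("dbname=tournament")
    try:
        with conn:                        # commits on success, rolls back on error
            with conn.cursor() as cur:
                cur.execute("DELETE FROM results;")
    finally:
        conn.close()
```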
Return the address of the config file | def config_file_address() -> str:
config_files = json_files_from_folder("config")
    config_file = choose_config(config_files) # Choose a config file if there is more than 1 in the config folder
return config_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME",
"def config_file(self):\n return self[CONFIG_FILE_KEY]",
"def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")",
"def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None",
"def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')",
"def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath",
"def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path",
"def get_instance_config_path():\n return join(settings.PROJECT_DIR, \"conf\", \"eoxserver.conf\")",
"def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')",
"def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()",
"def get_config(_config_file):\n ''' script absolute location '''\n abs_path = os.path.dirname(inspect.getfile(inspect.currentframe()))\n\n if _config_file[0] not in ('/', '~'):\n if os.path.isfile(os.path.join(abs_path, _config_file)):\n config_path = os.path.join(abs_path, _config_file)\n else:\n raise IOError('Failed to find config file')\n else:\n if os.path.isfile(_config_file):\n config_path = _config_file\n else:\n raise IOError('Failed to find config file')\n\n with open(config_path) as cjson:\n config_data = json.load(cjson)\n # config must not be empty:\n if len(config_data) > 0:\n return config_data\n else:\n raise Exception('Failed to load config file')",
"def get_config(name):\n db = dbm.open(config_file, 'c')\n url = db[name]\n db.close()\n return url",
"def default_config_file(self):\n return DEFAULT_CONFIG_FILEPATH",
"def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")",
"def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;",
"def configPath(self):\n return os.path.dirname(__file__)",
"def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)",
"def config_url(config):\n if 'url' not in config:\n raise Exception('The config file does not contain \"url\"')\n return config['url']",
"def _github_config(self, config_file_name):\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path",
"def config_locator():\n print(pkgrs.resource_filename('latools', 'latools.cfg'))\n return",
"def cfg_path(self):\n return self._cfg_path",
"def get_config_file():\n return deployr_config_repository.get_deployr_config_file()",
"def _app_config_file() -> str:\n if 'AISCALATOR_HOME' in os.environ:\n home = os.environ['AISCALATOR_HOME']\n file = os.path.join(home, \"config\", \"aiscalator.conf\")\n if os.path.exists(file):\n return file\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator',\n 'config', 'aiscalator.conf')",
"def get_config_file_for_auto_config(self) -> Optional[Text]:\n return self.config_file",
"def _get_config_path():\n return os.path.join(os.path.expanduser('~'))",
"def get_github_config_path(self, config_file_name):\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path",
"def get_global_config_path():\n\n return \"/etc/dapsenv/dapsenv.conf\"",
"def get_http_config_file_path(node_uuid):\n return os.path.join(get_http_boot_dir(), node_uuid, 'config')",
"def get_configuration_file():\n path = os.path.abspath(os.curdir)\n while path != os.sep:\n config_path = os.path.join(path, CONFIG_FILE_NAME)\n if os.path.exists(config_path):\n return config_path\n path = os.path.dirname(path)\n return None",
"def path_config_docker(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_EXT)"
] | [
"0.7951816",
"0.7826119",
"0.76803935",
"0.74781823",
"0.738902",
"0.733537",
"0.72772247",
"0.72337604",
"0.71990085",
"0.7156913",
"0.7138757",
"0.70974725",
"0.70779324",
"0.7067025",
"0.7065846",
"0.70410687",
"0.70331275",
"0.7028916",
"0.7022305",
"0.6978829",
"0.69780165",
"0.6907443",
"0.6827179",
"0.6787578",
"0.6756672",
"0.67503846",
"0.6726445",
"0.67194766",
"0.6715841",
"0.6706304"
] | 0.821069 | 0 |
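The record above picks which JSON config file to use from a config folder. A small sketch of that lookup using only the standard library; the folder name and the first-match fallback are assumptions rather than the record's behavior:

```python
from pathlib import Path

def config_file_address(folder="config"):
    """Return the path of the JSON config file to use."""
    candidates = sorted(Path(folder).glob("*.json"))
    if not candidates:
        raise FileNotFoundError(f"no .json config found in {folder!r}")
    # With several candidates the original prompts the user to choose;
    # this sketch simply takes the first match.
    return str(candidates[0])
```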
GradCAM method for visualizing input saliency. | def grad_cam(input_model, image, cls, layer_name):
y_c = input_model.output[0, cls]
conv_output = input_model.get_layer(layer_name).output
grads = K.gradients(y_c, conv_output)[0]
# Normalize if necessary
# grads = normalize(grads)
gradient_function = K.function([input_model.input], [conv_output, grads])
output, grads_val = gradient_function([image])
output, grads_val = output[0, :], grads_val[0, :, :, :]
weights = np.mean(grads_val, axis=(0, 1))
cam = np.dot(output, weights)
# Process CAM
cam = cv2.resize(cam, (H, W), cv2.INTER_LINEAR)
cam = np.maximum(cam, 0)
cam = cam / cam.max()
return cam | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grad_cam(input_model, image, clss, layer_name, H=180, W=180):\r\n y_c = input_model.output[0, clss]\r\n conv_output = input_model.get_layer(layer_name).output\r\n grads = K.gradients(y_c, conv_output)[0]\r\n\r\n gradient_function = K.function([input_model.input], [conv_output, grads])\r\n\r\n output, grads_val = gradient_function([image])\r\n output, grads_val = output[0, :], grads_val[0, :, :, :]\r\n\r\n weights = np.mean(grads_val, axis=(0, 1))\r\n cam = np.dot(output, weights)\r\n\r\n # Process CAM\r\n cam = cv2.resize(cam, (W, H), cv2.INTER_LINEAR)\r\n cam = np.maximum(cam, 0)\r\n cam = cam / cam.max()\r\n return cam",
"def grad_cam_batch(input_model, images, classes, layer_name):\n loss = tf.gather_nd(input_model.output, np.dstack([range(images.shape[0]), classes])[0])\n layer_output = input_model.get_layer(layer_name).output\n grads = K.gradients(loss, layer_output)[0]\n gradient_fn = K.function([input_model.input, K.learning_phase()], [layer_output, grads])\n\n conv_output, grads_val = gradient_fn([images, 0]) \n weights = np.mean(grads_val, axis=(1, 2))\n cams = np.einsum('ijkl,il->ijk', conv_output, weights)\n \n # Process CAMs\n new_cams = np.empty((images.shape[0], H, W))\n for i in range(new_cams.shape[0]):\n cam_i = cams[i] - cams[i].mean()\n cam_i = (cam_i + 1e-10) / (np.linalg.norm(cam_i, 2) + 1e-10)\n new_cams[i] = cv2.resize(cam_i, (H, W), cv2.INTER_LINEAR)\n new_cams[i] = np.maximum(new_cams[i], 0)\n new_cams[i] = new_cams[i] / new_cams[i].max()\n \n return new_cams",
"def demo2(image_paths, output_dir, cuda):\n\n device = get_device(cuda)\n\n # Synset words\n classes = get_classtable()\n\n # Model\n model = models.resnet152(pretrained=True)\n model.to(device)\n model.eval()\n\n # The four residual layers\n target_layers = [\"relu\", \"layer1\", \"layer2\", \"layer3\", \"layer4\"]\n target_class = 243 # \"bull mastif\"\n\n # Images\n images, raw_images = load_images(image_paths)\n images = torch.stack(images).to(device)\n\n gcam = GradCAM(model=model)\n probs, ids = gcam.forward(images)\n # ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)\n ids_ = torch.tensor([[target_class]] * len(images), dtype=torch.long).to(device)\n gcam.backward(ids=ids_)\n\n for target_layer in target_layers:\n print(\"Generating Grad-CAM @{}\".format(target_layer))\n\n # Grad-CAM\n regions = gcam.generate(target_layer=target_layer)\n\n for j in range(len(images)):\n print(\n \"\\t#{}: {} ({:.5f})\".format(\n j, classes[target_class], float(probs[ids == target_class])\n )\n )\n\n # save_gradcam(\n # filename=osp.join(\n # output_dir,\n # \"{}-{}-gradcam-{}-{}.png\".format(\n # j, \"resnet152\", target_layer, classes[target_class]\n # ),\n # ),\n # gcam=regions[j, 0],\n # raw_image=raw_images[j],\n # )",
"def compute_saliency(model, guided_model, img_path, layer_name=conv_name, cls=-1, visualize=False, save=True):\n #--------- slide image get --------------\n ori_img = Image.open(img_path)\n\n #define slide range\n slide_xl = 0\n slide_xr = 100\n slide_yu = 0\n slide_yd = 100\n name_cnt_int = 1\n\n for m in range(9):\n for i in range(9):\n slide_img = ori_img.crop((slide_xl,slide_yu,slide_xr,slide_yd))\n name_cnt_str = str(name_cnt_int)\n roop_str = str(m)\n slide_name = './slide_img/slide_img_' + roop_str + '_' + name_cnt_str + '.jpg'\n slide_img.save(slide_name)\n preprocessed_input = load_image(slide_name)\n\n pred = model.predict(preprocessed_input)[0]\n #print(pred)\n top_n = 3\n top_indices = pred.argsort()[-top_n:][::-1]\n result = [(classes[i], pred[i]) for i in top_indices]\n #print(\"number: \",name_cnt_str)\n print(\"number:\",roop_str,name_cnt_str)\n print(\"xrange: \",slide_xl,slide_xr)\n print(\"yrange: \",slide_yu,slide_yd)\n for x in result:\n print(x)\n\n if cls == -1:\n cls = np.argmax(pred)\n \n print(\"argmax:\",cls)\n if cls == 1:\n print(\"\\n\")\n print(\"-----Careful-----\")\n print(\"-----Doubt spotted-----\")\n print(\"\\n\")\n\n if cls == 2:\n print(\"\\n\")\n print(\"-----Warning!!!-----\")\n print(\"-----Bad spotted!!!!!-----\")\n print(\"\\n\")\n\n gradcam = grad_cam(model, preprocessed_input, cls, layer_name)\n gb = guided_backprop(guided_model, preprocessed_input, layer_name)\n guided_gradcam = gb * gradcam[..., np.newaxis]\n cls = -1\n\n if save:\n cam_name = './cam_image/' + roop_str + '_' + name_cnt_str + '.jpg'\n jetcam = cv2.applyColorMap(np.uint8(255 * gradcam), cv2.COLORMAP_JET)\n jetcam = (np.float32(jetcam) + load_image(slide_name, preprocess=False)) / 2\n cv2.imwrite(cam_name, np.uint8(jetcam))\n #cv2.imwrite('guided_backprop.jpg', deprocess_image(gb[0]))\n #cv2.imwrite('guided_gradcam.jpg', deprocess_image(guided_gradcam[0]))\n \n name_cnt_int = int(name_cnt_str)\n name_cnt_int += 1\n #x軸スライド幅\n slide_xl += 50\n slide_xr += 50\n \n \n if visualize:\n \n plt.figure(figsize=(15, 10))\n plt.subplot(131)\n plt.title('GradCAM')\n plt.axis('off')\n plt.imshow(load_image(img_path, preprocess=False))\n plt.imshow(gradcam, cmap='jet', alpha=0.5)\n\n plt.subplot(132)\n plt.title('Guided Backprop')\n plt.axis('off')\n plt.imshow(np.flip(deprocess_image(gb[0]), -1))\n \n plt.subplot(133)\n plt.title('Guided GradCAM')\n plt.axis('off')\n plt.imshow(np.flip(deprocess_image(guided_gradcam[0]), -1))\n plt.show()\n\n #右端までスライド完了、y軸方向へスライド\n name_cnt_int = 0\n slide_xl = 0\n slide_xr = 100\n slide_yu = slide_yu + 50\n slide_yd = slide_yd + 50\n \n \n\n return gradcam, gb, guided_gradcam",
"def create_cam_colored(dl: DatasetLoader, model, outname: str, im_width=256, n=8, s=256):\n\n heatmaps = []\n for i in range(0, dl.nb_classes):\n predict_input = (cv2.imread(dl.baseDirectory + \"/\" + dl.picker[i].directory + \"/\" +\n dl.picker[i].name, cv2.IMREAD_COLOR))\n base = Image.open(dl.baseDirectory + \"/\" + dl.picker[i].directory + \"/\" +\n dl.picker[i].name)\n predict_input = predict_input.astype('float32')\n predict_input = np.expand_dims(predict_input, axis=0)\n predict_input = preprocess_input(predict_input)\n\n output_generator = get_outputs_generator(model, 'CAM')\n layer_outputs = output_generator(predict_input)[0]\n\n inputs = model.input\n output_predict = model.get_layer('W').output\n fn_predict = K.function([inputs], [output_predict])\n prediction = fn_predict([predict_input])[0]\n value = np.argmax(prediction)\n\n w = model.get_layer(\"W\").get_weights()[0]\n heatmap = cv2.resize(layer_outputs[:, :, 0], (im_width, im_width), interpolation=cv2.INTER_CUBIC)\n heatmap *= w[0][value]\n for z in range(1, layer_outputs.shape[2]): # Iterate through the number of kernels\n img = cv2.resize(layer_outputs[:, :, z], (im_width, im_width), interpolation=cv2.INTER_CUBIC)\n heatmap += img * w[z][value]\n\n heatmap = cv2.applyColorMap(np.uint8(np.asarray(ImageOps.invert(toimage(heatmap)))), cv2.COLORMAP_JET)\n heatmap = cv2.putText(heatmap, str(dl.picker[i].img_class), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0),\n 2)\n heatmap = toimage(heatmap)\n heatmap = reduce_opacity(heatmap, 0.5)\n base.paste(heatmap, (0, 0), heatmap)\n heatmaps.append(base)\n\n result = Image.new(\"RGB\", (n * s, (len(heatmaps) // n + 1) * s))\n for index, img in enumerate(heatmaps):\n x = index % n * 256\n y = index // n * 256\n w, h = img.size\n print('pos {0},{1} size {2},{3}'.format(x, y, w, h))\n result.paste(img, (x, y, x + w, y + h))\n\n result.save(outname)",
"def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n\n white_level = float(props['android.sensor.info.whiteLevel'])\n black_levels = props['android.sensor.blackLevelPattern']\n idxs = its.image.get_canonical_cfa_order(props)\n black_levels = [black_levels[i] for i in idxs]\n\n # Expose for the scene with min sensitivity\n sens_min, sens_max = props['android.sensor.info.sensitivityRange']\n s_ae,e_ae,awb_gains,awb_ccm,_ = cam.do_3a(get_results=True)\n s_e_prod = s_ae * e_ae\n\n # Make the image brighter since the script looks at linear Bayer\n # raw patches rather than gamma-encoded YUV patches (and the AE\n # probably under-exposes a little for this use-case).\n s_e_prod *= 2\n\n # Capture raw frames across the full sensitivity range.\n NUM_SENS_STEPS = 15\n sens_step = int((sens_max - sens_min - 1) / float(NUM_SENS_STEPS))\n reqs = []\n sens = []\n for s in range(sens_min, sens_max, sens_step):\n e = int(s_e_prod / float(s))\n req = its.objects.manual_capture_request(s, e)\n req[\"android.colorCorrection.transform\"] = \\\n its.objects.float_to_rational(awb_ccm)\n req[\"android.colorCorrection.gains\"] = awb_gains\n reqs.append(req)\n sens.append(s)\n\n caps = cam.do_capture(reqs, cam.CAP_RAW)\n\n # A list of the (x,y) coords of the center pixel of a collection of\n # patches of a color checker chart. Each patch should be uniform,\n # however the actual color doesn't matter. Note that the coords are\n # relative to the *converted* RGB image, which is 1/2 x 1/2 of the\n # full size; convert back to full.\n img = its.image.convert_capture_to_rgb_image(caps[0], props=props)\n patches = its.image.get_color_checker_chart_patches(img, NAME+\"_debug\")\n patches = [(2*x,2*y) for (x,y) in sum(patches,[])]\n\n lines = []\n for (s,cap) in zip(sens,caps):\n # For each capture, compute the mean value in each patch, for each\n # Bayer plane; discard patches where pixels are close to clamped.\n # Also compute the variance.\n CLAMP_THRESH = 0.2\n planes = its.image.convert_capture_to_planes(cap, props)\n points = []\n for i,plane in enumerate(planes):\n plane = (plane * white_level - black_levels[i]) / (\n white_level - black_levels[i])\n for j,(x,y) in enumerate(patches):\n tile = plane[y/2-16:y/2+16:,x/2-16:x/2+16:,::]\n mean = its.image.compute_image_means(tile)[0]\n var = its.image.compute_image_variances(tile)[0]\n if (mean > CLAMP_THRESH and mean < 1.0-CLAMP_THRESH):\n # Each point is a (mean,variance) tuple for a patch;\n # for a given ISO, there should be a linear\n # relationship between these values.\n points.append((mean,var))\n\n # Fit a line to the points, with a line equation: y = mx + b.\n # This line is the relationship between mean and variance (i.e.)\n # between signal level and noise, for this particular sensor.\n # In the DNG noise model, the gradient (m) is \"S\", and the offset\n # (b) is \"O\".\n points.sort()\n xs = [x for (x,y) in points]\n ys = [y for (x,y) in points]\n m,b = numpy.polyfit(xs, ys, 1)\n lines.append((s,m,b))\n print s, \"->\", m, b\n\n # TODO: Clean up these checks (which currently fail in some cases).\n # Some sanity checks:\n # * Noise levels should increase with brightness.\n # * Extrapolating to a black image, the noise should be positive.\n # Basically, the \"b\" value should correspnd to the read noise,\n # which is the noise level if the sensor was operating in zero\n # light.\n #assert(m > 0)\n #assert(b >= 0)\n\n # Draw a plot.\n pylab.plot(xs, ys, 'r')\n 
pylab.plot([0,xs[-1]],[b,m*xs[-1]+b],'b')\n matplotlib.pyplot.savefig(\"%s_plot_mean_vs_variance.png\" % (NAME))\n\n # Now fit a line across the (m,b) line parameters for each sensitivity.\n # The gradient (m) params are fit to the \"S\" line, and the offset (b)\n # params are fit to the \"O\" line, both as a function of sensitivity.\n gains = [d[0] for d in lines]\n Ss = [d[1] for d in lines]\n Os = [d[2] for d in lines]\n mS,bS = numpy.polyfit(gains, Ss, 1)\n mO,bO = numpy.polyfit(gains, Os, 1)\n\n # Plot curve \"O\" as 10x, so it fits in the same scale as curve \"S\".\n pylab.plot(gains, [10*o for o in Os], 'r')\n pylab.plot([gains[0],gains[-1]],\n [10*mO*gains[0]+10*bO, 10*mO*gains[-1]+10*bO], 'b')\n pylab.plot(gains, Ss, 'r')\n pylab.plot([gains[0],gains[-1]], [mS*gains[0]+bS, mS*gains[-1]+bS], 'b')\n matplotlib.pyplot.savefig(\"%s_plot_S_O.png\" % (NAME))\n\n print \"\"\"\n /* Generated test code to dump a table of data for external validation\n * of the noise model parameters.\n */\n #include <stdio.h>\n #include <assert.h>\n double compute_noise_model_entry_S(int sens);\n double compute_noise_model_entry_O(int sens);\n int main(void) {\n int sens;\n for (sens = %d; sens <= %d; sens += 100) {\n double o = compute_noise_model_entry_O(sens);\n double s = compute_noise_model_entry_S(sens);\n printf(\"%%d,%%lf,%%lf\\\\n\", sens, o, s);\n }\n return 0;\n }\n\n /* Generated functions to map a given sensitivity to the O and S noise\n * model parameters in the DNG noise model.\n */\n double compute_noise_model_entry_S(int sens) {\n double s = %e * sens + %e;\n return s < 0.0 ? 0.0 : s;\n }\n double compute_noise_model_entry_O(int sens) {\n double o = %e * sens + %e;\n return o < 0.0 ? 0.0 : o;\n }\n \"\"\"%(sens_min,sens_max,mS,bS,mO,bO)",
"def __init__(self, calcGrad, calcCost, input, alpha = 0.01, gamma = 0.1):\n\tVisualizer.__init__(self, calcGrad, calcCost, input)\n\tself.alpha = alpha\n\tself.gamma = gamma",
"def forward(self, y):\n # landmarks are the weighted average y\n # activations across each channel\n y_projected = torch.zeros_like(y)\n y = self.softmax(y)\n\n # self.save_image(y[0, :, :, :])\n\n # print(y.size())\n # y_0 = y\n y = y.view(y.size(0), y.size(1), -1)\n # print(y[0, :].min(), y[0, :].max())\n mu_y = y * self.yv.reshape(1, 1, -1)\n mu_y = mu_y.sum(dim=-1) #/ y.sum(dim=-1)\n mu_x = y * self.xv.reshape(1, 1, -1)\n mu_x = mu_x.sum(dim=-1) #/ y.sum(dim=-1)\n means = torch.cat((mu_y.unsqueeze(-1), mu_x.unsqueeze(-1)), dim=-1)\n\n # project landmarsk to guassain fuction with fixed standard deviation\n # yv, xv = torch.meshgrid([torch.arange(0, y.size(-2)), torch.arange(0, y.size(-1))])\n # h_act = torch.zeros(y_projected.size(0), y_projected.size(1), device=y.device)\n for batch_id in range(y_projected.size(0)):\n for heatmap_id in range(y_projected.size(1)):\n gdist = MultivariateNormal(means[batch_id, heatmap_id, :], covariance_matrix=self.cov)\n logprobs = gdist.log_prob(torch.cat((self.yv.unsqueeze(0), self.xv.unsqueeze(0)), dim=0).t())\n y_projected[batch_id, heatmap_id, :, :] = torch.exp(logprobs).reshape(y_projected.size(-2), y_projected.size(-1))\n # y_projected[batch_id, heatmap_id, :, :] -= y_projected[batch_id, heatmap_id, :, :].min()\n # y_projected[batch_id, heatmap_id, :, :] /= y_projected[batch_id, heatmap_id, :, :].max()\n # coords = means[batch_id, heatmap_id, :].round().long()\n # h_act[batch_id, heatmap_id] = y_0[batch_id, heatmap_id, coords[0], coords[1]]\n # h_act[batch_id, heatmap_id]\n # max_coords = y_0[batch_id, heatmap_id, :, :].clone()\n # max_coords[max_coords < max_coords.max()] = 0\n # # print(max_coords.size())\n # if batch_id == 0 and heatmap_id == 0:\n # print(means[batch_id, heatmap_id, :], max_coords.nonzero().float().mean(0))\n # a = 'fail' if y.pow(2).mean() > h_act.mean() else 'pass'\n # print(h_act.mean(), y.mean())\n # print(y_projected.max(), y_projected.min())\n # y_out = y_projected.sum(dim=1)\n # self.save_image(y_out.unsqueeze(1), normalize=True)\n # self.save_image(y_out.unsqueeze(1), normalize=True, output='landmarks')\n return y_projected, self.prior_loss(means, y)",
"def calculate_gradcam_metrics(no_norm_gc_mask_numpy: torch.Tensor, segm: torch.Tensor):\n\n # initial segm size = [1, 3, 224, 224]\n maxpool = nn.MaxPool3d(kernel_size=(3, 1, 1))\n true_mask = maxpool(segm)\n true_mask_invert = 1 - true_mask\n\n true_mask_invert = true_mask_invert.detach().clone().cpu()\n true_mask = true_mask.detach().clone().cpu()\n gradcam_mask = no_norm_gc_mask_numpy.detach().clone().cpu()\n\n gc_miss_rel_sum = 0.\n gc_direct_rel_sum = 0.\n gc_miss_sum = 0.\n gc_direct_sum = 0.\n # iterate over batch to calculate metrics on each image of the batch\n assert gradcam_mask.size() == true_mask.size() == true_mask_invert.size()\n for i in range(gradcam_mask.size(0)):\n cur_gc = gradcam_mask[i]\n cur_mask = true_mask[i]\n cur_mask_inv = true_mask_invert[i]\n\n gc_miss_rel_sum += safe_division(torch.sum(cur_gc * cur_mask_inv), torch.sum(cur_gc))\n gc_direct_rel_sum += safe_division(torch.sum(cur_gc * cur_mask), torch.sum(cur_gc))\n gc_miss_sum += safe_division(torch.sum(cur_gc * cur_mask_inv), torch.sum(cur_mask_inv))\n gc_direct_sum += safe_division(torch.sum(cur_gc * cur_mask), torch.sum(cur_mask))\n return gc_miss_rel_sum, gc_direct_rel_sum, gc_miss_sum, gc_direct_sum",
"def _visualize_cam_w_s(self, imgs, names=\"2DPositions\"):\n feed_dict = {}\n feed_dict[self.images_tf] = imgs\n conv6_val, output_val = self.sess.run([self.conv6, self.output],feed_dict=feed_dict)\n preds = output_val\n preds_order = preds.argsort( axis=1 )[:,::-1]\n best_preds = preds.argmax( axis=1 )\n\n\n if names == \"Predictions\":\n self.classmap = self.detector.get_classmap( self.labels_tf, self.conv6, list(imgs.shape[1:3]), eval_all=False )\n classmap_vals = self.sess.run(\n self.classmap,\n feed_dict={\n self.labels_tf: best_preds,\n self.conv6: conv6_val\n })\n \n classmap_vis = map(lambda x: ((x-x.min())/(x.max()-x.min())), classmap_vals)\n self.classmap_vis = classmap_vis # for Debug\n named_preds = map(self.to_named_pred, preds)\n\n if names == \"BoundingBoxes\":\n softmaxs = f_sotfmax(preds)\n self.classmap = self.detector.get_classmap( self.labels_tf, self.conv6, list(imgs.shape[1:3]), eval_all=True )\n classmap_vals = self.sess.run(\n self.classmap,\n feed_dict={\n self.labels_tf: best_preds*0-1,\n self.conv6: conv6_val\n })\n classmap_vis = classmap_vals\n ls,ts,rs,bs = self._get_bounding_boxes(imgs, classmap_vals, threshold_value=.8)\n named_preds = map(self.to_named_pred, softmaxs, ls, ts, rs, bs)\n\n\n return named_preds, np.array(classmap_vis)[0]",
"def plot_saliency(image, model):\n saliency = get_saliency(image, model)\n plt.ion()\n fig, (ax1, ax2) = plt.subplots(2)\n ax1.imshow(np.squeeze(saliency), cmap=\"viridis\")\n hide_ticks(ax1)\n ax2.imshow(np.squeeze(image), cmap=\"gray\")\n hide_ticks(ax2)\n plt.pause(0.01)\n plt.show()",
"def visualize_saliency_of_output(FLAGS, model, input_images=[]):\n if len(input_images) == 0:\n # use predefined images\n img_dir='/esat/opal/kkelchte/docker_home/pilot_data/visualization_images'\n input_images=sorted([img_dir+'/'+f for f in os.listdir(img_dir)])\n\n print(\"[tools.py]: extracting saliency maps of {0} in {1}\".format([os.path.basename(i) for i in input_images], os.path.dirname(input_images[0])))\n \n \n inputs = load_images(input_images, model.input_size[1:])\n\n print 'shape: ',inputs.shape\n\n if 'nfc' in FLAGS.network:\n inputs = np.concatenate([inputs]*FLAGS.n_frames, axis=-1)\n \n # extract deconvolution\n import tf_cnnvis\n\n # layers = ['c']\n # layers=['MobilenetV1_1/control/Conv2d_1c_1x1/Conv2D']\n # layers=['MobilenetV1_1/control/Conv2d_1c_1x1/Conv2D','MobilenetV1_1/AvgPool_1a/AvgPool']\n\n # layers = [str(i.name) for i in model.sess.graph.get_operations() if 'outputs' in i.name and not 'activations' in i.name and not 'gradients' in i.name]\n layers = [model.endpoints['eval']['outputs'].name[:-2]] #cut out :0 in the end to change name from tensor to operation name\n # layers = ['outputs']\n \n # results = tf_cnnvis.activation_visualization(sess_graph_path = model.sess, \n # value_feed_dict = {model.inputs : inputs}, \n # layers=layers)\n results = tf_cnnvis.deconv_visualization(sess_graph_path = model.sess, \n value_feed_dict = {model.inputs : inputs}, \n layers=layers)\n\n # Normalize deconvolution within 0:1 range\n num_rows=0\n clean_results={} \n # Loop over layers\n for k in results.keys():\n clean_results[k]=[]\n # Loop over channels\n for c in range(len(results[k])):\n num_rows+=1\n clean_results[k].append(np.zeros((results[k][c].shape[0:3])))\n # Loop over images\n for i in range(results[k][c].shape[0]):\n clean_results[k][c][i]=deprocess_image(results[k][c][i],one_channel=True)\n if num_rows > 6:\n print(\"[tools.py]: There are too many columns to create a proper image.\")\n return\n\n # create one combined image with each input image on each column\n fig, axes = plt.subplots(num_rows+1,min(len(input_images),5),figsize=(23, 4*(2*len(results.keys())+1)))\n # fig, axes = plt.subplots(num_columns+1,min(len(input_images),5),figsize=(23, 4*(2*len(results.keys())+1)))\n # add original images in first row\n for i in range(axes.shape[1]):\n axes[0, i].set_title(os.path.basename(input_images[i]).split('.')[0])\n axes[0, i].imshow(matplotlibprove(inputs[i]), cmap='inferno')\n axes[0, i].axis('off')\n \n # experts=np.asarray([[k]*(FLAGS.action_quantity if FLAGS.discrete else 1) for v in sorted(model.factor_offsets.values()) for k in model.factor_offsets.keys() if model.factor_offsets[k]==v]).flatten()\n\n # add deconvolutions over the columns\n row_index = 1\n for k in results.keys(): # go over layers\n for c in range(len(results[k])): # add each channel in 2 new column\n for i in range(axes.shape[1]): # fill row going over input images\n # axes[row_index, i].set_title(k.split('/')[1]+'/'+k.split('/')[2]+'_'+str(c))\n axes[row_index, i].set_title(k+'_'+str(c))\n # axes[row_index, i].set_title(experts[c])\n \n axes[row_index, i].imshow(np.concatenate((inputs[i],np.expand_dims(clean_results[k][c][i],axis=2)), axis=2))\n axes[row_index, i].axis('off')\n # row_index+=2\n row_index+=1\n # plt.show()\n plt.savefig(FLAGS.summary_dir+FLAGS.log_tag+'/saliency_maps.jpg',bbox_inches='tight')",
"def __init__(self, calcGrad, calcCost, input, alpha = 0.01):\n \n\tVisualizer.__init__(self, calcGrad, calcCost, input)\n\n\tself.alpha = alpha",
"def compute_cm_fm(illuminant, gains, ccm, cal):\n\n ###########################################################################\n # Standard matrices.\n\n # W is the matrix that maps sRGB to XYZ.\n # See: http://www.brucelindbloom.com/\n W = numpy.array([\n [ 0.4124564, 0.3575761, 0.1804375],\n [ 0.2126729, 0.7151522, 0.0721750],\n [ 0.0193339, 0.1191920, 0.9503041]])\n\n # HH is the chromatic adaptation matrix from D65 (since sRGB's ref white is\n # D65) to D50 (since CIE XYZ's ref white is D50).\n HH = numpy.array([\n [ 1.0478112, 0.0228866, -0.0501270],\n [ 0.0295424, 0.9904844, -0.0170491],\n [-0.0092345, 0.0150436, 0.7521316]])\n\n # H is a chromatic adaptation matrix from D65 (because sRGB's reference\n # white is D65) to the calibration illuminant (which is a standard matrix\n # depending on the illuminant). For a D65 illuminant, the matrix is the\n # identity. For the A illuminant, the matrix uses the linear Bradford\n # adaptation method to map from D65 to A.\n # See: http://www.brucelindbloom.com/\n H_D65 = numpy.array([\n [ 1.0, 0.0, 0.0],\n [ 0.0, 1.0, 0.0],\n [ 0.0, 0.0, 1.0]])\n H_A = numpy.array([\n [ 1.2164557, 0.1109905, -0.1549325],\n [ 0.1533326, 0.9152313, -0.0559953],\n [-0.0239469, 0.0358984, 0.3147529]])\n H = [H_A, H_D65][illuminant]\n\n ###########################################################################\n # Per-model matrices (that should be the same for all units of a particular\n # phone/camera. These are statics in the HAL camera properties.\n\n # G is formed by taking the r,g,b gains and putting them into a\n # diagonal matrix.\n G = numpy.array([[gains[0],0,0], [0,gains[1],0], [0,0,gains[3]]])\n\n # S is just the CCM.\n S = numpy.array([ccm[0:3], ccm[3:6], ccm[6:9]])\n\n ###########################################################################\n # Per-unit matrices.\n\n # The per-unit calibration matrix for the given illuminant.\n CC = numpy.array([cal[0:3],cal[3:6],cal[6:9]])\n\n ###########################################################################\n # Derived matrices. These should match up with DNG-related matrices\n # provided by the HAL.\n\n # The color matrix and forward matrix are computed as follows:\n # CM = inv(H * W * S * G * CC)\n # FM = HH * W * S\n CM = numpy.linalg.inv(\n numpy.dot(numpy.dot(numpy.dot(numpy.dot(H, W), S), G), CC))\n FM = numpy.dot(numpy.dot(HH, W), S)\n\n # The color matrix is normalized so that it maps the D50 (PCS) white\n # point to a maximum component value of 1.\n CM = CM / max(numpy.dot(CM, (0.9642957, 1.0, 0.8251046)))\n\n return CM, FM",
"def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")",
"def demo(image, name):\n\n # Log.set_log_max_depth(5)\n\n image = normalise(image.astype(np.float32))\n # noisy = add_noise(image, intensity=None, variance=0.1, sap=0, clip=False)\n noisy = random_noise(image, mode=\"gaussian\", var=0.1, seed=0, clip=False)\n noisier = random_noise(noisy, mode=\"gaussian\", var=0.1, seed=100, clip=False)\n\n generator = StandardFeatureGenerator(\n include_corner_features=True,\n include_scale_one=True,\n include_fine_features=True,\n include_spatial_features=True,\n )\n regressor = CBRegressor(\n patience=16, loss='l1', learning_rate=0.002, max_num_estimators=4096\n )\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy, jinv=True)\n n2s_denoised = it.translate(noisy)\n\n it.exclude_center_feature = False\n it.train(noisier, noisy, jinv=False)\n denoised = it.translate(noisy)\n denoised_corrected = 2 * denoised - noisy\n\n # denoised2 = it.translate(it.translate(it.translate(denoised)))\n denoised2 = it.translate(denoised)\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n n2s_denoised = numpy.clip(n2s_denoised, 0, 1)\n denoised_corrected = numpy.clip(denoised_corrected, 0, 1)\n denoised2 = numpy.clip(denoised2, 0, 1)\n\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n\n psnr_n2s_denoised = psnr(image, n2s_denoised)\n ssim_n2s_denoised = ssim(image, n2s_denoised)\n\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n\n psnr_denoised_corrected = psnr(image, denoised_corrected)\n ssim_denoised_corrected = ssim(image, denoised_corrected)\n\n psnr_denoised2 = psnr(image, denoised2)\n ssim_denoised2 = ssim(image, denoised2)\n\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\n \"denoised (classic_denoisers) :\",\n psnr_n2s_denoised,\n ssim_n2s_denoised,\n )\n print(\"denoised (noiser2noise) :\", psnr_denoised, ssim_denoised)\n print(\n \"denoised (noiser2noise corrected) :\",\n psnr_denoised_corrected,\n ssim_denoised_corrected,\n )\n print(\"denoised (x2) :\", psnr_denoised2, ssim_denoised2)\n\n Log.enable_output = False\n denoised_images = []\n for i in range(1, 32):\n psnr_denoised = psnr(image, numpy.clip(denoised, 0, 1))\n ssim_denoised = ssim(image, numpy.clip(denoised, 0, 1))\n psnr_sslos = psnr(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n ssim_sslos = ssim(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n print(f\"x{i} :\", psnr_sslos, ssim_sslos, psnr_denoised, ssim_denoised)\n\n denoised_images.append(numpy.clip(denoised, 0, 1))\n denoised = it.translate(denoised)\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(image, name='image')\n viewer.add_image(noisy, name='noisy')\n viewer.add_image(noisier, name='noisier')\n viewer.add_image(n2s_denoised, name='denoised (classic_denoisers)')\n viewer.add_image(denoised, name='denoised (noiser3noise)')\n viewer.add_image(denoised_corrected, name='denoised (noiser3noise corrected)')\n viewer.add_image(numpy.stack(denoised_images), name=f'denoised images')",
"def main():\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n its.caps.skip_unless(its.caps.raw16(props) and\n its.caps.manual_sensor(props) and\n its.caps.read_3a(props) and\n its.caps.per_frame_control(props) and\n not its.caps.mono_camera(props))\n debug = its.caps.debug_mode()\n\n # Expose for the scene with min sensitivity\n exp_min, exp_max = props[\"android.sensor.info.exposureTimeRange\"]\n sens_min, _ = props[\"android.sensor.info.sensitivityRange\"]\n # Digital gains might not be visible on RAW data\n sens_max = props[\"android.sensor.maxAnalogSensitivity\"]\n sens_step = (sens_max - sens_min) / NUM_ISO_STEPS\n white_level = float(props[\"android.sensor.info.whiteLevel\"])\n black_levels = [its.image.get_black_level(i,props) for i in range(4)]\n # Get the active array width and height.\n aax = props[\"android.sensor.info.activeArraySize\"][\"left\"]\n aay = props[\"android.sensor.info.activeArraySize\"][\"top\"]\n aaw = props[\"android.sensor.info.activeArraySize\"][\"right\"]-aax\n aah = props[\"android.sensor.info.activeArraySize\"][\"bottom\"]-aay\n raw_stat_fmt = {\"format\": \"rawStats\",\n \"gridWidth\": aaw/IMG_STATS_GRID,\n \"gridHeight\": aah/IMG_STATS_GRID}\n\n e_test = []\n mult = 1.0\n while exp_min*mult < exp_max:\n e_test.append(int(exp_min*mult))\n mult *= EXP_MULT\n if e_test[-1] < exp_max * INCREASING_THR:\n e_test.append(int(exp_max))\n e_test_ms = [e / 1000000.0 for e in e_test]\n\n for s in range(sens_min, sens_max, sens_step):\n means = []\n means.append(black_levels)\n reqs = [its.objects.manual_capture_request(s, e, 0) for e in e_test]\n # Capture raw in debug mode, rawStats otherwise\n caps = []\n for i in range(len(reqs) / SLICE_LEN):\n if debug:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], raw_stat_fmt)\n last_n = len(reqs) % SLICE_LEN\n if last_n == 1:\n if debug:\n caps += [cam.do_capture(reqs[-last_n:], cam.CAP_RAW)]\n else:\n caps += [cam.do_capture(reqs[-last_n:], raw_stat_fmt)]\n elif last_n > 0:\n if debug:\n caps += cam.do_capture(reqs[-last_n:], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[-last_n:], raw_stat_fmt)\n\n # Measure the mean of each channel.\n # Each shot should be brighter (except underexposed/overexposed scene)\n for i,cap in enumerate(caps):\n if debug:\n planes = its.image.convert_capture_to_planes(cap, props)\n tiles = [its.image.get_image_patch(p, 0.445, 0.445, 0.11, 0.11) for p in planes]\n mean = [m * white_level for tile in tiles\n for m in its.image.compute_image_means(tile)]\n img = its.image.convert_capture_to_rgb_image(cap, props=props)\n its.image.write_image(img, \"%s_s=%d_e=%05d.jpg\" % (NAME, s, e_test))\n else:\n mean_image, _ = its.image.unpack_rawstats_capture(cap)\n mean = mean_image[IMG_STATS_GRID/2, IMG_STATS_GRID/2]\n\n print \"ISO=%d, exposure time=%.3fms, mean=%s\" % (\n s, e_test[i] / 1000000.0, str(mean))\n means.append(mean)\n\n\n # means[0] is black level value\n r = [m[0] for m in means[1:]]\n gr = [m[1] for m in means[1:]]\n gb = [m[2] for m in means[1:]]\n b = [m[3] for m in means[1:]]\n\n pylab.plot(e_test_ms, r, \"r.-\")\n pylab.plot(e_test_ms, b, \"b.-\")\n pylab.plot(e_test_ms, gr, \"g.-\")\n pylab.plot(e_test_ms, gb, \"k.-\")\n pylab.xscale('log')\n pylab.yscale('log')\n pylab.title(\"%s ISO=%d\" % (NAME, s))\n pylab.xlabel(\"Exposure time (ms)\")\n pylab.ylabel(\"Center patch pixel mean\")\n matplotlib.pyplot.savefig(\"%s_s=%d.png\" % (NAME, s))\n 
pylab.clf()\n\n allow_under_saturated = True\n for i in xrange(1, len(means)):\n prev_mean = means[i-1]\n mean = means[i]\n\n if np.isclose(max(mean), white_level, rtol=SATURATION_TOL):\n print \"Saturated: white_level %f, max_mean %f\"% (white_level, max(mean))\n break;\n\n if allow_under_saturated and np.allclose(mean, black_levels, rtol=BLK_LVL_TOL):\n # All channel means are close to black level\n continue\n\n allow_under_saturated = False\n # Check pixel means are increasing (with small tolerance)\n channels = [\"Red\", \"Gr\", \"Gb\", \"Blue\"]\n for chan in range(4):\n err_msg = \"ISO=%d, %s, exptime %3fms mean: %.2f, %s mean: %.2f, TOL=%.f%%\" % (\n s, channels[chan],\n e_test_ms[i-1], mean[chan],\n \"black level\" if i == 1 else \"exptime %3fms\"%e_test_ms[i-2],\n prev_mean[chan],\n INCREASING_THR*100)\n assert mean[chan] > prev_mean[chan] * INCREASING_THR, err_msg",
"def _visualize_cam_s(self, imgs, summed_filters=True, names=\"2DPositions\"):\n feed_dict = {}\n feed_dict[self.images_tf] = imgs\n conv6_val, output_val = self.sess.run([self.conv6, self.output],feed_dict=feed_dict)\n preds = output_val\n\n # <preds> to confidence (to softmax)\n softmaxs = f_sotfmax(preds)\n \n # Computes the position of the max per axis [-1;1]\n summed_viz = self.summed_vis(conv6_val)\n if names == \"2DPositions\":\n max_pos = lambda arr: (arr.argmax(axis=1) / float(arr.shape[-2]) *2)-1\n xxs = max_pos( summed_viz.sum(axis=1) )\n yys = max_pos( summed_viz.sum(axis=2) )\n named_preds = map(self.to_named_pred, preds, xxs, yys)\n \n # Computes the bounding boxes coordinates of classes\n if names == \"BoundingBoxes\":\n ls,ts,rs,bs = self._get_bounding_boxes(imgs, summed_viz)\n named_preds = map(self.to_named_pred, softmaxs, ls, ts, rs, bs)\n \n if summed_filters == True:\n conv6_val = summed_viz \n \n return named_preds, conv6_val",
"def plot_img():\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())\n\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n sample = model.sample_z(data) \n plt.imshow(sample)",
"def __call__(cls, image):\n\n logger.info('Gain Correcting Image')\n\n saturate = 0.\n gains = []\n for amp in decaminfo.amps:\n sec = section2slice(image['DATASEC' + amp])\n gain = image['GAIN' + amp]\n gains.append(gain)\n image.data[sec] *= gain\n\n # Adjust the weight or variance image if present:\n if image.weight is not None:\n image.weight[sec] *= 1. / (gain * gain)\n if image.variance is not None:\n image.variance[sec] *= gain * gain\n\n # Adjust keywords\n image['GAIN' + amp] = image['GAIN' + amp] / gain\n image['SATURAT' + amp] = image['SATURAT' + amp] * gain\n saturate = max(saturate, image['SATURAT' + amp])\n # Scale the SKYVAR if it's already here\n kw = 'SKYVAR' + amp\n if kw in image.header.keys():\n image[kw] = image[kw] * gain * gain\n # The FLATMED will keep track of rescalings *after* gain:\n image['FLATMED' + amp] = 1.\n\n # The SATURATE keyword is assigned to maximum of the two amps.\n image['SATURATE'] = saturate\n\n # Some other keywords that we will adjust crudely with mean gain\n # if they are present:\n gain = np.mean(gains)\n for kw in ('SKYBRITE', 'SKYSIGMA'):\n if kw in image.header.keys():\n image[kw] = image[kw] * gain\n\n # One other keyword to adjust:\n image['BUNIT'] = 'electrons'\n\n logger.debug('Finished applying Gain Correction')\n ret_code = 0\n return ret_code",
"def gamma_slide(self):\r\n gamma_input = self.horizontal.get() # Get the user input of gamma\r\n img = self.master.images[-1] # Choose the displayed image to perform gamma correcting\r\n hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # Convert the BGR to HSV type\r\n transformed_dim = gamma.gamma_correct(hsv_image[:, :, 2], gamma_input) # Perform gamma correcting on the\r\n # 'V' channel\r\n hsv_image[:, :, 2] = transformed_dim # Set the 'V' channel of the original image as the gamma corrected one\r\n color_image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR) # Reconvert back the image to BGR\r\n self.master.display_image.display_image(img=color_image) # Display the reconverted image on the screen\r\n self.master.images.append(color_image) # Append the transformed image to the stack\r",
"def cdna_transformation(self, prev_image, cdna_input, reuse_sc=None):\n batch_size = int(cdna_input.get_shape()[0])\n height = int(prev_image.get_shape()[1])\n width = int(prev_image.get_shape()[2])\n\n DNA_KERN_SIZE = self.conf['kern_size']\n num_masks = self.conf['num_masks']\n color_channels = int(prev_image.get_shape()[3])\n\n # Predict kernels using linear function of last hidden layer.\n cdna_kerns = slim.layers.fully_connected(\n cdna_input,\n DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks,\n scope='cdna_params',\n activation_fn=None,\n reuse = reuse_sc)\n\n # Reshape and normalize.\n cdna_kerns = tf.reshape(\n cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])\n cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT\n norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keepdims=True)\n cdna_kerns /= norm_factor\n cdna_kerns_summary = cdna_kerns\n\n # Transpose and reshape.\n cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])\n cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])\n prev_image = tf.transpose(prev_image, [3, 1, 2, 0])\n\n transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')\n\n # Transpose and reshape.\n transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])\n transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])\n transformed = tf.unstack(value=transformed, axis=-1)\n\n return transformed, cdna_kerns_summary",
"def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()",
"def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)",
"def reconstruct(self, lam = 0.0001, display=False):\n \n self.history = {}\n \n x = utils.initImage(self.data, self.fov, self.scales[0], self.pulse_ft)\n \n if display:\n plt.ion()\n plt.show()\n self.__fig, self.__axs = plt.subplots(1, 2)\n plt.subplots_adjust(wspace=-0.1)\n self.__display(x, np.zeros_like(x), 'Initialization')\n \n self.history['init'] = x\n \n for scale in self.scales:\n\n x = utils.upscaleImage(x, self.fov, scale, self.pulse)\n \n gammas = (utils.ftVectors(self.data['bi_uvcoord1'], self.fov, \n scale, self.pulse_ft),\n utils.ftVectors(self.data['bi_uvcoord2'], self.fov, \n scale, self.pulse_ft),\n utils.ftVectors(self.data['bi_uvcoord3'], self.fov, \n scale, self.pulse_ft))\n \n for beta in self.betas:\n \n # (a) solve for Z while keeping x constant\n Z = utils.mostLikelyPatches(x, beta, self.data, \n self.patch_size, self.gmm)\n \n # (b) solve for x while keeping Z constant\n x = utils.taylorExpansion(x, Z, beta, self.data, gammas,\n self.patch_size, lam=lam)\n \n if display:\n self.__axs[0].clear()\n self.__axs[1].clear()\n self.__display(x, Z, 'Scale: ' + str(scale) + '\\n' +\\\n r'$\\beta$: ' + str(beta))\n self.history[scale] = x\n \n if display:\n plt.ioff()\n \n self.res = np.rot90(utils.upscaleImage(x, self.fov, \n self.naxis, self.pulse),2)\n return self.res",
"def analysis_dFF_map(self):\r\n\r\n \r\n\r\n print ('Starting dF/F analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # smoothwin = int(self.imageData.shape[1]/8.)\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n \r\n\r\n mpl.figure(99)\r\n\r\n mpl.imshow(avgimg, vmin=0, vmax=np.max(np.max(avgimg, axis=0), axis=0))\r\n\r\n # self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n # self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n imgdatasm = scipy.ndimage.filters.gaussian_filter(self.imageData,[0,2,2],order=0,output=None,mode='reflect',cval=0.0,truncate=4.0)\r\n # field correction: smooth the average image, subtract it from the imagedata, then add back the mean value\r\n avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n #self.imageData = (self.imageData-avgimgsm)+ self.meanimagevalue\r\n\r\n mpl.figure(98)\r\n mpl.imshow(avgimgsm,vmin=0, vmax=np.max(np.max(avgimgsm, axis=0), axis=0))\r\n mpl.figure(97)\r\n mpl.imshow(np.mean(imgdatasm,axis=0))\r\n self.n_times = self.timebase\r\n\r\n periodsize = int(self.period*self.framerate)\r\n print('periodsize: ',periodsize)\r\n\r\n # windowsize = int(self.freqperiod*self.framerate) # window size for every response\r\n\r\n # r = range(0, self.imageData.shape[0], windowsize)\r\n\r\n sig = np.reshape(imgdatasm, (self.nrepetitions, periodsize, \r\n\r\n self.imageData.shape[1], self.imageData.shape[2]), order='C')\r\n\r\n delresp=np.zeros([19,256,256])\r\n repback = np.mean(sig[:,1:4,:,:],axis=1)\r\n resp = np.mean(sig[:,5:9,:,:],axis=1)\r\n for counter in range(19):\r\n delresp[counter,:,:]=(resp[counter,:,:]-repback[counter,:,:])/repback[counter,:,:]\r\n quot=np.mean(delresp,axis=0)\r\n quot=-quot\r\n print ('shape of quot: ', np.shape(quot))\r\n # quot=(resp-repback)/repback\r\n # quot[quot>0]=0\r\n # quot=-1000*quot\r\n\r\n mpl.figure(7)\r\n mpl.imshow(quot,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n\r\n quotsm = scipy.ndimage.filters.gaussian_filter(quot, 3, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n mpl.figure(8)\r\n mpl.imshow(quotsm,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n \r\n # bl = np.mean(sig[:, range(0, sig.shape[1], windowsize), :, :], axis=0)\r\n\r\n # bl = scipy.ndimage.filters.gaussian_filter(bl, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n\r\n\r\n # print (' windowsize: ', windowsize)\r\n\r\n # print (' periodsize: ', periodsize)\r\n # mc = matplotlib.cm\r\n\r\n # only use sequential maps here\r\n\r\n # clist = [mc.Reds, mc.YlOrBr, mc.Oranges, mc.Greens, mc.GnBu, mc.Blues, mc.RdPu, mc.Purples,mc.Reds,mc.Greens,mc.Blues,mc.Reds,mc.Reds,mc.Reds,mc.Reds]\r\n # clist2 = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet', 'black','red','purple','green','blue','red','red','red','red']\r\n\r\n cs = {}\r\n\r\n # sigd = np.zeros((bl.shape[0], sig.shape[2], sig.shape[3]))\r\n# \r\n # localmax = {}\r\n\r\n # sigmax = 0.\r\n# \r\n # kernel = np.ones((5, 5))\r\n\r\n # psf = kernel / np.sum(kernel)\r\n\r\n # compute dF/F, and get maximum over all frequencies\r\n\r\n print (' sig shape: ', sig.shape)\r\n\r\n # print (' bl shape: ', bl.shape)\r\n\r\n # smax = 
np.zeros(bl.shape[0])\r\n\r\n # for i in range(bl.shape[0]):\r\n\r\n # sigd[i] = (np.mean(np.max(sig[:,range(i*windowsize, i*windowsize+windowsize),:,:], axis=0), axis=0) - bl[i,:,:])/bl[i,:,:]\r\n\r\n # sigd[i] = sigd[i]**2.0\r\n\r\n # smooth\r\n\r\n #sigd[i] = scipy.ndimage.filters.gaussian_filter(sigd[i], 1., order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # deconvolve\r\n\r\n # sigd[i] = restoration.richardson_lucy(sigd[i], psf, 5)\r\n\r\n# sm = sigd[i].max().max()\r\n\r\n# if sm > sigmax:\r\n\r\n# sigmax = sm\r\n\r\n# smax[i] = sm\r\n\r\n# print( ' i, sm: ', i, sm)\r\n\r\n# # now process for display\r\n\r\n# print (' sigd shape: ', sigd.shape)\r\n\r\n# wdat = np.mean(sig, axis=0)\r\n\r\n# wds = wdat.shape\r\n\r\n# print('wdat shape: ', wds)\r\n\r\n# # print (range(int(wds[1]/2.), int(3.*wds[1]/4.)), range(int(wds[2]/2.), int(3.*wds[2]/4.)))\r\n\r\n# print( 'reduced shape: ', wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))].shape)\r\n\r\n# wp = wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))]\r\n\r\n# wp = np.mean(np.mean(wdat, axis=1), axis=1)\r\n\r\n# mpl.figure(1)\r\n\r\n# mpl.plot(np.linspace(0., len(wp)*1./self.framerate, num=len(wp)), wp)\r\n\r\n\r\n\r\n# mpl.figure(2)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# sigd[i][sigd[i] < self.threshold*sigmax] = 0.\r\n\r\n# # find center of mass of areas above threshold\r\n\r\n# # mass = sigd[i].copy()\r\n\r\n# # mass[sigd[i] > 0.] = 1.\r\n\r\n# # structuring_element = [[0,1,0],[1,1,1],[0,1,0]]\r\n\r\n# # segmentation, segments = scipy.ndimage.label(mass, structuring_element)\r\n\r\n# # coords = scipy.ndimage.center_of_mass(sigd[i], segmentation, range(1,segments+1))\r\n\r\n# # xcoords = np.array([x[1] for x in coords])\r\n\r\n# # ycoords = np.array([x[0] for x in coords])\r\n\r\n# # cs[i] = (xcoords, ycoords)\r\n\r\n\r\n\r\n# # Calculating local maxima\r\n\r\n# lm = skif.peak_local_max(sigd[i], min_distance=2, threshold_rel=0.25, exclude_border=False, \r\n\r\n# indices=True, num_peaks=10, footprint=None, labels=None)\r\n\r\n# localmax[i] = [(m[0], m[1], sigd[i][(m[0], m[1])]) for m in lm]\r\n\r\n# # print ('i, local max: ',i, localmax)\r\n\r\n# mpl.subplot(5,5,i+1)\r\n# print ('shape of sigd: ',[np.shape(sigd),i])\r\n\r\n# imga1 = mpl.imshow(sigd[i], cmap=clist[i], vmin=0, origin='lower')\r\n\r\n# if len(localmax[i]) > 0:\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# else:\r\n\r\n# continue\r\n\r\n# scattersize = 30.\r\n\r\n# for k, lm in enumerate(localmax[i]):\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], edgecolors='k',\r\n\r\n# s=scattersize*lm[2]/max_fr, linewidths=0.125, alpha=0.5)\r\n\r\n# mpl.subplot(6,5,i+15+1)\r\n\r\n# wr = range(i*windowsize, i*windowsize+windowsize)\r\n\r\n# # print (' wr: len, min max: ', len(wr), min(wr), max(wr))\r\n\r\n# wmax = 0.\r\n\r\n# for lmax in localmax[i]: # was xcoords\r\n\r\n# wave = wdat[wr, lmax[0],lmax[1]]\r\n\r\n# wdff = (wave-wave[0])/wave[0]\r\n\r\n# if np.max(wdff) > wmax:\r\n\r\n# wmax = np.max(wdff)\r\n\r\n# mpl.plot(np.linspace(0., len(wave)*1./self.framerate, num=len(wave)),\r\n\r\n# wdff, color=clist2[i])\r\n\r\n# mpl.ylim(-0.1*wmax, wmax)\r\n\r\n# fig = mpl.figure(3)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# if len(localmax[i]) == 0:\r\n\r\n# continue\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# for lm in localmax[i]:\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], \r\n\r\n# 
s=scattersize*lm[2]/max_fr, alpha=0.5, edgecolors='k')\r\n\r\n# mpl.ylim(0, sigd.shape[2])\r\n\r\n# mpl.xlim(0, sigd.shape[1])\r\n\r\n# mpl.axis('equal')\r\n\r\n mpl.show()\r\n\r\n print (' DF/F analysis finished.\\n')",
"def plot_state(smac, model, x_points, y_points, x_smac, y_smac, step=None):\n from smac.optimizer.acquisition import EI\n\n # cost all points for x\n step = step or len(x_smac)\n x_smac_ = np.array([[x] for x in x_smac[:step]])\n y_smac_ = np.array([[y] for y in y_smac[:step]])\n # as an alternative, we could extract the points from the runhistory again\n # but these points will be scaled to a unit-hypercube\n # X, Y = smac.solver.rh2EPM.transform(runhistory)\n\n model.train(x_smac_, y_smac_)\n\n acq_func = EI(model=model)\n acq_func.update(model=model, eta=np.min(y_smac))\n\n x_points_ = np.array([[x] for x in x_points])\n acq_values = acq_func._compute(X=x_points_)[:, 0]\n\n # plot acquisition function\n y_mean, y_var = model.predict(x_points_)\n y_mean = y_mean[:, 0]\n y_std = np.sqrt(y_var)[:, 0]\n\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, acq_values)\n plt.title(\"Aquisition Function\")\n\n plt.savefig('fig%da.pdf' % step)\n\n # plot uncertainties\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.plot(x_points, y_mean)\n ax1.fill_between(x_points, y_mean - y_std,\n y_mean + y_std, alpha=0.5)\n ax1.plot(x_smac[:step], y_smac[:step], 'bo')\n ax1.plot(x_smac[:step], y_smac[:step], 'ro')\n ax1.plot(x_points, y_points, '--')\n plt.title(\"Uncertainty Predictions\")\n\n plt.savefig('fig%db.pdf' % step)",
"def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2",
"def late_gradient_fusion():\n pass",
"def test_chroma_plot(self):\n plt.xlabel('chromaticity x')\n plt.ylabel('chromaticity y')\n plt.title(\"Standard Gamut\")\n plt.axis([-0.1, 0.8, -0.4, 0.65])\n plt.grid(True)\n mplh.plot_spectrum_locus_76()\n mplh.plot_colorspace_gamut(colorspaces.ACES, lines_color=\"c\",\n upvp_conversion=True)\n mplh.plot_colorspace_gamut(colorspaces.REC709, lines_color=\"m\",\n upvp_conversion=True)\n plt.legend(loc=4)\n if DISPLAY:\n plt.show()\n plt.clf()\n plt.close()"
] | [
"0.65379626",
"0.6204276",
"0.5884279",
"0.5698037",
"0.5402223",
"0.53441495",
"0.5318425",
"0.5276183",
"0.52716607",
"0.5201549",
"0.51886076",
"0.51762164",
"0.51748544",
"0.51095164",
"0.5086669",
"0.50808096",
"0.5079905",
"0.507406",
"0.5071252",
"0.50696766",
"0.5058943",
"0.5050591",
"0.503622",
"0.50351673",
"0.5028049",
"0.50063354",
"0.4986489",
"0.49536157",
"0.49374664",
"0.49347425"
] | 0.66255313 | 0 |
Takes a list of codes in the form 'ASD)GGH' and turns them into a dictionary. The key is the first three characters; the value is a list of the last three characters from every code sharing that key (multiple occurrences are appended). Returns the dictionary. | def make_orbit_dict(orbit_codes):
orbit_dict = {}
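# Each code splits at the ')' separator: characters 0-2 name the parent key, characters 4 onward name the child value.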
for code in orbit_codes:
if code[0:3] in orbit_dict.keys():
orbit_dict[code[0:3]].append(code[4:])
else:
orbit_dict[code[0:3]] = [code[4:]]
return orbit_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dict(lst, rcs=None, pos=None):\n digits = lst[0].union(*lst)\n no_digits = dict()\n for dig in digits:\n where = []\n for s in enumerate(lst):\n if dig in s[1]:\n if rcs == \"S\":\n where.append(pos[s[0]])\n elif rcs == \"R\":\n where.append((pos, s[0]))\n elif rcs == \"C\":\n where.append((s[0], pos))\n else:\n where.append(s[0])\n no_digits[dig] = where\n return no_digits",
"def parseDuMap(output):\n #z00du00(DB-SL-MSL-CH-SCH) : 00-00-0-0-0 01-01-0-0-0 04-04-2-0-0 05-05-2-0-0\n # 02-02-1-1-0 03-03-1-1-0 02-02-1-0-0 03-03-1-0-0\n duMap = {}\n for l in output:\n \n l_a = l.split(\":\")\n #print l_a\n #sys.exit(1)\n du = l_a[0]\n # string of 00-00-0-0-0 01-01-0-0-0\n sbChs = l_a[1]\n \n #z00du00(DB-SL-MSL-CH-SCH)\n # get 0 and from z00du0 9\n partDu = getDuPart(du)\n \n sbChArr = getAllSlChSbCh(sbChs)\n \n duMap[partDu] = sbChArr\n \n \n return duMap",
"def from_trace_codes_text(codes_text: str) -> Mapping[int, str]:\n return {int(s[0], 16): s[1] for s in map(lambda l: l.split(), codes_text.splitlines())}",
"def _get_codes_helper(tree: HuffmanTree, code: str,\n symbol_dict: Any = None) -> Dict[int, str]:\n\n if tree.is_leaf():\n symbol_dict[tree.symbol] = code\n return symbol_dict\n\n else:\n if symbol_dict is None:\n symbol_dict = {}\n\n symbol_dict = _get_codes_helper(tree.left, code + \"0\", symbol_dict)\n symbol_dict = _get_codes_helper(tree.right, code + \"1\", symbol_dict)\n\n return symbol_dict",
"def get_number(word, i_type='S'):\n\n resultdict = {}\n if word is None:\n return resultdict\n\n word = str(word)\n regexStr = None\n if i_type == 'S':\n regexStr = re.search(r'^[0-9\\-]+', word)\n else:\n regexStr = re.search(r'[0-9\\-]+', word)\n\n if regexStr is not None:\n # pdb.set_trace()\n numList = []\n if '-' in word:\n numList = word.split('-')\n else:\n numList.append(word)\n\n for idx, numWord in enumerate(numList):\n if idx > 1:\n resultdict = {}\n break\n \"\"\"\n Let's get number and suffix for number1\n and number2\n \"\"\"\n # to get the number\n regexNum = re.search(r'[0-9]+', numWord)\n key = 'number_' + str(idx + 1)\n if regexNum is not None:\n try:\n resultdict[key] = int(regexNum.group().split(' ')[0])\n except:\n pass\n # resultdict[key] = regexNum.group().split(' ')[0]\n\n # to get suffix\n regexSuff = re.search(r'[a-zA-Z]+', numWord)\n key = key + '_suff'\n if regexSuff:\n # resultdict[key] = regexSuff.group().split(' ')[0]\n \"\"\"\n dont think we should have suffix more than 1\n character\n there are few cases but we are ignoring them...\n \"\"\"\n suff = regexSuff.group().split(' ')[0]\n if i_type == 'S':\n if len(suff) == 1:\n resultdict[key] = suff\n else:\n resultdict = {}\n else:\n if len(suff) < 3:\n resultdict[key] = suff\n\n return resultdict",
"def handle_data(data):\n items = data.split(',')\n items.sort()\n results = {}\n i = 1\n for item in items:\n it = item.split(' ')\n results[i] = (it[0], it[1], int(it[2]))\n print str(i) + \" \" + it[0]\n i += 1\n return results",
"def get_string_stech_dict(stech_string):\n stech_dict = {}\n try:\n stech_lst = stech_string.split(\",\") # Generates a stech list: [\"A:3\", \"B:2\", ...]\n for stech in stech_lst:\n chain, number = stech.split(\":\")\n stech_dict[chain] = int(number) # Chain id as key and number as value: { \"A\": 3, \"B\": 2, ...}\n return stech_dict\n except:\n sys.stderr.write(\"Stechometry string format is wrong, please follow this format: A:2,B:11,C:4, ...\")\n sys.exit(1)",
"def create_dictionaries(chars):\n return dict((c, i) for i, c in enumerate(chars)), dict((i, c) for i, c in enumerate(chars))",
"def getT9dict():\r\n T9dict = {}\r\n all_letters = string.lowercase\r\n T9dict.update(mapkeystoletter(2, all_letters[0:3]))\r\n T9dict.update(mapkeystoletter(3, all_letters[3:6]))\r\n T9dict.update(mapkeystoletter(4, all_letters[6:9]))\r\n T9dict.update(mapkeystoletter(5, all_letters[9:12]))\r\n T9dict.update(mapkeystoletter(6, all_letters[12:15]))\r\n T9dict.update(mapkeystoletter(7, all_letters[15:19]))\r\n T9dict.update(mapkeystoletter(8, all_letters[19:22]))\r\n T9dict.update(mapkeystoletter(9, all_letters[22:26]))\r\n T9dict[' '] = 0\r\n\r\n return T9dict",
"def DictFunction2():\r\n print \"Create Second Dictionary\"\r\n NumberDict = dict(zip((i for i in range(16)), (hex(i) for i in range(16))))\r\n print NumberDict",
"def initiate_codes(lottery_file):\n # load the lottery data\n lottery = {}\n with open(lottery_file) as lf:\n head = lf.readline()\n prev = None\n for line in lf:\n info = line.strip().split('|')\n issue = info[0]\n nums = map(int, info[1:])\n lottery[issue] = {\"numbers\": nums, \"previous\":prev, \"issue\": issue}\n prev = issue\n\n # get the missing info for 20150901001\n issues = sorted(lottery.keys())\n lot_miss_info = {}\n for issue in issues[100:]:\n lot_miss_info[issue] = {}\n # 0: ten thousand, 1: thousand, 2: hundred, 3: ten, 4: unit\n for i in range(5):\n lot_miss_info[issue][i] = {}\n for dig in range(10):\n lot_miss_info[issue][i][dig] = 0\n mis_count = 0\n # trace back and get the previous appearence\n cur = issue\n while True:\n lot = lottery[cur]\n if lot[\"numbers\"][i] == dig:\n break\n else:\n mis_count += 1\n cur = lot[\"previous\"]\n lot_miss_info[issue][i][dig] = mis_count\n\n # compute the codes information\n codes = {}\n for issue in issues[100:]:\n # currently we only consider unit(4) and ten(3) digit codes\n # we have defined 7 codes\n # luo_ma: 当前中奖数字\n # leng_1_ma: 当前期中最大间隔的数字\n # leng_2_ma: 当前期中第二大间隔的数字\n # sha_ma: 十位(落码-1), 个位(落码*3+3)\n # chuan_1: 落码-1\n # chuan_2: 落码+1\n # 隔码: 上一期的落码\n codes[issue] = {}\n for dig in range(3, 5):\n code = compute_code(issue, dig, lottery, lot_miss_info)\n codes[issue][dig] = code\n\n # compute the match information\n matched = {} # 只匹配落/杀/冷12码\n full_matched = {}# 匹配所有6码\n match_keys = [\"luo_ma\", \"leng_1_ma\", \"leng_2_ma\", \"sha_ma\"]\n \n full_match_keys = match_keys + [\"chuan_1\", \"chuan_2\", \"ge_ma\"]\n for issue in issues[101:]:\n prev_id = lottery[issue][\"previous\"]\n numbers = lottery[issue][\"numbers\"]\n prev_code = codes[prev_id]\n flag, full_flag = update_match(lottery[issue], prev_code)\n matched[issue] = flag\n full_matched[issue] = full_flag\n\n # compute the l4z1hbz\n l4z1hbz_seq = {}\n for issue in issues[108:]:\n l4z1hbz_seq[issue] = compute_l4z1hbz(issue, matched, lottery)\n\n return lottery, lot_miss_info, codes, matched, full_matched, l4z1hbz_seq",
"def get_dict_refine_insee_code(ls_valid_ic):\n dict_refine_ic = {x: (x, x) for x in ls_valid_ic}\n ls_valid_ic_corse = [x for x in ls_valid_ic if re.match('2[AB]', x)]\n for ic in ls_valid_ic_corse:\n dict_refine_ic[ic[:1] + u'0' + ic[2:]] = (ic, ic) # assumed unicity was checked\n dict_ic_ardts = dict(list(itertools.product(map(str,range(13201, 13217)), ['13055']))+\\\n list(itertools.product(map(str,range(69381, 69390)), ['69123']))+\\\n list(itertools.product(map(str,range(75101, 75121)), ['75056'])))\n dict_ic_ardts = {k : (v,k) for k,v in dict_ic_ardts.items()}\n dict_refine_ic.update(dict_ic_ardts)\n return dict_refine_ic",
"def gene_ID_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene ID\"]\n resD[keyI] = valueI\n\n return resD",
"def gene_symbol_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene Symbol\"]\n resD[keyI] = valueI\n\n return resD",
"def buildCoder(shift):\n out_dic = {}\n lo = string.ascii_lowercase\n up = string.ascii_uppercase\n for i in lo:\n out_dic[i] = lo[(lo.index(i) + shift) % len(lo)]\n for i in up:\n out_dic[i] = up[(up.index(i) + shift) % len(up)]\n return out_dic",
"def get_letter_to_code_mappings():\n return {\n \"a\": \"Alfa\", \"b\": \"Bravo\", \"c\": \"Charlie\", \"d\": \"Delta\", \"e\": \"Echo\",\n \"f\": \"Foxtrot\", \"g\": \"Golf\", \"h\": \"Hotel\", \"i\": \"India\", \"j\":\n \"Juliett\", \"k\": \"Kilo\", \"l\": \"Lima\", \"m\": \"Mike\", \"n\": \"November\", \"o\":\n \"Oscar\", \"p\": \"Papa\", \"q\": \"Quebec\", \"r\": \"Romeo\", \"s\": \"Sierra\", \"t\":\n \"Tango\", \"u\": \"Uniform\", \"v\": \"Victor\", \"w\": \"Whiskey\", \"x\": \"Xray\",\n \"y\": \"Yankee\", \"z\": \"Zulu\", \"0\": \"Zero\", \"1\": \"One\", \"2\": \"Two\", \"3\":\n \"Three\", \"4\": \"Four\", \"5\": \"Five\", \"6\": \"Six\", \"7\": \"Seven\", \"8\":\n \"Eight\", \"9\": \"Niner\", \"=\": \"Equals\", \"?\": \"Query\", \"/\": \"Slash\", \",\":\n \"Comma\", \".\": \"Stop\", \":\": \"Colon\", \"'\": \"Apostrophe\", \"-\": \"Dash\",\n \"(\": \"Open\", \")\": \"Close\", \"@\": \"At\",\n }",
"def database(words):\n\n d={}\n if len(words) < 3:\n return\n \n for i,word in enumerate(words):\n try:\n first,second,third = (words[i], words[i+1], words[i+2])\n except IndexError:\n break\n key = (first,second)\n if key not in d:\n d[key] = []\n d[key].append(third)\n \n return d",
"def markov_analysis(text_list, num_pre=2):\n dictionary = dict()\n for i in range(len(text_list) - num_pre):\n\n prefix = tuple(text_list[i: i+num_pre])\n suffix = text_list[i+num_pre]\n\n if dictionary.get(prefix, 0) != 0:\n dictionary[prefix].append(suffix)\n else:\n dictionary[prefix] = [suffix]\n\n return dictionary",
"def _raw_misc_to_dict(raw):\n ret = {}\n for elem in raw:\n key, _, val = elem.partition(',')\n key = key.lstrip(\"(\").strip()\n val = val[:-1].strip()\n ret[key] = val\n return ret",
"def get_numbers_dict(input_file):\n numbers_dict = {}\n with open(input_file) as f:\n reader = csv.reader(f)\n\n for name, number in reader:\n # check phone number is valid\n assert len(number) == 13\n assert number.startswith('+44')\n\n # add to dict\n numbers_dict[name] = number\n\n return numbers_dict",
"def causes(lst):\r\n causes_dict = {}\r\n causes_dict[\"Dead\"] = int(lst[1])\r\n causes_dict[\"Cancer\"] = int(lst[3])\r\n causes_dict[\"Heart disease\"] = int(lst[10])\r\n return causes_dict",
"def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = \"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])",
"def to_dict(a_list):\n return dict(zip(map(str, range(len(a_list))), a_list))",
"def init_results_dict(fr_codes):\r\n \r\n res_dict = {}\r\n for k,v in fr_codes.items():\r\n res_dict[v] = 0\r\n \r\n return res_dict",
"def pro_code_dict(code=False, inverse=False, return_all=False):\n\n pro_code_dict = {\"0500\": \"Date\",\n \"0501\": \"height [> 0: top, < 0: bottom of elem.] (cm)\",\n \"0502\": \"element density (kg m-3)\",\n \"0503\": \"element temperature (degC)\",\n \"0504\": \"element ID (1)\",\n \"0506\": \"liquid water content by volume (%)\",\n \"0508\": \"dendricity (1)\",\n \"0509\": \"sphericity (1)\",\n \"0510\": \"coordination number (1)\",\n \"0511\": \"bond size (mm)\",\n \"0512\": \"grain size (mm)\",\n \"0513\": \"grain type (Swiss Code F1F2F3)\",\n \"0514\": \"grain type, grain size (mm), and density (kg m-3) of SH at surface\",\n \"0515\": \"ice volume fraction (%)\",\n \"0516\": \"air volume fraction (%)\",\n \"0517\": \"stress in (kPa)\",\n \"0518\": \"viscosity (GPa s)\",\n \"0519\": \"soil volume fraction (%)\",\n \"0520\": \"temperature gradient (K m-1)\",\n \"0521\": \"thermal conductivity (W K-1 m-1)\",\n \"0522\": \"absorbed shortwave radiation (W m-2)\",\n \"0523\": \"viscous deformation rate (1.e-6 s-1)\",\n \"0531\": \"deformation rate stability index Sdef\",\n \"0532\": \"natural stability index Sn38\",\n \"0533\": \"stability index Sk38\",\n \"0534\": \"hand hardness either (N) or index steps (1)\",\n \"0535\": \"optical equivalent grain size (mm)\",\n \"0540\": \"bulk salinity (g/kg)\",\n \"0541\": \"brine salinity (g/kg)\",\n \"0601\": \"snow shear strength (kPa)\",\n \"0602\": \"grain size difference (mm)\",\n \"0603\": \"hardness difference (1)\",\n \"0604\": \"ssi\",\n \"0605\": \"inverse texture index ITI (Mg m-4)\",\n \"0606\": \"critical cut length (m)\", }\n\n if inverse:\n inverse = {value: key for key, value in pro_code_dict.items()}\n return(inverse[code])\n if code:\n return (pro_code_dict[code])\n if return_all:\n return (pro_code_dict)",
"def potcar_str2dict(potcar_list: Optional[str]) -> dict:\n if potcar_list is None:\n return {}\n elif isinstance(potcar_list, str):\n potcar_list = potcar_list.split()\\\n\n d = {}\n for p in potcar_list:\n element = p.split(\"_\")[0]\n d[element] = p\n return d",
"def _to_dict(self, data_list):\n data_dict = dict(pair.split('=') for pair in data_list)\n return data_dict",
"def parse(result):\n return {make_tuple(key[4:]): val for key, val in result.items()}",
"def getM(F):\n last_char = None\n idx = 0\n m = dict()\n for c in list(F):\n if last_char is None or c != last_char:\n m[c] = idx\n last_char = c\n idx += 1\n return m",
"def get_codes(tree: HuffmanTree) -> Dict[int, str]:\n # Edge Case\n if tree is None or (tree.symbol is None and tree.is_leaf()):\n return {}\n else:\n return _get_codes_helper(tree, \"\")"
] | [
"0.6259482",
"0.6007973",
"0.59951216",
"0.58947045",
"0.5803456",
"0.5800195",
"0.56818557",
"0.5679202",
"0.5667462",
"0.56436056",
"0.5632375",
"0.5613897",
"0.5601778",
"0.5584296",
"0.55789876",
"0.5572728",
"0.5555314",
"0.5550332",
"0.55159205",
"0.5491724",
"0.54403156",
"0.53970623",
"0.5382271",
"0.5381656",
"0.53765905",
"0.53734374",
"0.53634083",
"0.536137",
"0.5351944",
"0.5351231"
] | 0.63621217 | 0 |
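The record above groups each 'PARENT)CHILD' code under its three-character parent. A minimal usage sketch, assuming make_orbit_dict from that record is in scope; the sample codes below are illustrative and not taken from the dataset:

# Illustrative input only, in the 'PARENT)CHILD' format the query describes.
sample_codes = ["COM)AAA", "AAA)BBB", "AAA)CCC"]
parents_to_children = make_orbit_dict(sample_codes)
print(parents_to_children)  # {'COM': ['AAA'], 'AAA': ['BBB', 'CCC']}

The next record's make_distance_dict consumes exactly this parent-to-children mapping, walking it breadth-first to find each object's distance from a chosen center.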
Accepts a dictionary of orbit codes (each parent mapped to a list of its children) and an initial key (the center). Returns a dictionary mapping each object to its distance from the center. | def make_distance_dict(orbit_dict, key1):
orbits = 0
distances = {key1:orbits}
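# Gather every object name that appears anywhere in the map, as a parent key or as a child value.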
key_set = set(orbit_dict.keys())
value_set = set()
for value in orbit_dict.values():
value_set = value_set.union(set(value))
orbit_set = key_set.union(value_set)
orbit_set.remove(key1)
current_vertices = orbit_dict[key1][:]
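# Breadth-first sweep outward from key1: each pass over current_vertices is one more orbital step from the center.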
while len(orbit_set) !=0:
orbits += 1
# print(orbits)
new_vertices = []
while len(current_vertices) != 0:
current_vertex = current_vertices.pop(0)
distances[current_vertex] = orbits
if current_vertex in orbit_dict.keys():
for new_vertex in orbit_dict[current_vertex]:
new_vertices.append(new_vertex)
orbit_set.remove(current_vertex)
current_vertices = new_vertices
return distances | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance_to_objects(orbit_tree: Dict[str, str], satellite_name) -> Dict[str, int]:\n object_distances: Dict[str, int] = {}\n satellite = orbit_tree[satellite_name]\n distance = 0\n while satellite != 'COM':\n # Get distance\n object_distances[satellite] = distance # Start with distance to object directly orbited, =0 for out calc.\n distance += 1\n # Next satellite name\n satellite = orbit_tree[satellite]\n return object_distances",
"def _euclidean_dist_loc(map_x: object) -> dict:\n \n local_distance = {}\n for node, connections in enumerate(map_x.roads):\n nx, ny = map_x.intersections[node]\n for connection in connections:\n cx, cy = map_x.intersections[connection]\n distance = math.sqrt( (nx-cx)**2 + (ny-cy)**2 )\n local_distance.setdefault(node, {})\n local_distance[node].update( {connection: distance} )\n return local_distance",
"def make_orbit_dict(orbit_codes):\n orbit_dict = {}\n for code in orbit_codes:\n if code[0:3] in orbit_dict.keys():\n orbit_dict[code[0:3]].append(code[4:])\n else:\n orbit_dict[code[0:3]] = [code[4:]]\n return orbit_dict",
"def get_distances_from_center(center_coordinates, p_coordinates):\n \n center_y, center_x = center_coordinates\n p_y, p_x = p_coordinates\n return (p_y - center_y)**2 + (p_x - center_x)**2",
"def get_distance(qa_dict, qb_dict):\r\n d=0\r\n j_names=qa_dict.keys()\r\n if len(j_names)==0:\r\n rospy.loginfo(\"Length is 0\")\r\n return 0\r\n for jn in j_names:\r\n d+=abs(qb_dict[jn]-qa_dict[jn])\r\n d/=len(j_names)\r\n return d",
"def main():\n\n print(\"-------------------------\")\n print(\"| codedrome.com |\")\n print(\"| Great Circle Distance |\")\n print(\"-------------------------\\n\")\n\n starting_cities = [{\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275},\n {\"name\": \"London\", \"latitude1_degrees\": 51.507222, \"longitude1_degrees\": -0.1275}]\n\n destination_cities = [{\"name\": \"Tokyo\", \"latitude1_degrees\": 35.683333, \"longitude1_degrees\": 139.683333},\n {\"name\": \"New York\", \"latitude1_degrees\": 40.7127, \"longitude1_degrees\": -74.0059},\n {\"name\": \"New Delhi\", \"latitude1_degrees\": 28.613889, \"longitude1_degrees\": 77.208889},\n {\"name\": \"Sydney\", \"latitude1_degrees\": -33.865, \"longitude1_degrees\": 151.209444},\n {\"name\": \"Cape Town\", \"latitude1_degrees\": -33.925278, \"longitude1_degrees\": 18.423889},\n {\"name\": \"Rio de Janeiro\", \"latitude1_degrees\": -22.908333, \"longitude1_degrees\": -43.196389},\n {\"name\": \"Oblivion\", \"latitude1_degrees\": 91, \"longitude1_degrees\": 360}]\n\n gc = greatcircle.GreatCircle()\n\n for i in range(0, len(starting_cities)):\n\n gc.name1 = starting_cities[i][\"name\"]\n gc.latitude1_degrees = starting_cities[i][\"latitude1_degrees\"]\n gc.longitude1_degrees = starting_cities[i][\"longitude1_degrees\"]\n\n gc.name2 = destination_cities[i][\"name\"]\n gc.latitude2_degrees = destination_cities[i][\"latitude1_degrees\"]\n gc.longitude2_degrees = destination_cities[i][\"longitude1_degrees\"]\n\n gc.calculate()\n\n output(gc)",
"def dist_pred_dict(self, curr):\n dist = {}\n pred = {}\n for currency in self.currencies:\n dist[currency] = float('inf') # set all starting vertices to be infinite distance away\n pred[currency] = None\n\n dist[curr] = 0\n\n return dist, pred",
"def _add_distances_from_center(\n self, places_table: Dict[str, Any], district_name: str\n ) -> Dict[str, Any]:\n location = self.gmaps.find_place(\n input=district_name, input_type=\"textquery\", fields=[\"geometry\"]\n )\n location_coordinates = tuple(\n location[\"candidates\"][0][\"geometry\"][\"location\"].values()\n )\n place_coordinates = [\n (p[\"location_lat\"], p[\"location_lng\"]) for p in places_table\n ]\n distances = []\n for place_coordinates_chunk in _chunked_iterable(place_coordinates, 25):\n distances_from_center = self.gmaps.distance_matrix(\n origins=location_coordinates,\n destinations=list(place_coordinates_chunk),\n mode=\"walking\",\n )[\"rows\"][0][\"elements\"]\n for d in distances_from_center:\n distances.append(d[\"distance\"][\"value\"])\n\n for i in range(len(places_table)):\n places_table[i][\"distance_from_center\"] = distances[i]\n\n return places_table",
"def _euclidean_dist_glob(map_x: object, goal: int) -> dict:\n \n global_distance = {}\n xg, yg = map_x.intersections[goal]\n for node, (xn, yn) in map_x.intersections.items():\n distance = math.sqrt( (xg-xn)**2 + (yg-yn)**2 )\n global_distance[node] = distance\n return global_distance",
"def get_data_extra(self, initial):\n extra = {\n 'distance':'10',\n 'latitude':'0',\n 'longitude':'1'\n }\n return dict(initial.items() + extra.items())",
"def dist_dict_from_json(json_dists: Dict[str, float]) -> Dict[Tuple[str, str], float]:\n\n return {ast.literal_eval(custs_key): dist for custs_key, dist in json_dists.items()}",
"def to_distance_maps(\n keypoints: Sequence[Tuple[float, float]], height: int, width: int, inverted: bool = False\n) -> np.ndarray:\n distance_maps = np.zeros((height, width, len(keypoints)), dtype=np.float32)\n\n yy = np.arange(0, height)\n xx = np.arange(0, width)\n grid_xx, grid_yy = np.meshgrid(xx, yy)\n\n for i, (x, y) in enumerate(keypoints):\n distance_maps[:, :, i] = (grid_xx - x) ** 2 + (grid_yy - y) ** 2\n\n distance_maps = np.sqrt(distance_maps)\n if inverted:\n return 1 / (distance_maps + 1)\n return distance_maps",
"def calculate_l1_distance(dict1, dict2):\n res = 0.0\n for key in dict1.keys():\n d1 = dict1[key]\n d2 = dict2[key]\n res += abs(d1-d2)\n return res",
"def distance(self, keyOne, keyTwo):",
"def calcRs(distances):\n r = {}\n \n for key in distances.keys():\n summedDistances = 0\n for subkey in distances[key].keys():\n summedDistances += distances[key][subkey]\n r[key] = summedDistances/(len(distances.keys())-2)\n\n return r",
"def eccentricityMap(altMap, aziMap, altCenter, aziCenter):\r\n\r\n altMap2 = altMap * np.pi / 180\r\n aziMap2 = aziMap * np.pi / 180\r\n\r\n altCenter2 = altCenter * np.pi / 180\r\n aziCenter2 = aziCenter * np.pi / 180\r\n\r\n eccMap = np.zeros(altMap.shape)\r\n eccMap[:] = np.nan\r\n # for i in xrange(altMap.shape[0]):\r\n # for j in xrange(altMap.shape[1]):\r\n # alt = altMap2[i,j]\r\n # azi = aziMap2[i,j]\r\n # eccMap[i,j] = np.arctan(np.sqrt(np.tan(alt-altCenter2)**2 + ((np.tan(azi-aziCenter2)**2)/(np.cos(alt-altCenter2)**2))))\r\n eccMap = np.arctan(\r\n np.sqrt(\r\n np.square(np.tan(altMap2 - altCenter2))\r\n +\r\n np.square(np.tan(aziMap2 - aziCenter2)) / np.square(np.cos(altMap2 - altCenter2))\r\n )\r\n )\r\n\r\n eccMap = eccMap * 180 / np.pi\r\n return eccMap",
"def distance(p1, p2):\n dist = 0\n for k in set([*p1.keys(), *p2.keys()]):\n dist += (p1.get(k, 0) - p2.get(k, 0))**2\n return math.sqrt(dist)",
"def distance_to_galactic_center(galactic_coords, d):\n l, b = galactic_coords[0] * 3600, galactic_coords[1] * 3600\n h_star_gcp, d_star_sun = d * np.sin(b), d * np.cos(b)\n d_star_gc = np.sqrt(d_star_sun**2 + d_sun_GC**2 - 2*d_star_sun*d_sun_GC*np.cos(l))\n return d_star_gc",
"def get_distance(film_coordinates, latitude, longitude):\n film_distance = []\n for film in film_coordinates.keys():\n user_coordinates = (latitude, longitude)\n film_coord = (film[0], film[1])\n\n distance = great_circle(user_coordinates, film_coord).kilometers\n film_distance.append((distance, film[0], film[1], film_coordinates[film]))\n\n film_distance.sort(key=lambda x: x[0])\n return film_distance[:10]",
"def distance(data):\n p = pi/180\n a = 0.5 - np.cos((data['end_lat']-data['lat'])*p)/2 + np.cos(data['lat']*p) * np.cos(data['end_lat']*p) * (1-np.cos((data['end_lon']-data['lon'])*p))/2\n data['distance_to_end'] = 12742 * np.arcsin(np.sqrt(a)) * 1000 \n b = 0.5 - np.cos((data['start_lat']-data['lat'])*p)/2 + np.cos(data['lat']*p) * np.cos(data['start_lat']*p) * (1-np.cos((data['start_lon']-data['lon'])*p))/2\n data['distance_from_start'] = 12742 * np.arcsin(np.sqrt(b)) * 1000 \n return data",
"def eccentricityMap(self, altMap, aziMap, altCenter, aziCenter):\r\n\r\n altMap2 = altMap * np.pi / 180\r\n aziMap2 = aziMap * np.pi / 180\r\n\r\n altCenter2 = altCenter * np.pi / 180\r\n aziCenter2 = aziCenter * np.pi / 180\r\n\r\n eccMap = np.zeros(self.array.shape)\r\n # eccMap[:] = np.nan\r\n # for i in xrange(self.array.shape[0]):\r\n # for j in xrange(self.array.shape[1]):\r\n # if self.array[i,j]:\r\n # alt = altMap2[i,j]\r\n # azi = aziMap2[i,j]\r\n # eccMap[i,j] = np.arctan(np.sqrt(np.tan(alt-altCenter2)**2 + ((np.tan(azi-aziCenter2)**2)/(np.cos(alt-altCenter2)**2))))\r\n eccMap = np.arctan(\r\n np.sqrt(\r\n np.square(np.tan(altMap2 - altCenter2))\r\n +\r\n np.square(np.tan(aziMap2 - aziCenter2)) / np.square(np.cos(altMap2 - altCenter2))\r\n )\r\n )\r\n eccMap = eccMap * 180 / np.pi\r\n eccMap[self.array == 0] = np.nan\r\n return eccMap",
"def readZoneRadiusMap(zoneMap):\n centroids = zoneMap.centroid\n hulls = zoneMap.convex_hull\n zoneRadiusMap = {}\n for i in range(len(centroids)):\n corners = MultiPoint(hulls[i].exterior.coords)\n center = centroids[i]\n distSum = sum(np.sqrt((center.x - corner.x)**2 + (center.y - corner.y)**2) for corner in corners)\n\n # Average distance to centroid = (meters) / 1000 (meters) * Km -> Mile Factor\n zoneRadiusMap[i + 1] = distSum/len(corners) / 1000 * 0.621371 \n\n return zoneRadiusMap",
"def calc_distances(client_list):\n distances = {}\n for x in client_list:\n distances[x] = {}\n for y in client_list:\n distances[x][y] = dis(x, y)\n return distances",
"def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()",
"def _assign_barycenters(_in_points: List[Point], _in_barycenters: List[Barycenter]) -> Dict[Point, Barycenter]:\n distance = NewType(\"distance\", float)\n distances: Dict[Point, Dict[Barycenter, distance]] = {}\n # For each point: calculate the distance between the point and (all) the barycenters.\n for _point in _in_points:\n distances[_point] = {}\n for _barycenter in _in_barycenters:\n distances[_point][Barycenter(_barycenter)] = distance(_point.distance(_barycenter))\n result: Dict[Point, _point_barycenter] = {}\n for _point, dist in distances.items():\n result[_point] = min(dist, key=dist.get)\n return result",
"def distance_to_origin(self):\n\n self.D = edist(self.isomap,\n np.zeros([1, self.isomap.shape[1]])).flatten()",
"def get_distances_hypernym_dic(self):\n hypernym_paths = self.hypernym_paths()\n distances_dic = {}\n for p in hypernym_paths:\n for i in range(len(p)):\n hypernym = p[i]\n dist = len(p) - 1 - i\n if hypernym in distances_dic.keys():\n current_dist = distances_dic[hypernym]\n if dist < current_dist:\n distances_dic[hypernym] = dist\n else:\n distances_dic[hypernym] = dist\n return distances_dic",
"def distance(space, locations):\n\n country_linear_distance = 0\n city_linear_distance = 0\n\n coordinates_array = [(locations.find_one({\"_id\": dict['location']},\n {\"_id\": 0, \"geo_data\": 1})['geo_data'][0]['geometry']['location']['lng'],\n locations.find_one({\"_id\": dict['location']},\n {\"_id\": 0, \"geo_data\": 1})['geo_data'][0]['geometry']['location']['lat']) for dict in space if locations.find_one({\"_id\": dict['location']})['geo_data'][0]['address_components'][0]['types'][0] == 'country']\n\n len_coordinates_array = len(coordinates_array)\n coord_combination = [(val, val+1) for val in range(len_coordinates_array-1)]\n coord_combination = [(coordinates_array[tup[0]], coordinates_array[tup[1]]) for tup in coord_combination]\n\n for tup in coord_combination:\n\n term_left = (tup[1][1]-tup[0][1])**2\n term_right = (tup[1][0]-tup[0][0])**2\n dist = math.sqrt(term_left + term_right)\n country_linear_distance += dist\n\n # we repeat the operation but for city\n\n coordinates_array = [(locations.find_one({\"_id\": dict['location']},\n {\"_id\": 0, \"geo_data\": 1})['geo_data'][0]['geometry']['location'][\n 'lng'],\n locations.find_one({\"_id\": dict['location']},\n {\"_id\": 0, \"geo_data\": 1})['geo_data'][0]['geometry']['location'][\n 'lat']) for dict in space if\n locations.find_one({\"_id\": dict['location']})['geo_data'][0]['address_components'][0][\n 'types'][0] == 'locality']\n\n len_coordinates_array = len(coordinates_array)\n coord_combination = [(val, val + 1) for val in range(len_coordinates_array - 1)]\n coord_combination = [(coordinates_array[tup[0]], coordinates_array[tup[1]]) for tup in coord_combination]\n\n for tup in coord_combination:\n term_left = (tup[1][1] - tup[0][1]) ** 2\n term_right = (tup[1][0] - tup[0][0]) ** 2\n dist = math.sqrt(term_left + term_right)\n city_linear_distance += dist\n\n country_line_to_km = country_linear_distance * 111\n city_line_to_km = city_linear_distance * 111\n\n return {'country_coord_dist': country_linear_distance,\n 'km_btw_country': country_line_to_km,\n 'city_coord_dist': city_linear_distance,\n 'city_km_dist': city_line_to_km}",
"def meet_santa(orbitDict):\n santa_count, santa_path = does_orbit('SAN', orbitDict)\n your_count, your_path = does_orbit('YOU', orbitDict)\n santa_planets = set(santa_path)\n your_planets = set(your_path)\n common = len(santa_planets.intersection(your_planets))\n dist = santa_count + your_count - 2*common\n return dist",
"def keys_near(self, key, radius):\n\n minx, innerx = divmod(key[0], 16)\n minz, innerz = divmod(key[1], 16)\n minx = int(minx)\n minz = int(minz)\n\n # Adjust for range() purposes.\n maxx = minx + 1\n maxz = minz + 1\n\n # Adjust for leakiness.\n if innerx <= radius:\n minx -= 1\n if innerz <= radius:\n minz -= 1\n if innerx + radius >= 16:\n maxx += 1\n if innerz + radius >= 16:\n maxz += 1\n\n # Expand as needed.\n expand = int(radius // 16)\n minx -= expand\n minz -= expand\n maxx += expand\n maxz += expand\n\n return product(xrange(minx, maxx), xrange(minz, maxz))"
] | [
"0.6284505",
"0.54571176",
"0.5348561",
"0.53009427",
"0.52899563",
"0.5278169",
"0.5262005",
"0.51802576",
"0.5165019",
"0.51243347",
"0.5092222",
"0.5043042",
"0.5035513",
"0.5016536",
"0.5010947",
"0.4997802",
"0.49625573",
"0.49500382",
"0.4944014",
"0.49378332",
"0.4937191",
"0.4919564",
"0.48739982",
"0.48677975",
"0.48322216",
"0.48224458",
"0.48222616",
"0.48220962",
"0.48075747",
"0.48060325"
] | 0.674759 | 0 |
Accepts a dictionary of orbit paths and a target vertex. Returns the path to the target vertex as a list. | def path_maker(orbit_dict, vertex):
path_list = [vertex]
while path_list[-1] != 'COM':
# print(path_list)
target = path_list[-1]
for key in orbit_dict.keys():
if target in orbit_dict[key]:
path_list.append(key)
return path_list[::-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def path(self, source, target):\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError(\"no path to target\")\n else:\n return self.path(source, self.parent[target]) + [target]",
"def path(self, source, target):\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError(\"no path to target\")\n else:\n return self.path(source, self.parent[target]) + [target]",
"def get_path(input_dictionary, output_dictionary,\n input_species_list, output_species_list):\n\n input_operon_list = []\n path_queue = [(input_operon_list, input_species_list) ]\n\n final_operon_path_list = []\n final_species_path_list = []\n\n while path_queue != []:\n\n ###print \"\\nget_path: path queue:\",path_queue\n\n path_queue,\\\n final_operon_path_list,\\\n final_species_path_list = traverse(input_dictionary,\n output_dictionary,\n input_species_list,\n output_species_list,\n path_queue,\n final_operon_path_list,\n final_species_path_list)\n\n return final_operon_path_list, final_species_path_list",
"def build_path(search_tree, v):\n if search_tree == None or search_tree == {}:\n return []\n\n path = []\n while search_tree.has_key(v) and v != None:\n path.insert(0, v)\n v = search_tree[v]\n return path",
"def get_path(prev_vertices, vertices):\n cur_vertex = 0\n prev = prev_vertices[0, vertices]\n cur_vertices = vertices\n path = [0]\n while prev != 0:\n path.append(prev)\n cur_vertex = prev\n cur_vertices = frozenset(cur_vertices - set([cur_vertex]))\n prev = prev_vertices[cur_vertex, cur_vertices]\n return path[::-1]",
"def path_to(self, v):\n\n if self.has_path_to(v) is False:\n return None\n v_path = []\n x = v\n while x is not self._s:\n v_path.append(x)\n x = self._edge_to[x]\n v_path.append(self._s)\n return tuple(reversed(v_path))",
"def get_path(backlinks, target):\n path = []\n path_node = target\n while path_node is not None:\n path.append(path_node)\n path_node = backlinks.get(path_node, None)\n return list(reversed(path))",
"def path_to(self, vertex: int):\n path = None\n if self.connected(vertex):\n path = []\n while vertex != self._source:\n path.append(vertex)\n vertex = self._predecessor[vertex]\n path.append(self._source)\n\n return path",
"def path_to(self, vertex: int):\n path = None\n if self.connected(vertex):\n path = []\n while vertex != self._source:\n path.append(vertex)\n vertex = self._predecessor[vertex]\n path.append(self._source)\n\n return path",
"def reconstruct_path(goal: Vector2D, prev_node: dict) -> list:\n path = []\n prev = prev_node[goal] # remove 'goal' from path\n \n while prev != None:\n path.append(prev)\n prev = prev_node[prev]\n \n path = path[:-1] # remove 'start' from path\n path.reverse()\n return path",
"def pathTo(self, v): # O(# edges returned)\n if self.hasNegativeCycle():\n raise Exception(\"Negative cost cycle exists\")\n if not self.hasPathTo(v): return None\n path = [] # new Stack<DirectedEdge>()\n e = self._edgeTo[v]\n while e is not None: \n path.append(e) # push(e)\n e = self._edgeTo[e.get_from()]\n return path",
"def _target(path: List[Any], dictionary: Dict[str, Any]) -> Any:\n if not path:\n return dictionary\n current = dictionary\n for key in path:\n try:\n current = current[key]\n except KeyError as error:\n path = \" -> \".join(path)\n raise CertumException(f\"The path '{path}' doesn't exist\") from error\n return current",
"def path(self, target):\n return self.get_paths(target, use_edges=False, downwards=True)[0]",
"def get_path(self, cur_pose, target_pose):\n pass",
"def get_paths(self, target, use_edges=False, downwards=None):\n raise NotImplementedError()",
"def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def shortest_path(self, source, destination, parameter=None):\n paths = []\n for path in self.graph.shortest_paths(source, destination, parameter):\n paths.append({'hops': path})\n return jsonify({'paths': paths})",
"def pathDAG(graph, value, path, onePath):\n for node in graph:\n if node.value == value:\n for vertex in node.arrow:\n if vertex == None:\n path.append(onePath)\n break\n \n else:\n onePath.append(vertex.value)\n pathDAG(graph, vertex.value, path, onePath)\n onePath = [onePath[0]]\n \n return path",
"def reconstruct_path(source, target, predecessors):\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))",
"def construct_path(edge_dict, node_list):\n previous = None\n for item in node_list:\n if previous is None:\n previous = Path(item, 0)\n previous_item = item\n else:\n actions = edge_dict[previous_item]\n # assume that the actions are uniquely represented\n action_dict = {action.end: action for action in actions}\n correct_action = action_dict[item]\n previous_item = item\n previous = previous.extend_path(\n correct_action.end, correct_action.cost)\n return previous",
"def shortest_path(self, source, target):\r\n key = self.d.keys()\r\n #check that endpoints are in graph\r\n if source not in key or target not in key:\r\n raise KeyError(str(source) + \" and \" + str(target) + \" must be in graph\")\r\n #initialize V,Q and M\r\n V = []\r\n vis = dict()\r\n Q = deque()\r\n Q.append(source)\r\n M = set(source)\r\n #while target has not been visited\r\n while target not in M:\r\n #take first element of Q\r\n current = Q.popleft()\r\n #add element to visited\r\n V.append(current)\r\n neighbors = self.d[current]\r\n #for each neighbor of element\r\n for n in neighbors:\r\n #if element has not been checked, add it to queue\r\n #also save traveled edge in visited\r\n if n not in M:\r\n Q.append(n)\r\n vis.update({n:current})\r\n M.add(n)\r\n L = [target]\r\n #reverse the order of the traveled edges\r\n while L[-1] in vis.keys():\r\n L.append(vis[L[-1]])\r\n return L[::-1]",
"def solution_path(self) -> list[State]:",
"def find_paths(self, start_key, target_key):\n\n stack = [(start_key, [start_key])]\n while stack:\n node_key, path = stack.pop()\n node = self.nodes[node_key]\n for nxt in node.neighbors - set(path):\n if nxt == target_key:\n yield path + [nxt]\n else:\n stack.append((nxt, path + [nxt]))",
"def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def dfs_recursive(self, starting_vertex, target_vertex, visited=None, path=None):\n # if starting_vertex in visited:\n # return None\n # if starting_vertex == destination_vertex:\n # return [starting_vertex]\n # visited.add(starting_vertex)\n # for vertex in self.get_neighbors(starting_vertex):\n # path = self.dfs_recursive(vertex, destination_vertex, visited)\n # if path is not None: \n # return [starting_vertex] + path\n if visited == None:\n visited = set()\n if path == None:\n path = []\n visited.add(starting_vertex)\n path = path + [starting_vertex]\n if starting_vertex == target_vertex:\n return path\n for neighbor in self.get_neighbors(starting_vertex):\n if neighbor not in visited:\n new_path = self.dfs_recursive(neighbor, target_vertex, visited, path)\n if new_path is not None: \n return new_path\n return None",
"def get_component_paths(\n graph_client: GremlinClient, topology_id: str, topology_ref: str\n) -> List[List[str]]:\n\n sources_sinks: Dict[str, List[str]] = get_source_and_sink_comps(\n graph_client, topology_id, topology_ref\n )\n\n sgt: GraphTraversalSource = graph_client.topology_subgraph(\n topology_id, topology_ref\n )\n\n output: List[List[str]] = []\n\n for source in sources_sinks[\"sources\"]:\n # Pick a start vertex for this source\n start: Vertex = sgt.V().has(\"component\", source).next()\n for sink in sources_sinks[\"sinks\"]:\n LOG.debug(\n \"Finding paths from source component: %s to sink component: %s\",\n source,\n sink,\n )\n # Find one path from the source vertex to any sink vertex and emit the\n # components as well as the edges.\n full_path: List[Union[str, Edge]] = (\n sgt.V(start)\n .repeat(out(\"logically_connected\").simplePath())\n .until(has(\"component\", sink))\n .path()\n .by(\"component\")\n .by()\n .limit(1)\n .next()\n )\n\n # Filter out the edges and keep the component strings\n path: List[str] = [\n element for element in full_path if isinstance(element, str)\n ]\n\n output.append(path)\n\n return output",
"def pathFinder(startVert, endVert, sequencer):\n sequencer.push(startVert)\n prevVertex = {}\n prevVertex[startVert] = None\n while (not sequencer.empty()):\n current = sequencer.pop()\n for n in current.getConnections():\n if (n not in prevVertex):\n prevVertex[n] = current\n if n == endVert:\n return backtrack(startVert, n, prevVertex)\n sequencer.push(n)\n return None",
"def reconstructPath(came_from, current):\n path = [current]\n while current in came_from:\n current = came_from[current]\n path.append(current)\n print(f\"path: {path}\")\n return path",
"def get_attack_path(targets, map_, y, x):\n target_path = {}\n for t in targets:\n adjacent = map_.find_adjacent_open_squares(t.y, t.x)\n paths = []\n for (dy, dx) in adjacent:\n path = map_.bfs(y, x, dy, dx)\n if path is not None:\n paths.append(path)\n if not paths:\n continue\n target_path[dy, dx] = (t, min(paths, key=len))\n if not target_path:\n return None, None\n min_len = min([len(p[1]) for p in target_path.values()])\n min_paths = {k: v for (k, v) in target_path.items() if len(v[1]) == min_len}\n for k, v in sorted(min_paths.items()):\n return v[1][0]",
"def find_path(maze_map, start, target):\n path = [] # path list\n tried = set() # set for faster membership checks\n done = False\n curr_tile = start\n while not done:\n if curr_tile == target:\n done = True # if at target tile, we are done\n else:\n options = [ # possible moves\n (curr_tile[0] + 1, curr_tile[1]),\n (curr_tile[0] - 1, curr_tile[1]),\n (curr_tile[0], curr_tile[1] + 1),\n (curr_tile[0], curr_tile[1] - 1)\n ]\n test = (abs(target[0] - start[0]), abs(target[1] - start[0]))\n prefer = test.index(max(test[0], test[1]))\n if prefer == 0:\n options.sort(key=lambda x: x[0], reverse=True)\n else:\n options.sort(key=lambda x: x[1], reverse=True)\n backtrack = True # assume we must backtrack\n for opt in options:\n try:\n if maze_map[opt[0]][opt[1]] not in ('x', ) and opt not in tried:\n backtrack = False # if we haven't tried this option before, and it's not blocked\n path.append(opt) # then add to the path, and remember that it's been tried\n tried.add(opt)\n curr_tile = opt\n break\n except IndexError:\n continue\n if backtrack: # backtrack to the previous position in the path\n curr_tile = path.pop()\n return path"
] | [
"0.608171",
"0.608171",
"0.59479785",
"0.5850192",
"0.58495563",
"0.5833161",
"0.58198595",
"0.581729",
"0.581729",
"0.5787146",
"0.5774269",
"0.5771911",
"0.57635856",
"0.56759375",
"0.5617779",
"0.56149507",
"0.5577503",
"0.5557496",
"0.55098677",
"0.5485138",
"0.5476848",
"0.54685307",
"0.54454386",
"0.5398237",
"0.5341246",
"0.5340552",
"0.5317374",
"0.5316978",
"0.530299",
"0.52852225"
] | 0.78466034 | 0 |
return all organisms listed in the KEGG database | def get_org_list():
resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))
return resp.text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_organisms_genes(self):\n path = os.path.join(self.parent_path, \"organisms_genes.txt\")\n with open(path, \"w\") as freqs:\n freqs.write(\"Organism,Genes\\n\")\n for org, data in self.organisms.items():\n genes = \"\"\n for gene in data.get(self.GENE_IDS_KEY):\n genes = genes + \"{} \".format(gene[self.GENE_NAME_IDX])\n freqs.write(\"{},{}\\n\".format(org, genes))",
"def fetch_education(self):\r\n # intialize storage vars\r\n organizations = []\r\n education = set()\r\n\r\n ## 1. first get all the organization names using nltk\r\n \r\n # go through every sentence\r\n for sent in nltk.sent_tokenize(self.stringtext):\r\n # the through every POS-tagged chunk \r\n for chunk in nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sent))):\r\n # filter organizations \r\n if hasattr(chunk, 'label') and chunk.label() == 'ORGANIZATION':\r\n # append the matches to the result \r\n organizations.append(' '.join(c[0] for c in chunk.leaves()))\r\n \r\n # we search for each bigram and trigram for reserved words\r\n # (college, university etc...)\r\n for org in organizations:\r\n for word in SCHOOLWORDS:\r\n # append if it appears in the organization \r\n if org.lower().find(word) >= 0:\r\n education.add(org)\r\n \r\n return list(education)",
"def amenities(self):\n ''' for row in place_amenity: row.place_id and amenity.id\n == row.amenity_id:'''\n amenList = []\n for amenity in storage.all(Amenity).value():\n if self.id == amenity.place_id:\n amenList.append(amenity)\n return(amenList)",
"def atlas_organizations():\n pass",
"def get_genes_organisms(self):\n path = os.path.join(self.parent_path, \"genes_organisms.txt\")\n with open(path, \"w\") as f:\n f.write(\"Gene,Organisms\\n\")\n for gene in self.genes.keys():\n f.write(\"{},{}\".format(gene, \"/\".join(self.genes.get(gene).keys()) + \"\\n\"))",
"async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]",
"def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)",
"def show_all_amenities():\n\n amenities = storage.all(Amenity).values()\n new_list = []\n for amenity in amenities:\n new_list.append(amenity.to_dict())\n return jsonify(new_list)",
"def get_organism_names(results):\r\n\r\n organism_names = []\r\n\r\n for result in results:\r\n organism_names.append(result)\r\n\r\n return organism_names",
"def test_retrieve_l_organizations(self):\n pass",
"def organizations(self):\n self.elements('organizations')",
"def list(self) -> List[Organisation]:\n ...",
"def list_all_amenities():\n data = storage.all('Amenity')\n amenities = [v.to_dict() for k, v in data.items()]\n return jsonify(amenities)",
"def get_amenities():\n list_amenities = []\n for amenity in storage.all('Amenity').values():\n list_amenities.append(amenity.to_dict())\n return jsonify(list_amenities)",
"def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs",
"def all_amenities():\n amenities_list = []\n for amenity in storage.all(Amenity).values():\n amenities_list.append(amenity.to_dict())\n return jsonify(amenities_list)",
"def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))",
"def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)",
"def amenities(self):\n all_amenities = models.storage.all(Amenity)\n places = []\n for k, v in all_amenities.items():\n if v.id in self.amenity_ids:\n places.append(v)\n return places",
"def all_instructors(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Instructor(\n row[0], row[1], row[3], row[4]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.FirstName,\n i.LastName,\n i.SlackHandle,\n i.CohortId,\n c.Name\n from Instructor i\n join Cohort c on i.CohortId = c.CohortId\n order by i.CohortId\n \"\"\")\n\n all_instructors = db_cursor.fetchall()\n for instrutor in all_instructors:\n print(instrutor)",
"def get_amenities():\n amenities = []\n for amenity in storage.all(Amenity).values():\n amenities.append(amenity.to_dict())\n return jsonify(amenities)",
"def print_loc_acrnym():\n\n #Method2\n val = College.objects.values('acronym','contact')\n for i in val:\n print(i['acronym'],i['contact'])",
"def test_organizations_list(self):\n pass",
"def return_amenities():\n amenities = list(storage.all(Amenity).values())\n amenity_list = []\n for amenity in amenities:\n amenity_list.append(amenity.to_dict())\n return jsonify(amenity_list)",
"def get_amenities():\n amenities_dict_list = [amenity.to_dict() for amenity in\n storage.all(\"Amenity\").values()]\n return jsonify(amenities_dict_list)",
"def get_all_companies_and_people():",
"def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})",
"def organizations(self):\n return sorted(set([team.org for team in self.teams]), key=lambda o: o.title)",
"def get_people(team):",
"def organizations(self):\r\n return organizations.Organizations(self)"
] | [
"0.65354395",
"0.63119715",
"0.6283204",
"0.6249244",
"0.62227803",
"0.62135947",
"0.6140487",
"0.6117435",
"0.6089482",
"0.6086453",
"0.6074738",
"0.5990961",
"0.5985105",
"0.59681773",
"0.59571993",
"0.5947314",
"0.59222317",
"0.58978045",
"0.5895238",
"0.58123374",
"0.580739",
"0.57584006",
"0.5730681",
"0.5712248",
"0.565987",
"0.5659587",
"0.565564",
"0.5650788",
"0.5640142",
"0.5616323"
] | 0.7604763 | 0 |
return all pathways for an organism listed in the KEGG database | def get_pathways_list(org='hsa'):
resp = requests.get(''.join([Kegg.BASE_URL, 'list/pathway/', org]))
if resp.status_code == 200:
d = csv.DictReader(resp.text.split('\n'),
delimiter='\t',
fieldnames=('id', 'name'))
return [row for row in d]
return {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pathway(identifier, organism):\n pass",
"def get_routes():\n\n return Db().get_line_ids()",
"def railway_all(osm_path): \n return (retrieve(osm_path,'lines',['railway'])).rename(columns={'railway': 'asset'})",
"def kegg_pathway_enrichment(degs, negs, dbpaths=dbpaths, show_all=True, pthresh=0.01):\n\n deg_num_ko, deg_keggs = cbir_to_kegg(degs)\n neg_num_ko, neg_keggs = cbir_to_kegg(negs)\n\n print \"%-4d kegg pathways from %d DEGs\" % (len(deg_keggs), len(degs) )\n print \"%-4d kegg pathways from %d nonDEGs\" % (len(neg_keggs), len(negs) )\n\n # create dictionary of kegg pathways {pathwaytype:{pathway:[ko1,ko2,ko3]}}\n pathwaytype_dict = {}\n pathway_dict = {}\n pathway_lookup = {}\n\n print \"extracting pathways...\"\n ko1_h = open(dbpaths['kegg'], 'rb')\n for line in ko1_h:\n if line[0] == 'B': # Kegg path type eg: B <b>Replication and repair</b>\n pathtype_f = re.search('B.*<b>(.*)<', line)\n if pathtype_f is not None:\n pathtype = pathtype_f.group(1)\n else:\n pathtype = 'unknown'\n pathwaytype_dict[pathtype] = {}\n elif line[0] == 'C': # Kegg Pathway eg: 01200 Carbon metabolism [PATH:ko01200]\n pathway_f = re.search(\"C +([0-9]*) *(.*)\\[PATH\", line)\n if pathway_f is not None:\n pathway_id = pathway_f.group(1)\n pathway_name = pathway_f.group(2)\n else:\n pathway_id = 'unknown'\n pathway_name = 'unknown'\n pathway_dict[pathway_id] = {}\n pathway_lookup[pathway_id] = pathway_name\n elif line[0] == 'D': # Kegg term eg: K00844 HK; hexokinase [EC:2.7.1.1]\n koterm_f = re.search(\"(K[0-9]*)\", line)\n if koterm_f is not None:\n koterm = koterm_f.group(1)\n else:\n koterm = 'unknown'\n pathwaytype_dict[pathtype][koterm] = 1\n pathway_dict[pathway_id][koterm] = 1\n\n\n print \"calculating enrichment...\"\n pathwaytype_ps = {}\n pathway_ps = {}\n # count number of degs and negs in each pathway:\n for pathwaytype in pathwaytype_dict:\n pwtsize = len(pathwaytype_dict)\n degs_in_path = sum([1 for ko in pathwaytype_dict[pathwaytype] if ko in deg_keggs])\n negs_in_path = sum([1 for ko in pathwaytype_dict[pathwaytype] if ko in neg_keggs])\n degs_not_in = len(deg_keggs) - degs_in_path\n negs_not_in = len(neg_keggs) - negs_in_path\n\n oddrat, pval = fisher_exact([ [degs_in_path, degs_not_in],\n [negs_in_path, negs_not_in] ],\n alternative='greater')\n pathwaytype_ps[pathwaytype] = pval\n\n if pval < pthresh:\n print \"%s\\n \\\n In Path Not in Path\\n\\\n DEG : %-7d %d\\n\\\n non-DEG: %-7d %d\\n\\\n Odds Ratio:%.3f\\n\\\n P-value:%.4f\\n\" % (pathwaytype,degs_in_path,degs_not_in,negs_in_path,negs_not_in,\n oddrat, pval)\n\n\n for pathway in pathway_dict:\n pwtsize = len(pathway_dict)\n degs_in_path = sum([1 for ko in pathway_dict[pathway] if ko in deg_keggs])\n negs_in_path = sum([1 for ko in pathway_dict[pathway] if ko in neg_keggs])\n degs_not_in = len(deg_keggs) - degs_in_path\n negs_not_in = len(neg_keggs) - negs_in_path\n\n oddrat, pval = fisher_exact([ [degs_in_path, degs_not_in],\n [negs_in_path, negs_not_in] ],\n alternative='greater')\n pathway_ps[pathway + ' ' + pathway_lookup[pathway]] = pval\n\n ## Fisher's Exact Test:\n # In Pathway: Not in Pathway:\n # DEG : degs_in_path degs_not_in\n # non-DEG : negs_in_path negs_not_in\n #\n\n return pathwaytype_ps, pathway_ps",
"def parse_pathways(pathway_dataframe):\n return {\n kegg_id: name\n for line, (kegg_id, name) in pathway_dataframe.iterrows()\n }",
"def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path",
"def pathways(self) -> str:\n return self._pathways",
"def getPaths(db,i,j):\n if i+j==0:\n return [[(0,0)]]\n paths = []\n if \"N\" in db[i,j]:\n paths += getPaths(db,i-1,j)\n if \"W\" in db[i,j]:\n paths += getPaths(db,i,j-1)\n if \"NW\" in db[i,j]:\n paths += getPaths(db,i-1,j-1)\n for path in paths:\n path.append((i,j))\n return paths",
"def hypernym_paths(self):\n paths = []\n hypernyms = self._direct_hypernyms\n if self.is_root():\n paths = [[self]]\n for hypernym in hypernyms:\n for ancestor_list in hypernym.hypernym_paths():\n ancestor_list.append(self)\n paths.append(ancestor_list)\n return paths",
"def InitWayR(session):\n global way_r\n q = session.query(melt.StreetAssoc)\n way_r = set([it.osm_way for it in q.all()])",
"def get_msigdb_pathways(species, remap=None):\n LOGGER.info(\"Fetching MSigDB pathways\")\n\n def _get_requests():\n for file in MSIGDB_FILES:\n url = MSIGDB_URL + file\n\n LOGGER.info(\"Fetching {}\".format(url))\n\n response = requests.get(url, stream=True)\n response.raise_for_status()\n\n yield response\n\n def _get_data(line):\n line = line.decode(\"utf-8\")\n name, _, genes = line.split(\"\\t\", 2)\n # name, _, _, spec = name.split(\"%\")\n # assert species == spec\n return name, set(i for i in genes.split(\"\\t\"))\n\n pathways_df = pd.DataFrame(\n data=[\n _get_data(line)\n for response in _get_requests()\n for line in response.iter_lines()\n ],\n columns=[\"name\", \"set\"],\n )\n\n if remap and species not in [\"Homo sapiens\"]:\n to_name = \"{}{}\".format(\n species.split(\" \")[0][0],\n species.split(\" \")[1],\n ).lower()\n\n LOGGER.info(\"Remapping MSigDB to {} ({})\".format(species, to_name))\n\n mapper = EnsemblMapper(\n from_type='entrez',\n to_type='entrez',\n from_organism='hsapiens',\n to_organism=to_name,\n )\n pathways_df[\"set\"] = pathways_df[\"set\"].apply(\n lambda row: set(mapper.map_ids(row))\n )\n\n return pathways_df",
"def get_routes_timetable():\n\n return Db().get_line_ids()",
"def get_roads(self):\n return self.roads",
"def roads_all(osm_path): \n return (retrieve(osm_path,'lines',['highway'])).rename(columns={'highway': 'asset'})",
"def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text",
"def flattenPathway(inPathway):\n allowedNodes = [\"abstract\", \"family\", \"miRNA\", \"protein\", \"rna\"]\n outPathway = Pathway({}, {})\n ## read and search componentMap for protein components\n componentMap = getComponentMap(inPathway.nodes, inPathway.interactions)\n for entity in componentMap.keys():\n seenNodes = set()\n elements = []\n expand = deepcopy(componentMap[entity])\n while len(expand) > 0:\n if expand[0] in seenNodes:\n expand.pop(0)\n continue\n seenNodes.update([expand[0]])\n if inPathway.nodes[expand[0]] == \"protein\":\n elements.append(expand[0])\n elif expand[0] in componentMap:\n expand += deepcopy(componentMap[expand[0]])\n expand.pop(0)\n componentMap[entity] = elements\n ## iterate over all interactions\n for source in inPathway.interactions.keys(): \n for target in inPathway.interactions[source].keys():\n ## update interactions map\n if inPathway.nodes[source] in allowedNodes:\n if inPathway.nodes[target] in allowedNodes:\n if source not in outPathway.nodes:\n outPathway.nodes[source] = inPathway.nodes[source]\n if target not in outPathway.nodes:\n outPathway.nodes[target] = inPathway.nodes[target]\n if source not in outPathway.interactions:\n outPathway.interactions[source] = {}\n outPathway.interactions[source][target] = inPathway.interactions[source][target]\n elif target in componentMap:\n for element in componentMap[target]:\n if source != element:\n if source not in outPathway.nodes:\n outPathway.nodes[source] = inPathway.nodes[source]\n if element not in outPathway.nodes:\n outPathway.nodes[element] = inPathway.nodes[element]\n if source not in outPathway.interactions:\n outPathway.interactions[source] = {}\n if inPathway.interactions[source][target] == \"component>\":\n outPathway.interactions[source][element] = \"-a>\"\n else:\n outPathway.interactions[source][element] = inPathway.interactions[source][target]\n elif source in componentMap:\n if inPathway.nodes[target] in allowedNodes:\n for element in componentMap[source]:\n if element not in outPathway.nodes:\n outPathway.nodes[element] = inPathway.nodes[element]\n if target not in outPathway.nodes:\n outPathway.nodes[target] = inPathway.nodes[target]\n if element not in outPathway.interactions:\n outPathway.interactions[element] = {}\n outPathway.interactions[element][target] = inPathway.interactions[source][target]\n elif target in componentMap:\n continue\n return(outPathway)",
"def get_all_locations(self):",
"def railway_stops(osm_path): \n return retrieve(osm_path,'points',['railway'],**{'railway':[\"='halt' or \",\"='subway_entrance' or \",\"='tram_stop'\"]})",
"def greedy_path():\n itinerary = []\n cities = all_cities(data_set)\n starting_city = randomize_city_start(cities.keys()) # start from a random city\n # print \"starting_city: %s\" % starting_city\n cities_visited = {}\n \n # iterate through all cities\n count = 1\n while True:\n possible_routes = []\n #distance = []\n # print \"starting_city: %s\" % starting_city\n for path in data_set:\n # we only start with city that we have assigned in starting_city\n if starting_city in path['city_start']:\n # we don't go to cities we have visited\n if path['city_end'] in cities_visited:\n continue\n else:\n # print \"path: \", path\n possible_routes.append(path) # add the city if not in the list\n \n if not possible_routes:\n break\n # append this to itinerary\n route = get_shortest_route(possible_routes)\n count += 1\n itinerary.append(route)\n # add this city to visited_cities list\n cities_visited[route[0]] = count\n starting_city = route[1]\n \n return itinerary",
"def gmaps_optical_nodes(request):\n # Cypher query to get all cables with cable type fiber that are connected\n # to two optical node.\n q = \"\"\"\n MATCH (cable:Cable)\n WHERE cable.cable_type = \"Dark Fiber\"\n MATCH (cable)-[Connected_to]->(port)\n WITH cable, port\n MATCH (port)<-[:Has*0..]-(equipment)\n WHERE (equipment:Optical_Node) AND NOT equipment.type =~ \"(?i).*tss.*\"\n WITH cable, port, equipment\n MATCH p2=(equipment)-[:Located_in]->()<-[:Has*0..]-(loc)\n WHERE (loc:Site)\n RETURN cable, equipment, loc\n \"\"\"\n result = nc.query_to_list(nc.graphdb.manager, q)\n nodes = {}\n edges = {}\n for item in result:\n node = {\n 'name': item['equipment']['name'],\n 'url': helpers.get_node_url(item['equipment']['handle_id']),\n 'lng': float(str(item['loc'].get('longitude', 0))),\n 'lat': float(str(item['loc'].get('latitude', 0)))\n }\n coords = {\n 'lng': float(str(item['loc'].get('longitude', 0))),\n 'lat': float(str(item['loc'].get('latitude', 0)))\n }\n edge = {\n 'name': item['cable']['name'],\n 'url': helpers.get_node_url(item['cable']['handle_id']),\n 'end_points': [coords]\n }\n nodes[item['equipment']['name']] = node\n if item['cable']['name'] in edges:\n edges[item['cable']['name']]['end_points'].append(coords)\n else:\n edges[item['cable']['name']] = edge\n response = HttpResponse(content_type='application/json')\n json.dump({'nodes': list(nodes.values()), 'edges': list(edges.values())}, response)\n return response",
"def PathEnzReg(dic,organism):\n\t#calculation\n\tppcorrp = [0] * (len(dic.plst)*len(dic.plst))\n\tppcorrn = [0] * (len(dic.plst)*len(dic.plst))\n\tfor e in dic.elst:\n\t\tcs = e.cs[0]\n\t\tr = e.rct\n\t\tif e.activation == None:\n\t\t\tcontinue\n\t\tfor (i,p1) in enumerate(dic.plst):\n\t\t\tfor (j,p2) in enumerate(dic.plst):\n\t\t\t\tif cs in p1.sclst and r not in p1.srlst and cs not in p2.sclst and r in p2.srlst and p1 not in p2.splst and p2 not in p1.splst and p1 != p2:\n\t\t\t\t\tif e.activation == True:\n\t\t\t\t\t\tppcorrp[i*len(dic.plst)+j] += 1\n\t\t\t\t\telif e.activation == False:\n\t\t\t\t\t\tppcorrn[i*len(dic.plst)+j] += 1\n\n\t\n\t#output\n\twith open('./results/'+organism+'/logfiles/logpath.log','w') as out:\n\t\tout.write('#List of the XX couples of most interactive pathways:\\n#those pathways that have the biggest number of enzymes (EITHER + OR -) going exclusively from one to the other.\\n#FORMAT:\\n#number of interactions from p1 to p2 (from p2 to p1) [from p1 to p2 with opposite sign, from p2 to p1 with opposite sign]\\n#p1 name \\t p2 name\\n\\nACTIVATION:\\n')\n\t\tppcorrtp = ppcorrp[:]\n\t\tppcorrtn = ppcorrn[:]\n\t\tfor i in range(20):\n\t\t\tj = ppcorrp.index(max(ppcorrp))\n\t\t\t(m,n) = ((j-j%len(dic.plst))/len(dic.plst),j%len(dic.plst))\n\t\t\t(p1,p2) = (dic.plst[m],dic.plst[n])\n\t\t\tout.write(p1.name+'\\t'+p2.name+'\\n'+str(ppcorrtp[j])+'\\t('+str(ppcorrtp[n*len(dic.plst)+m])+')\\t'+str([ppcorrtn[j],ppcorrtn[n*len(dic.plst)+m]])+'\\n')\n\t\t\tppcorrp[m*len(dic.plst)+n] = 0\n\t\t\tppcorrp[n*len(dic.plst)+m] = 0\n\n\n\t\tout.write('\\n-------------------------------------------------------------------------------------\\n\\nINHIBITION:\\n')\n\t\tfor i in range(20):\n\t\t\tj = ppcorrn.index(max(ppcorrn))\n\t\t\t(m,n) = ((j-j%len(dic.plst))/len(dic.plst),j%len(dic.plst))\n\t\t\t(p1,p2) = (dic.plst[m],dic.plst[n])\n\t\t\tout.write(p1.name+'\\t'+p2.name+'\\n'+str(ppcorrtn[j])+'\\t('+str(ppcorrtn[n*len(dic.plst)+m])+')\\t'+str([ppcorrtp[j],ppcorrtp[n*len(dic.plst)+m]])+'\\n')\n\t\t\tppcorrn[m*len(dic.plst)+n] = 0\n\t\t\tppcorrn[n*len(dic.plst)+m] = 0",
"def get_programs(e: str, ans: str, all_paths_around_e: List[List[str]]):\n all_programs = []\n for path in all_paths_around_e:\n for l, (r, e_dash) in enumerate(path):\n if e_dash == ans:\n # get the path till this point\n all_programs.append([x for (x, _) in path[:l + 1]]) # we only need to keep the relations\n return all_programs",
"def all_ways(context):\n current = context['current']\n\n roads = RoadSegment.objects.filter(prescription=current)\n trails = TrailSegment.objects.filter(prescription=current)\n ways = Way.objects.filter(prescription=current)\n inspections = SignInspection.objects.filter(way__prescription=current)\n traffic_diagrams = TrafficControlDiagram.objects.filter(\n roadsegment__prescription=current).exclude(name=\"custom\").distinct()\n\n for qs in [roads, trails, ways, inspections]:\n qs.modified = qs.aggregate(Max('modified'))[\"modified__max\"]\n\n return {\n \"roads\": roads,\n \"trails\": trails,\n \"ways\": ways,\n \"standard_traffic_diagrams\": traffic_diagrams,\n \"inspections\": inspections,\n \"modified\": max([modified for modified in\n roads.modified, trails.modified,\n ways.modified, inspections.modified,\n current.created\n if modified is not None])\n }",
"def mainRoads(osm_path): \n return retrieve(osm_path,'lines',['highway','oneway','lanes','maxspeed'],**{'highway':[\"='primary' or \",\"='trunk' or \",\"='motorway' or \",\"='motorway_link' or \",\"='trunk_link' or \",\"='primary_link' or \", \"='secondary' or \",\"='tertiary' or \",\"='tertiary_link'\"]})",
"def railway_areas(osm_path): \n return retrieve(osm_path,'multipolygons',['railway','landuse'],**{'railway':[\"='platform' or \",\"='station' or \",\"='tram_stop'\"],'landuse':[\"='railway'\"]})",
"def get_route_list_db(agency):\n \n all_routes = Route.objects.filter(agency__agency_tag=agency)\n routes = {}\n for r in all_routes:\n routes[r.route_tag] = r.title\n\n return routes",
"def all_paths(self, node, destination, dist, path):\n\n d=self.dict()\n p=[]\n for i in range(len(path)):\n p.append(path[i])\n p.insert(len(p),node)\n \n if len(p)-1==dist:\n if node==destination:\n return p\n else:\n return None\n\n my_paths=[]\n\n for a in d[node]:\n if a not in p:\n p1=self.all_paths(a,destination,dist,p)\n\n if p1!=None:\n if isinstance(p1[0],list):\n for i in range(len(p1)):\n my_paths.append(p1[i])\n else:\n my_paths.append(p1)\n\n if len(my_paths)!=0:\n return my_paths\n else:\n return None",
"def its_because_school(connection):\n print(\"Shortest path between two nodes\")\n answer = connection.execute(connection.get_path, 0, 4)\n for a in answer.values():\n print(a)\n print(\"Centrality closeness\")\n answer = connection.execute(connection.get_closeness, 1, True)\n for a in answer.values():\n print(a)\n print(\"Betweenness centrality\")\n answer = connection.execute(connection.get_betweenness, 2)\n for a in answer.values():\n print(a)\n print(\"Eigenvector\")\n answer = connection.execute(connection.get_eigenvector, 3)\n for a in answer.values():\n print(a)\n print(\"Degree centrality\")\n answer = connection.execute(connection.get_degree_centrality)\n for a in answer.values():\n print(a)",
"def path_maker(orbit_dict, vertex):\n path_list = [vertex]\n while path_list[-1] != 'COM':\n # print(path_list)\n target = path_list[-1]\n for key in orbit_dict.keys():\n if target in orbit_dict[key]:\n path_list.append(key)\n return path_list[::-1]",
"def get_all_sghop_info (nffg, return_paths=False):\n sg_map = {}\n for i in nffg.infras:\n for p in i.ports:\n for fr in p.flowrules:\n # if fr.external:\n # continue\n if fr.id not in sg_map:\n # The path is unordered!!\n path_of_shop = []\n flowclass = NFFGToolBox._extract_flowclass(fr.match.split(\";\"))\n sg_map[fr.id] = [None, None, flowclass, fr.bandwidth, fr.delay]\n # We have to find the BEGINNING of this flowrule sequence.\n inbound_link = NFFGToolBox._find_infra_link(nffg, p, outbound=False,\n accept_dyn=True)\n while inbound_link.type != 'DYNAMIC':\n path_of_shop.append(inbound_link)\n if inbound_link.src.node.type == 'SAP':\n break\n # The link is STATIC, and its src is not SAP so it is an Infra.\n prev_fr, prev_p = \\\n NFFGToolBox._get_flowrule_and_its_starting_port(\n inbound_link.src.node, fr.id)\n NFFGToolBox._check_flow_consistencity(sg_map, prev_fr)\n inbound_link = NFFGToolBox._find_infra_link(nffg, prev_p,\n outbound=False,\n accept_dyn=True)\n # 'inbound_link' is DYNAMIC here or it is STATIC and starts from\n # a SAP,\n # so the sequence starts here\n sg_map[fr.id][0] = inbound_link.src\n\n # We have to find the ENDING of this flowrule sequence.\n output_port = NFFGToolBox._get_output_port_of_flowrule(i, fr)\n if output_port is None:\n continue\n outbound_link = NFFGToolBox._find_infra_link(nffg, output_port,\n outbound=True,\n accept_dyn=True)\n while outbound_link.type != 'DYNAMIC':\n path_of_shop.append(outbound_link)\n if outbound_link.dst.node.type == 'SAP':\n break\n # The link is STATIC and its dst is not a SAP so it is an Infra.\n next_fr, _ = NFFGToolBox._get_flowrule_and_its_starting_port(\n outbound_link.dst.node, fr.id)\n # '_' is 'outbound_link.dst'\n next_output_port = NFFGToolBox._get_output_port_of_flowrule(\n outbound_link.dst.node, next_fr)\n NFFGToolBox._check_flow_consistencity(sg_map, next_fr)\n outbound_link = NFFGToolBox._find_infra_link(nffg,\n next_output_port,\n outbound=True,\n accept_dyn=True)\n # the 'outbound_link' is DYNAMIC here or finishes in a SAP, so the\n # flowrule sequence finished here.\n sg_map[fr.id][1] = outbound_link.dst\n\n if return_paths:\n sg_map[fr.id].append(path_of_shop)\n\n return sg_map"
] | [
"0.6855408",
"0.57694185",
"0.5750554",
"0.5700486",
"0.56512004",
"0.5650302",
"0.56490093",
"0.5596257",
"0.5583864",
"0.5557596",
"0.54984653",
"0.5467053",
"0.5422147",
"0.5395577",
"0.5370559",
"0.53676075",
"0.5362063",
"0.53475356",
"0.53179806",
"0.5292152",
"0.52909833",
"0.5269278",
"0.52608186",
"0.5254127",
"0.52490246",
"0.5235028",
"0.5220369",
"0.52135915",
"0.5208758",
"0.5200231"
] | 0.7151952 | 0 |
return a KEGG KGML_pathway object | def get_kgml_obj(pathway_id):
if pathway_id.startswith('path:'):
pathway_id = pathway_id.replace('path:', '')
resp = requests.get(''.join([Kegg.BASE_URL,
'get/',
pathway_id,
'/kgml']))
if resp.status_code == 200:
return kgml_read(resp.text)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kegg_pathway_enrichment(degs, negs, dbpaths=dbpaths, show_all=True, pthresh=0.01):\n\n deg_num_ko, deg_keggs = cbir_to_kegg(degs)\n neg_num_ko, neg_keggs = cbir_to_kegg(negs)\n\n print \"%-4d kegg pathways from %d DEGs\" % (len(deg_keggs), len(degs) )\n print \"%-4d kegg pathways from %d nonDEGs\" % (len(neg_keggs), len(negs) )\n\n # create dictionary of kegg pathways {pathwaytype:{pathway:[ko1,ko2,ko3]}}\n pathwaytype_dict = {}\n pathway_dict = {}\n pathway_lookup = {}\n\n print \"extracting pathways...\"\n ko1_h = open(dbpaths['kegg'], 'rb')\n for line in ko1_h:\n if line[0] == 'B': # Kegg path type eg: B <b>Replication and repair</b>\n pathtype_f = re.search('B.*<b>(.*)<', line)\n if pathtype_f is not None:\n pathtype = pathtype_f.group(1)\n else:\n pathtype = 'unknown'\n pathwaytype_dict[pathtype] = {}\n elif line[0] == 'C': # Kegg Pathway eg: 01200 Carbon metabolism [PATH:ko01200]\n pathway_f = re.search(\"C +([0-9]*) *(.*)\\[PATH\", line)\n if pathway_f is not None:\n pathway_id = pathway_f.group(1)\n pathway_name = pathway_f.group(2)\n else:\n pathway_id = 'unknown'\n pathway_name = 'unknown'\n pathway_dict[pathway_id] = {}\n pathway_lookup[pathway_id] = pathway_name\n elif line[0] == 'D': # Kegg term eg: K00844 HK; hexokinase [EC:2.7.1.1]\n koterm_f = re.search(\"(K[0-9]*)\", line)\n if koterm_f is not None:\n koterm = koterm_f.group(1)\n else:\n koterm = 'unknown'\n pathwaytype_dict[pathtype][koterm] = 1\n pathway_dict[pathway_id][koterm] = 1\n\n\n print \"calculating enrichment...\"\n pathwaytype_ps = {}\n pathway_ps = {}\n # count number of degs and negs in each pathway:\n for pathwaytype in pathwaytype_dict:\n pwtsize = len(pathwaytype_dict)\n degs_in_path = sum([1 for ko in pathwaytype_dict[pathwaytype] if ko in deg_keggs])\n negs_in_path = sum([1 for ko in pathwaytype_dict[pathwaytype] if ko in neg_keggs])\n degs_not_in = len(deg_keggs) - degs_in_path\n negs_not_in = len(neg_keggs) - negs_in_path\n\n oddrat, pval = fisher_exact([ [degs_in_path, degs_not_in],\n [negs_in_path, negs_not_in] ],\n alternative='greater')\n pathwaytype_ps[pathwaytype] = pval\n\n if pval < pthresh:\n print \"%s\\n \\\n In Path Not in Path\\n\\\n DEG : %-7d %d\\n\\\n non-DEG: %-7d %d\\n\\\n Odds Ratio:%.3f\\n\\\n P-value:%.4f\\n\" % (pathwaytype,degs_in_path,degs_not_in,negs_in_path,negs_not_in,\n oddrat, pval)\n\n\n for pathway in pathway_dict:\n pwtsize = len(pathway_dict)\n degs_in_path = sum([1 for ko in pathway_dict[pathway] if ko in deg_keggs])\n negs_in_path = sum([1 for ko in pathway_dict[pathway] if ko in neg_keggs])\n degs_not_in = len(deg_keggs) - degs_in_path\n negs_not_in = len(neg_keggs) - negs_in_path\n\n oddrat, pval = fisher_exact([ [degs_in_path, degs_not_in],\n [negs_in_path, negs_not_in] ],\n alternative='greater')\n pathway_ps[pathway + ' ' + pathway_lookup[pathway]] = pval\n\n ## Fisher's Exact Test:\n # In Pathway: Not in Pathway:\n # DEG : degs_in_path degs_not_in\n # non-DEG : negs_in_path negs_not_in\n #\n\n return pathwaytype_ps, pathway_ps",
"def get_pathway(identifier, organism):\n pass",
"def explicit_path(cls, ndivsm, kpath_bounds):\n return cls._path(ndivsm, kpath_bounds=kpath_bounds, comment=\"Explicit K-path\")",
"def kwik_path(self):\n return self.model.path",
"def _path(cls, ndivsm, structure=None, kpath_bounds=None, comment=None):\n if kpath_bounds is None:\n # Compute the boundaries from the input structure.\n from pymatgen.symmetry.bandstructure import HighSymmKpath\n sp = HighSymmKpath(structure)\n\n # Flat the array since \"path\" is a a list of lists!\n kpath_labels = []\n for labels in sp.kpath[\"path\"]:\n kpath_labels.extend(labels)\n\n kpath_bounds = []\n for label in kpath_labels:\n red_coord = sp.kpath[\"kpoints\"][label]\n #print(\"label %s, red_coord %s\" % (label, red_coord))\n kpath_bounds.append(red_coord)\n\n return cls(mode=KSamplingModes.path, num_kpts=ndivsm, kpts=kpath_bounds,\n comment=comment if comment else \"K-Path scheme\")",
"def path_from_structure(cls, ndivsm, structure):\n return cls._path(ndivsm, structure=structure, comment=\"K-path generated automatically from structure\")",
"def gen_kpath( atoms, lattice, Nkpts=60 ):\n #\n points = get_special_points(atoms.cell, lattice)\n paths = parse_path_string(special_paths[lattice])\n #print(paths[0])\n kpts_spec = [points[k] for k in paths[0]]\n kpts, x, Xkpt = get_bandpath(kpts_spec,atoms.cell,Nkpts)\n #\n # TODO: also return string for special k-points\" symbol\n # probably using variable `paths`.\n return kpts, x, Xkpt",
"def __getitem__(self, pathway):\n xmlpath = self.local_kgml_dir + pathway + '.xml'\n if exists(xmlpath):\n tree = ElementTree.parse(xmlpath)\n root = tree.getroot()\n else:\n try:\n r = requests.get(self.link_to_kgml.format(pathway), timeout=5, headers=self.headers)\n r.raise_for_status()\n root = ElementTree.fromstring(r.text)\n except requests.exceptions.HTTPError:\n self.logger.warning('Unable to download pathway xml: {}'.format(pathway))\n return None\n except requests.exceptions.ConnectTimeout:\n self.logger.warning('Unable to download pathway xml: {}'.format(pathway))\n return None\n except ElementTree.ParseError:\n self.logger.warning('Unable to parse pathway xml: {}'.format(pathway))\n return None\n except Exception:\n self.logger.warning('Unknown error getting pathway xml: {}'.format(pathway))\n return None\n\n if self.save_local:\n with open(xmlpath, 'w') as fo:\n fo.write(r.text)\n\n return self.parseKGML(root)",
"def path(self):\n if bool(self._path_parameters):\n payload = {inflection.underscore(k): v for k, v, in self._path_parameters.items()}\n else:\n payload = dict()\n PathTuple = namedtuple('PathTuple', sorted(payload))\n the_tuple = PathTuple(**payload)\n return the_tuple",
"def getWKT(self):\n logger.debug(\"Entering in ocentricWKT.getWkt\")\n\n # building WKT string\n wkt = OcentricWKT.GEODCRS % (\n self.getGeoGcsName(), self.getDatumName(), self.getSpheroidName(), self.getRadius(), self.getInverseFlattening(),\n self.getRadius(), self.getAuthorityName(), self.getAuthorityCode()\n )\n\n logger.debug(\"Exiting from ocentricWKT.getWkt\")\n return wkt",
"def getPath(obj):",
"def get_kml_document(kml_obj: fastkml.kml.KML) -> fastkml.Document:\n\t\n\treturn next(kml_obj.features())",
"def get_root(self) -> object:",
"def create_min_path():\n path = {}\n path['biysk'] = ['biysk']\n path['barnaul'] = []\n path['novosibirsk'] = []\n path['belokurikha'] = []\n path['tomsk'] = []\n path['krasnoyarsk'] = []\n path['omsk'] = []\n return path",
"def get_kml_coordinates(kml_obj: fastkml.kml.KML) -> Path:\n\t\n\tgeometry_obj = next(get_kml_document(kml_obj).features()).geometry\n\tcoords_path = [\n\t\tCoordinates(*co_tuple)\n\t\tfor co_tuple in geometry_obj.coords\n\t]\n\t\n\treturn coords_path",
"def parse_pathways(pathway_dataframe):\n return {\n kegg_id: name\n for line, (kegg_id, name) in pathway_dataframe.iterrows()\n }",
"def fk_to_ik(node):\n # Get relevant data\n ik_pole_off = get_parent(node.ik_pole_conn)\n\n world_trans_ik_pole_off = get_world_trans(ik_pole_off)\n world_trans_fk_01 = get_world_trans(node.fk_01_conn)\n world_trans_fk_02 = get_world_trans(node.fk_02_conn)\n world_trans_fk_03 = get_world_trans(node.fk_03_conn)\n world_trans_ik_pole = get_world_trans(node.ik_pole_conn)\n\n world_rot_fk_03 = get_world_rot(node.fk_03_conn)\n\n # calculate ik pole position\n ik_pole_mid_point = (world_trans_fk_01 + world_trans_fk_03) / 2\n ik_pole_base = world_trans_fk_02 - ik_pole_mid_point\n\n # Handle the case when the leg is fully stretched\n if ik_pole_base.length() <= 0.0001:\n rot_fk_01 = get_rot_as_quat(node.fk_01_conn)\n rot_fk_02 = get_rot_as_quat(node.fk_02_conn)\n\n rot = rot_fk_01 * rot_fk_02\n\n ik_pole_base = oMa.MVector(2 * (rot.x * rot.z + rot.w * rot.y),\n 2 * (rot.y * rot.z - rot.w * rot.x),\n 1 - 2 * (rot.x * rot.x + rot.y * rot.y))\n\n ik_pole_len = (world_trans_ik_pole - world_trans_fk_02).length()\n\n pos_ik_pole = world_trans_fk_02 + ik_pole_base.normalize() * ik_pole_len - world_trans_ik_pole_off\n\n # Get the destination MPlugs\n ik_main_trans_plugs = get_trans_plugs(node.ik_main_conn)\n ik_main_rot_plugs = get_rot_plugs(node.ik_main_conn)\n ik_pole_trans_plugs = get_trans_plugs(node.ik_pole_conn)\n\n # Set the new values\n for i, plug in enumerate(ik_main_trans_plugs):\n plug.setFloat(world_trans_fk_03[i])\n\n for i, plug in enumerate(ik_main_rot_plugs):\n plug.setMAngle(oMa.MAngle(world_rot_fk_03[i], oMa.MAngle.kRadians))\n\n for i, plug in enumerate(ik_pole_trans_plugs):\n plug.setFloat(pos_ik_pole[i])",
"def generate_leaf_kml(self, d, content=\"\"):\n return (\"\"\"\\\n <Folder>\n <Region>\n <Lod>\n <minLodPixels>%(minlodpixels)d</minLodPixels>\n <maxLodPixels>%(maxlodpixels)d</maxLodPixels>\n </Lod>\n <LatLonAltBox>\n <north>%(north).14f</north>\n <south>%(south).14f</south>\n <east>%(east).14f</east>\n <west>%(west).14f</west>\n </LatLonAltBox>\n </Region>\n <GroundOverlay>\n <drawOrder>%(draw_order)d</drawOrder>\n <Icon>\n <href>%(image_url)s</href>\n </Icon>\n <LatLonBox>\n <north>%(north).14f</north>\n <south>%(south).14f</south>\n <east>%(east).14f</east>\n <west>%(west).14f</west>\n </LatLonBox>\n </GroundOverlay>\"\"\" % d\n + \"\"\"\\\n%s\n </Folder>\"\"\" % content)",
"def generate_garmin_kml(self, d ):\n return (\"\"\"\n <GroundOverlay>\n <Icon>\n <href>%(image_url)s</href>\n <DrawOrder>%(draw_order)d</DrawOrder>\n </Icon>\n <LatLonBox>\n <north>%(north).14f</north>\n <south>%(south).14f</south>\n <east>%(east).14f</east>\n <west>%(west).14f</west>\n </LatLonBox>\n </GroundOverlay>\"\"\" % d )",
"def getGraph():\n graph = ConjunctiveGraph()\n graph.parse(\"trains.n3\", format=\"n3\", publicID=TT['disk#context'])\n return graph",
"def get_path(self, grid, start_wp, end_wp):\n # The open and closed sets\n openset = set()\n closedset = set()\n\n # Add the starting point to the open set\n openset.add(start_wp)\n\n # While the open set is not empty\n while openset:\n # Find the waypoint in the open set with the lowest G + H score\n current_wp = min(openset, key=lambda o: o.G + o.H)\n # Found the goal\n if current_wp == end_wp:\n path = []\n while current_wp.parent:\n path.append(current_wp)\n current_wp = current_wp.parent\n path.append(current_wp)\n print(\"Path found in {} moves: {}\".format(len(path), path))\n return path[::-1]\n\n # Remove the waypoint from the open set\n openset.remove(current_wp)\n # Add it to the closed set\n closedset.add(current_wp)\n\n # Generate children\n children = current_wp.generate_children(grid)\n\n for waypoint in children:\n # If it is already in the closed set, skip it\n if waypoint in closedset:\n continue\n # Otherwise if it is already in the open set\n if waypoint in openset:\n # Check if we beat the G score\n new_g = current_wp.G + 1\n\n if waypoint.G > new_g:\n # If so, update the waypoint to have a new parent\n waypoint.G = new_g\n waypoint.parent = current_wp\n else:\n # If it isn't in the open set, calculate the G and H score for the waypoint\n if waypoint.orientation != current_wp.orientation:\n waypoint.G = current_wp.G + 1.5 # Avoiding zigzag move by increase the cost of a rotation\n else:\n waypoint.G = current_wp.G + 1\n\n waypoint.H = abs(waypoint.x - end_wp.x) + abs(waypoint.y - end_wp.y)\n # Set the parent to our current_wp\n waypoint.parent = current_wp\n # Add it to the set\n openset.add(waypoint)\n\n # If there is no solution\n return [start_wp, end_wp]",
"def get_graph(self, path):\n raise NotImplementedError",
"def makepkl():\n # Old osgeo.ogr approach\n from osgeo import ogr\n # USTimeZones.kml source is unknown, but was freely available and\n # Has been converted to a pkl file\n kmlpath = os.path.join(os.path.dirname(__file__), 'USTimeZones.kml')\n driver = ogr.GetDriverByName('KML')\n datasource = driver.Open(kmlpath)\n layer = datasource.GetLayer()\n layerDefn = layer.GetLayerDefn()\n oldfeats = [i_ for i_ in layer]\n featDefn = layer.GetLayerDefn()\n feat = ogr.Feature(featDefn)\n nbFeat = layer.GetFeatureCount()\n outfeat = file(uspklpath, 'w')\n featout = [(feat.GetField(0), feat.GetGeometryRef().ExportToWkt()) for feat in oldfeats]\n pickle.dump(featout, file(uspklpath, 'w'))\n\n # WorldTimeZones.kml source is below and was freely available and\n # Has been converted to a pkl file\n # https://productforums.google.com/forum/?fromgroups=#!msg/gec-tools/EdR18tz_5k8/MRPV85OxXIkJ\n kmlpath = os.path.join(os.path.dirname(__file__), 'WorldTimeZones.kml')\n driver = ogr.GetDriverByName('KML')\n datasource = driver.Open(kmlpath)\n layer = datasource.GetLayer()\n layerDefn = layer.GetLayerDefn()\n oldfeats = [i_ for i_ in layer]\n featDefn = layer.GetLayerDefn()\n feat = ogr.Feature(featDefn)\n nbFeat = layer.GetFeatureCount()\n outfeat = file(worldpklpath, 'w')\n featout = [(feat.GetField(0), feat.GetGeometryRef().ExportToWkt()) for feat in oldfeats]\n pickle.dump(featout, file(worldpklpath, 'w'))",
"def getPath(self): #$NON-NLS-1$\r",
"def GetRootKey(self):",
"def lovliest_path(G):\n m = 0\n ma = None\n mb = None\n for node in G.keys():\n for conn in G[node].keys():\n if G[node][conn] > m:\n m = G[node][conn]\n ma = node\n mb = conn\n print \"found lovliest_path of %s to %s with weight %s\" % (ma,mb,m)\n return (ma,mb)",
"def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path",
"def path_convert(self):\n pub_path = Exp_msg()\n for i in self.path:\n epoint = Cordi()\n (epoint.x, epoint.y) = i\n pub_path.bliss.append(epoint)\n return(pub_path)",
"def get_pathways_list(org='hsa'):\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/pathway/', org]))\r\n if resp.status_code == 200:\r\n d = csv.DictReader(resp.text.split('\\n'),\r\n delimiter='\\t',\r\n fieldnames=('id', 'name'))\r\n return [row for row in d]\r\n return {}",
"def get_path2(self, src, dst, weight):\r\n #shortest_paths = self.awareness.shortest_paths\r\n # Create bandwidth-sensitive datapath graph.\r\n if weight == self.WEIGHT_MODEL['hop']:\r\n graph = self.awareness.graph\r\n return nx.shortest_path(graph,src,dst,method='dijkstra')\r\n\r\n elif weight == self.WEIGHT_MODEL['bw']:\r\n graph = self.monitor.graph\r\n path = nx.shortest_path(graph, src, dst, weight='bandwidth', method='dijkstra')\r\n return path\r\n else:\r\n pass"
] | [
"0.57640004",
"0.56418306",
"0.55628306",
"0.55624956",
"0.55441225",
"0.55076295",
"0.54885745",
"0.5414888",
"0.52626216",
"0.5243093",
"0.52299154",
"0.51183206",
"0.50963235",
"0.5062012",
"0.5043354",
"0.5041966",
"0.5028515",
"0.5019303",
"0.5013263",
"0.5009062",
"0.4979949",
"0.4953999",
"0.49529156",
"0.49423438",
"0.49310336",
"0.49250895",
"0.49132526",
"0.4906163",
"0.48971578",
"0.48887184"
] | 0.6912723 | 0 |
Generate the config file for a server. | async def generate_default_config_file(self, server_id, owner_id):
parser = configparser.ConfigParser()
# Create each section that we need by default; future cogs
# may need to handle writing code to modify the config to add sections
parser.add_section('ServerSettings')
parser.add_section('BotAdmins')
parser.add_section('ConfigSettings')
parser.add_section('RoleAssignment')
parser.add_section('JoinPart')
parser.add_section('BettingGame')
parser.add_section('ApiCommands')
parser.set('ServerSettings', 'owner_id', 'NOT_SET')
parser.set('ServerSettings', 'server_id', 'NOT_SET')
parser.set('BotAdmins', 'bot_admin_users', 'NOT_SET')
parser.set('BotAdmins', 'bot_admin_roles', 'NOT_SET')
parser.set('ConfigSettings', 'not_accepted_channel_id', 'NOT_SET')
parser.set('RoleAssignment', 'enabled', 'false')
parser.set('RoleAssignment', 'role_list', 'NOT_SET')
parser.set('RoleAssignment', 'assignment_channel_id', 'NOT_SET')
parser.set('JoinPart', 'member_join_enabled', 'false')
parser.set('JoinPart', 'member_part_enabled', 'false')
parser.set('JoinPart', 'welcome_channel_id', 'NOT_SET')
        parser.set('JoinPart', 'leave_channel_id', 'NOT_SET')
parser.set('JoinPart', 'welcome_message', 'Welcome to {server}\'s Discord, {user}! Relax and have some fun!')
parser.set('JoinPart', 'part_message', '{name} ({display_name}) has left the server.')
parser.set('JoinPart', 'assign_role_enabled', 'false')
        parser.set('JoinPart', 'role_assignment_id', 'NOT_SET')
parser.set('BettingGame', 'minimum_bet', '10')
parser.set('BettingGame', 'enabled', 'false')
parser.set('BettingGame', 'bet_channel_id', 'NOT_SET')
parser.set('BettingGame', 'helpme_cooldown', '86400')
parser.set('BettingGame', 'helpme_minimum', '500')
parser.set('BettingGame', 'force_multiple', '100')
parser.set('BettingGame', 'helpme_start_min', '500')
parser.set('BettingGame', 'helpme_bonus', '100')
parser.set('ApiCommands', 'enabled', 'false')
parser.set('ApiCommands', 'api_channel_id', 'NOT_SET')
with open(
'%s.ini' % (
os.path.join(
self.server_settings_path,
str(server_id))), 'w'
) as configfile:
parser.write(configfile)
return await self.bot.say(
"Configuration file generated. You will need to " \
"configure the file to your desired settings."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_server_config(filename=\"server_config.json\") -> None:\n with open(filename, \"w\") as f:\n data = {\"name\": \"Serveur de test local\", \"ip\": \"0.0.0.0\", \"port\": 25566}\n f.write(json.dumps(data, indent=4))",
"def _dump_server_config(self, _server_config):\n\n _url = _server_config.get(\"url\")\n _server_config.update({\"ssl\":\"off\"})\n if _url.startswith(\"https\"):\n _server_config.update({\"ssl\":\"on\"})\n\n _host = _url[_url.index(\"//\")+2:]\n _host = _host.split(\":\")\n\n if len(_host) == 1:\n _server_config.update({\"name\": _host[0]})\n if _servcer_config.get(\"ssl\") == \"on\":\n _server_config.update({\"port\": 443})\n else:\n _server_config.update({\"port\": 80})\n \n if len(_host) == 2:\n _server_config.update({\"name\": _host[0], \"port\": int(_host[1])})\n \n _config = {\n \"config_status\": CONFIG_STATUS.SERVER,\n \"server\": _server_config\n }\n _dump_config(_config)\n return",
"def configServer():\n try:\n config = open(r\"./server.conf\",\"r+\")\n except IOError,e:\n print e\n return 0\n configLines = []\n try:\n while True:\n configLines.append(config.next())\n except StopIteration:\n pass\n finally:\n config.close()\n configInfo = {}\n for line in configLines:\n if line[0] == \"#\" or line[0] == \"\\n\":\n continue\n configLineArgumentList = line[:-1].split(\"=\")\n key = configLineArgumentList[0]\n value = configLineArgumentList[1]\n configInfo.update({key:value})\n logging.info(\"Configuration done sucssesfully\")\n return configInfo",
"def __create_config_file__(fileparser):\n fileparser['server'] = {\n 'server': Configuration.server + \" # Server IP\",\n 'port': str(Configuration.port) +\n \" # Values allowed: \" + str(Configuration.port_min) +\n \"..\" + str(Configuration.port_max),\n 'certfile': Configuration.certfile +\n \" # Use an absolute path\",\n 'timeout': str(Configuration.timeout) +\n \" # Timeout of the connection request\"\n }\n fileparser['client'] = {\n 'curve1': Configuration.curve1 +\n \" # Values allowed: secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher1': Configuration.cipher1 +\n \" # Values allowed: aes-128-cbc, aes-256-cbc, etc.\",\n 'curve2': Configuration.curve2 +\n \" # Values allowed: None, secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher2': Configuration.cipher2 +\n \" # Values allowed: None, aes-128-cbc, aes-256-cbc, etc.\",\n 'curve3': Configuration.curve3 +\n \" # Values allowed: None, secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher3': Configuration.cipher3 +\n \" # Values allowed: None, aes-128-cbc, aes-256-cbc, etc.\"\n }\n fileparser['ui'] = {\n 'lock': str(Configuration.lock) +\n \" # Lock screen - Values allowed: 0 or a positive integer\",\n 'colour': str(Configuration.colour) +\n \" # If available use colours (1) or not (0)\",\n 'colourB': Configuration.colourB +\n \" # Colour for editable widgets (button, input box...)\",\n 'colourD': Configuration.colourD +\n \" # Colour for decoration (label, frame...)\",\n 'colourT': Configuration.colourT +\n \" # Colour for titles\",\n 'colourM': Configuration.colourM +\n \" # Colour for messages\"\n }\n with open(Configuration.configfile, 'w') as configfile:\n fileparser.write(configfile)\n os.chmod(Configuration.configfile,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IREAD | stat.S_IWRITE)",
"def configfile(ctxt=None):\n if not ctxt:\n ctxt = _ctxt.DEFAULT_CTXT\n with open(_ctxt.TEST_CFG, 'wt') as cfg:\n cfg.write(get_template('server.cfg.jinja').render({'config': ctxt}))\n return _ctxt.TEST_CFG",
"def create_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite-local\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite-local\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out1\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n },\n {\n \"l3out\": {\n \"name\": \"l3out2\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def generate_config(container_data, file_path):\n pass",
"def genConfig():\n\n cfg = open('/home/sevudan/Scripts/projects/topogen/result.cfg','w')\n template = getTemplate()\n G = topo.topology()\n gen_config_lo(G, cfg)\n # Get node from list nodes.\n for node in sorted(G.nodes):\n d = dict(G[node])\n hostname = node\n # Get attributes for node.\n peer = d.keys()\n for peer_node in peer:\n params = d.get(peer_node)\n conf = template.render(\n node=hostname,\n description = peer_node,\n ifd = params.get('ifd'),\n local_ifl = params.get('local_ifl'),\n peer_ifl = params.get('peer_ifl'),\n ifa = params.get('ip_address')\n )\n result = '{}{}'.format(conf,'\\n')\n cfg.write(result)\n cfg.close()",
"def generateDefaultConfig(self):\n\n\t\t# Open config.ini in write mode\n\t\tf = open(self.fileName, \"w\")\n\n\t\t# Set keys to config object\n\t\tself.config.add_section(\"db\")\n\t\tself.config.set(\"db\", \"host\", \"localhost\")\n\t\tself.config.set(\"db\", \"username\", \"root\")\n\t\tself.config.set(\"db\", \"password\", \"\")\n\t\tself.config.set(\"db\", \"database\", \"ripple\")\n\t\tself.config.set(\"db\", \"pingtime\", \"600\")\n\n\t\tself.config.add_section(\"server\")\n\t\tself.config.set(\"server\", \"server\", \"tornado\")\n\t\tself.config.set(\"server\", \"host\", \"0.0.0.0\")\n\t\tself.config.set(\"server\", \"port\", \"5001\")\n\t\tself.config.set(\"server\", \"localizeusers\", \"1\")\n\t\tself.config.set(\"server\", \"outputpackets\", \"0\")\n\t\tself.config.set(\"server\", \"outputrequesttime\", \"0\")\n\t\tself.config.set(\"server\", \"timeoutlooptime\", \"100\")\n\t\tself.config.set(\"server\", \"timeouttime\", \"100\")\n\n\t\tself.config.add_section(\"flask\")\n\t\tself.config.set(\"flask\", \"threaded\", \"1\")\n\t\tself.config.set(\"flask\", \"debug\", \"0\")\n\t\tself.config.set(\"flask\", \"logger\", \"0\")\n\n\t\tself.config.add_section(\"ci\")\n\t\tself.config.set(\"ci\", \"key\", \"changeme\")\n\n\t\t# Write ini to file and close\n\t\tself.config.write(f)\n\t\tf.close()",
"def _generate_global_config() -> str:\n logger = getLogger(__name__)\n dst = os.path.join(os.path.expanduser(\"~\"),\n \".aiscalator/config/aiscalator.conf\")\n logger.info(\"Generating a new configuration file for aiscalator:\\n\\t%s\",\n dst)\n pattern = [\n \"testUserID\",\n \"generation_date\",\n ]\n replace_value = [\n generate_user_id(),\n '\"' + str(datetime\n .utcnow()\n .replace(tzinfo=timezone(\"UTC\"))) +\n '\" // in UTC timezone',\n ]\n dst_dir = os.path.dirname(dst)\n if dst_dir:\n os.makedirs(dst_dir, exist_ok=True)\n copy_replace(data_file(\"../config/template/aiscalator.conf\"),\n dst, pattern=pattern, replace_value=replace_value)\n open(os.path.join(dst_dir, \"apt_packages.txt\"), 'a').close()\n open(os.path.join(dst_dir, \"requirements.txt\"), 'a').close()\n open(os.path.join(dst_dir, \"lab_extensions.txt\"), 'a').close()\n return dst",
"def generate_config(args):\n default_config = resource_string('webrpg', 'scripts/templates/default_config.txt').decode('utf-8')\n if args.sqla_connection_string:\n default_config = default_config.replace('%(sqlalchemy_url)s', args.sqla_connection_string)\n else:\n default_config = default_config.replace('%(sqlalchemy_url)s', get_user_parameter('SQL Alchemy Connection String', 'sqlite:///%(here)s/pyire_test.db'))\n\n with open(args.filename, 'w') as out_f:\n out_f.write(default_config)",
"def generate_config_template():\n lines = ['# Lines starting with # will be skipped.']\n lines.append('# Only one argument on each line.')\n lines.append('#-s This option is always assumed to be true.')\n lines.append('#-p')\n lines.append('#-m')\n lines.append('#-o')\n lines.append('#-c')\n lines.append('-l')\n lines.append('#-a')\n lines.append('#-d')\n\n with open('export_config.txt', 'wb') as f_new:\n f_new.write('\\r\\n'.join(lines))\n print 'Template generated. Edit this file as you please and call this script '\\\n 'with the -f option enabled.'",
"def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? [{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output",
"def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())",
"def server_supervisor_conf(\n path=ASS_SERVICE_SETTINGS['default_conf_file'],\n command=ASS_SERVICE_SETTINGS['command'],\n args=ASS_SERVICE_SETTINGS['args'],\n log_dir=SERVICES_DIR,\n autostart=ASS_SERVICE_SETTINGS['autostart'],\n):\n crete_config(path, command, args, log_dir, 'asoustic_sight_server', autostart)",
"def create_configfile():\n config = ConfigParser.ConfigParser()\n config.add_section('Common')\n config.set('Common', 'renewal days', 20)\n config.set('Common', 'delayed installation days', 5)\n config.set('Common', 'include chain', True)\n config.set('Common', 'account key', './config/key.pem')\n config.add_section('Load Balancer')\n config.set('Load Balancer', 'cluster', True)\n config.set('Load Balancer', 'Host 1', 'lb1.example.com')\n config.set('Load Balancer', 'Host 2', 'lb2.example.com')\n config.set('Load Balancer', 'username', 'admin')\n config.set('Load Balancer', 'password', 'password01')\n config.set('Load Balancer', 'datagroup', 'acme_responses_dg')\n config.set('Load Balancer', 'datagroup partition', 'Common')\n config.add_section('Certificate Authority')\n config.set('Certificate Authority', 'Directory URL',\n 'https://acme-v01.api.letsencrypt.org/directory')\n config.set('Certificate Authority', 'use proxy', False)\n config.set('Certificate Authority', 'proxy',\n 'http://proxy.example.com:8080')\n\n # As the config file contains password, we should be careful with permissions\n with os.fdopen(os.open(CONFIG_FILE, os.O_WRONLY | os.O_CREAT, 0o660), 'w') as config_file:\n config.write(config_file)",
"def mk_vars_file(work_dir, server_cfg, provider_name):\n f = open(work_dir + '/vars', 'w')\n f.write('# generated by pentaho_cloud')\n if (server_cfg.ssl):\n f.write('\\nssl=1')\n else:\n f.write('\\nssl=0')\n if server_cfg.passwords:\n for i, p in enumerate(server_cfg.passwords):\n f.write(\"\\npasswords[%d]='%s'\" % (i, p))\n packages = vers[server_cfg.version]\n for k in packages.keys():\n f.write(\"\\n%s='%s'\" % (k, packages[k]))\n f.write(\"\\nprovider='%s'\" % provider_name)\n f.close()\n return f.name",
"def create_config_file_after(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def get_config_file_content(self):\n\n config_content: List[str] = [\n 'server {',\n\t ' listen {};'.format(self.port),\n '',\n ' ##',\n ' # PHP-FPM',\n ' ##',\n ' #location ~ \\.php$ {',\n \t ' #include /etc/nginx/fastcgi_params;',\n\t\t ' #root /var/www/src;',\n ' #fastcgi_split_path_info ^(.+?\\.php)(/.*)$;',\n ' #fastcgi_pass\tphpfpm:3002;',\n\t\t ' #fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;',\n ' #}',\n '',\n ' location / {',\n\t\t ' root /var/www/src;',\n ' index index.html;'\n\t\t ' #index index.php;',\n\t\t ' #rewrite ^ /index.php?$args last; break;',\n\t ' }',\n '}'\n ]\n return config_content",
"def configs() -> None:\n config_data = cast(bytes, pkgutil.get_data('DLA', 'config.yml'))\n server_config_data = cast(\n bytes, pkgutil.get_data('DLA.server', 'server_config.yml')\n )\n\n current_path = Path('.')\n\n (current_path / 'config.yml').write_bytes(config_data)\n (current_path / 'server_config.yml').write_bytes(server_config_data)\n\n click.echo('Copied default configuration files.')",
"def generate_configuration(directory):\n \n # conf.py file for Sphinx\n conf = osp.join(get_module_source_path('spyderlib.utils.inspector'),\n 'conf.py')\n\n # Docstring layout page (in Jinja):\n layout = osp.join(osp.join(CONFDIR_PATH, 'templates'), 'layout.html')\n \n os.makedirs(osp.join(directory, 'templates'))\n os.makedirs(osp.join(directory, 'static'))\n shutil.copy(conf, directory)\n shutil.copy(layout, osp.join(directory, 'templates'))\n open(osp.join(directory, '__init__.py'), 'w').write('')\n open(osp.join(directory, 'static', 'empty'), 'w').write('')",
"def deploy_cfg():\n return '{buildout}.cfg'.format(buildout=env.host.split('.')[0])",
"def generate_config(self):\n self.log.debug(\"generate-config\")\n self.qemu.args = [\n \"-nodefaults\",\n \"-only-migratable\",\n \"-cpu {cpu_model},enforce\",\n # Watch out: kvm.name is used for sanity checking critical actions.\n \"-name {name},process=kvm.{name}\",\n \"-chroot {{chroot}}\",\n \"-runas nobody\",\n \"-serial file:/var/log/vm/{name}.log\",\n \"-display vnc={{vnc}}\",\n \"-pidfile {{pidfile}}\",\n \"-vga std\",\n # We use this '-m' flag to find what a running VM is actually\n # using at the moment. If this flag is changed then that code must\n # be adapted as well. This is used in incoming.py and qemu.py.\n \"-m {memory}\",\n \"-readconfig {{configfile}}\",\n ]\n self.qemu.args = [a.format(**self.cfg) for a in self.qemu.args]\n\n vhost = ' vhost = \"on\"' if self.vhost else \"\"\n\n netconfig = []\n for net, net_config in sorted(self.cfg[\"interfaces\"].items()):\n ifname = \"t{}{}\".format(net, self.cfg[\"id\"])\n netconfig.append(\n \"\"\"\n[device]\n driver = \"virtio-net-pci\"\n netdev = \"{ifname}\"\n mac = \"{mac}\"\n\n[netdev \"{ifname}\"]\n type = \"tap\"\n ifname = \"{ifname}\"\n script = \"/etc/kvm/kvm-ifup\"\n downscript = \"/etc/kvm/kvm-ifdown\"\n{vhost}\n\"\"\".format(\n ifname=ifname, mac=net_config[\"mac\"], vhost=vhost\n )\n )\n\n with open(self.vm_config_template) as f:\n tpl = f.read()\n accelerator = (\n ' accel = \"{}\"'.format(self.accelerator)\n if self.accelerator\n else \"\"\n )\n machine_type = detect_current_machine_type(self.machine_type)\n self.qemu.config = tpl.format(\n accelerator=accelerator,\n machine_type=machine_type,\n disk_cache_mode=self.qemu.disk_cache_mode,\n network=\"\".join(netconfig),\n **self.cfg,\n )",
"def _createConfigFile(self):\n configFile = self._configFile()\n try:\n with open(configFile) as fh:\n pass\n except IOError:\n try:\n with open(configFile, 'w') as fh:\n fh.write(\"[settings]\\n\")\n fh.write(\"debug = false\\n\")\n fh.write(\"hidefilenames = false\\n\")\n except IOError:\n pass",
"def create_config_file_before(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\",\n },\n {\n \"contract_name\": \"contract-2\",\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def _create_config(env_path):\n s2e_yaml = 's2e.yaml'\n version_path = os.path.join(os.path.dirname(__file__), '..', 'dat', 'VERSION')\n\n with open(version_path, 'r', encoding='utf-8') as fp:\n context = {\n 'creation_time': str(datetime.datetime.now()),\n 'version': fp.read().strip(),\n }\n\n render_template(context, s2e_yaml, os.path.join(env_path, s2e_yaml))",
"def create_yaml(self, filename):\n log_dir = os.environ.get(\"DAOS_TEST_LOG_DIR\", \"/tmp\")\n\n # Convert the parameters into a dictionary to write a yaml file\n yaml_data = {\"servers\": []}\n for name in self.get_param_names():\n if name != \"servers_per_host\":\n value = getattr(self, name).value\n if value is not None and value is not False:\n if name.endswith(\"log_file\"):\n yaml_data[name] = os.path.join(\n log_dir, value)\n else:\n yaml_data[name] = value\n for server_params in self.server_params:\n yaml_data[\"servers\"].append({})\n for name in server_params.get_param_names():\n value = getattr(server_params, name).value\n if value is not None and value is not False:\n if name.endswith(\"log_file\"):\n yaml_data[\"servers\"][-1][name] = os.path.join(\n log_dir, value)\n else:\n yaml_data[\"servers\"][-1][name] = value\n\n # Don't set scm_size when scm_class is \"dcpm\"\n for index in range(len(self.server_params)):\n srv_cfg = yaml_data[\"servers\"][index]\n scm_class = srv_cfg.get(\"scm_class\", \"ram\")\n if scm_class == \"dcpm\" and \"scm_size\" in srv_cfg:\n del srv_cfg[\"scm_size\"]\n\n # Write default_value_set dictionary in to AVOCADO_FILE\n # This will be used to start with daos_server -o option.\n try:\n with open(filename, 'w') as write_file:\n yaml.dump(yaml_data, write_file, default_flow_style=False)\n except Exception as error:\n print(\"<SERVER> Exception occurred: {0}\".format(error))\n raise ServerFailed(\n \"Error writing daos_server command yaml file {}: {}\".format(\n filename, error))\n return filename",
"def GenerateConfig(context):\r\n \r\n module = \"frontend\"\r\n cc = config_merger.ConfigContext(context.properties, module)\r\n \r\n return {\r\n 'resources': [{\r\n 'name': 'simple_frontend',\r\n 'type': 'simple_frontend.py',\r\n 'properties': context.properties\r\n }], \r\n 'outputs': [{\r\n 'name': 'env_name',\r\n 'value': context.properties[\"envName\"]\r\n },{\r\n 'name': 'context',\r\n 'value': cc.configs['CONTEXT']\r\n },{\r\n 'name': 'HQ_Address',\r\n 'value': cc.configs['HQ_Address']\r\n },{\r\n 'name': 'ServiceName',\r\n 'value': cc.configs['ServiceName']\r\n },{\r\n 'name': 'versionNR',\r\n 'value': cc.configs['versionNR']\r\n },{\r\n 'name': 'outp_3',\r\n 'value':str(cc.configs)\r\n }]\r\n \r\n }",
"def run(self):\n write_config(self.filename)\n print('Wrote default config to', self.filename)"
] | [
"0.6640288",
"0.65607977",
"0.6517467",
"0.638775",
"0.6378636",
"0.6340267",
"0.63106245",
"0.62837476",
"0.626191",
"0.6231101",
"0.62090766",
"0.6187176",
"0.6173179",
"0.614317",
"0.6139799",
"0.6139589",
"0.6129176",
"0.60518533",
"0.6040522",
"0.5917994",
"0.59154946",
"0.59021205",
"0.5867946",
"0.5858253",
"0.58582294",
"0.5842656",
"0.5836799",
"0.5815965",
"0.57773614",
"0.5768026"
] | 0.68829614 | 0 |
Resamples image to given spacing and size. | def imgResample(img, spacing, size=[], useNearest=False, origin=[], outsideValue=0):
if len(spacing) != img.GetDimension(): raise Exception("len(spacing) != " + str(img.GetDimension()))
# Set Size
if size == []:
inSpacing = img.GetSpacing()
inSize = img.GetSize()
size = [int(math.ceil(inSize[i]*(inSpacing[i]/spacing[i]))) for i in range(img.GetDimension())]
else:
if len(size) != img.GetDimension(): raise Exception("len(size) != " + str(img.GetDimension()))
if origin == []:
origin = [0]*img.GetDimension()
else:
if len(origin) != img.GetDimension(): raise Exception("len(origin) != " + str(img.GetDimension()))
# Resample input image
interpolator = [sitk.sitkLinear, sitk.sitkNearestNeighbor][useNearest]
identityTransform = sitk.Transform()
identityDirection = list(sitk.AffineTransform(img.GetDimension()).GetMatrix())
return sitk.Resample(img, size, identityTransform, interpolator, origin, spacing, identityDirection, outsideValue) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resample_img(img, img_type, size, spacing):\n resampler = sitk.ResampleImageFilter()\n resampler.SetOutputDirection(img.GetDirection())\n resampler.SetOutputOrigin(img.GetOrigin())\n resampler.SetOutputSpacing(spacing)\n resampler.SetSize(size)\n if img_type is \"Label\":\n resampler.SetInterpolator(sitk.sitkNearestNeighbor)\n elif img_type is \"Image\":\n resampler.SetInterpolator(sitk.sitkLinear)\n imgResampled = resampler.Execute(img)\n\n #axis have to be switched since np.array and keras use them in different order...\n x = np.transpose(sitk.GetArrayFromImage(imgResampled).astype(dtype=np.float), [2, 1, 0])\n return x",
"def resample(scan, image, new_spacing=[1,1,1]):\n spacing = np.array([scan[0].SliceThickness] + scan[0].PixelSpacing, dtype=np.float32)\n\n resize_factor = spacing / new_spacing\n new_real_shape = image.shape * resize_factor\n new_shape = np.round(new_real_shape)\n real_resize_factor = new_shape / image.shape\n new_spacing = spacing / real_resize_factor\n\n image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')\n\n batch = []\n for i in range(0, image.shape[0] - 3, 3):\n tmp = []\n for j in range(3):\n img = image[i + j]\n img= cv2.resize(img, (224, 224))\n tmp.append(img)\n\n tmp = np.array(tmp)\n batch.append(tmp)\n\n batch = np.array(batch)\n\n return batch",
"def resample(medical_image, image, new_spacing=[1, 1, 1]):\n image_shape = np.array(list(medical_image.header.get_data_shape()))\n spacing = np.array(list(medical_image.header.get_zooms()))\n resize_factor = spacing / new_spacing\n\n new_shape = image_shape * resize_factor\n new_shape = np.round(new_shape)\n\n real_resize_factor = new_shape / image_shape\n\n new_spacing = spacing / real_resize_factor\n resampled_image = scipy.ndimage.interpolation.zoom(image, real_resize_factor)\n\n return resampled_image, new_spacing",
"def resample_image(image, out_spacing=(1.0, 1.0, 1.0), out_size=None, is_label=False, pad_value=0):\n\n original_spacing = np.array(image.GetSpacing())\n original_size = np.array(image.GetSize())\n\n if out_size is None:\n out_size = np.round(np.array(original_size * original_spacing / np.array(out_spacing))).astype(int)\n else:\n out_size = np.array(out_size)\n\n original_direction = np.array(image.GetDirection()).reshape(len(original_spacing), -1)\n original_center = (np.array(original_size, dtype=float) - 1.0) / 2.0 * original_spacing\n out_center = (np.array(out_size, dtype=float) - 1.0) / 2.0 * np.array(out_spacing)\n\n original_center = np.matmul(original_direction, original_center)\n out_center = np.matmul(original_direction, out_center)\n out_origin = np.array(image.GetOrigin()) + (original_center - out_center)\n\n resample = sitk.ResampleImageFilter()\n resample.SetOutputSpacing(out_spacing)\n resample.SetSize(out_size.tolist())\n resample.SetOutputDirection(image.GetDirection())\n resample.SetOutputOrigin(out_origin.tolist())\n resample.SetTransform(sitk.Transform())\n resample.SetDefaultPixelValue(pad_value)\n\n if is_label:\n resample.SetInterpolator(sitk.sitkNearestNeighbor)\n else:\n resample.SetInterpolator(sitk.sitkBSpline)\n\n return resample.Execute(image)",
"def resample_image(I,spacing,desiredSize, spline_order=1,zero_boundary=False,identity_map=None):\n if len(I.shape) != len(desiredSize)+2:\n desiredSize = desiredSize[2:]\n sz = np.array(list(I.size()))\n # check that the batch size and the number of channels is the same\n nrOfI = sz[0]\n nrOfC = sz[1]\n\n desiredSizeNC = np.array([nrOfI,nrOfC]+list(desiredSize))\n\n newspacing = spacing*((sz[2::].astype('float')-1.)/(desiredSizeNC[2::].astype('float')-1.)) ###########################################\n if identity_map is not None:\n idDes= identity_map\n else:\n idDes = torch.from_numpy(py_utils.identity_map_multiN(desiredSizeNC,newspacing)).to(I.device)\n # now use this map for resampling\n ID = py_utils.compute_warped_image_multiNC(I, idDes, newspacing, spline_order,zero_boundary)\n\n return ID, newspacing",
"def resample(img, hdr, target_spacing, bspline_order=3, mode='constant'):\n if isinstance(target_spacing, numbers.Number):\n target_spacing = [target_spacing] * img.ndim\n \n # compute zoom values\n zoom_factors = [old / float(new) for new, old in zip(target_spacing, header.get_pixel_spacing(hdr))]\n\n print \"Zoom Factors\"\n print zoom_factors\n\n oldImageShape = img.shape\n \n # zoom image\n img = zoom(img, zoom_factors, order=bspline_order, mode=mode)\n\n newImageShape = img.shape\n old_pixel_spacing = header.get_pixel_spacing(hdr)\n new_pixel_spacing = np.round(np.divide(np.multiply(oldImageShape,old_pixel_spacing),newImageShape),7)\n print \"Target Pixel Spacing\"\n print target_spacing\n\n print \"Actual Pixel Spacing\"\n print new_pixel_spacing\n # set new voxel spacing\n header.set_pixel_spacing(hdr, new_pixel_spacing)\n \n return img, hdr",
"def resample(image, pixsize):\n assert pixsize in [1, 2, 4, 8, 16] # return error and exit otherwise\n imsize = 128/pixsize\n newimage = np.zeros((imsize, imsize))\n for xn, x in enumerate(np.arange(0, 128, pixsize)):\n for yn, y in enumerate(np.arange(0, 128, pixsize)):\n newimage[xn, yn] = np.nansum(image[x:x+pixsize, y:y+pixsize]) # Nansum is important as sum of masked array can be nan\n return newimage",
"def resample_img(image, target_shape, mode='nearest'):\n print(target_shape)\n resize_factor = np.array(target_shape)/image.shape\n resampled = scipy.ndimage.interpolation.zoom(image, resize_factor,\n mode=mode)\n return resampled",
"def resample(self, image, target_shape, mode='nearest'):\n resize_factor = image.shape / np.array(target_shape)\n resampled = scipy.ndimage.interpolation.zoom(image, resize_factor,\n mode=mode)\n return resampled",
"def _resample(ct_scan, origin, spacing, spacing_x, spacing_y, spacing_z,\n interpolator=sitk.sitkLinear):\n img = LungsLoader._get_itk_from_scan(ct_scan, origin, spacing)\n # Compute new dimensions\n spacing = img.GetSpacing()\n size = img.GetSize()\n fact_x = spacing[0] / spacing_x\n fact_y = spacing[1] / spacing_y\n fact_z = spacing[2] / spacing_z\n size_x = int(round(size[0] * fact_x))\n size_y = int(round(size[1] * fact_y))\n size_z = int(round(size[2] * fact_z))\n # to do resampling\n f = sitk.ResampleImageFilter()\n f.SetReferenceImage(img)\n f.SetOutputOrigin(img.GetOrigin())\n f.SetOutputSpacing((spacing_x, spacing_y, spacing_z))\n f.SetSize((size_x, size_y, size_z))\n f.SetInterpolator(interpolator)\n result = f.Execute(img)\n return np.around(sitk.GetArrayFromImage(result))",
"def resample(image_array, target_pixel_dims_list, is_seg=False):\n # if dealing with segmentation images, do not interpolate\n # this is done by choosing interpolation order == 0\n order = 0 if is_seg == True else 3\n\n original_dims_in_pixels = [image_array.shape[d]\n for d in range(len(image_array.shape))]\n compression_list = [target_pixel_dims_list[d] / original_dims_in_pixels[d]\n for d in range(len(image_array.shape))]\n\n if (is_seg):\n resized_image = ndimage.interpolation.zoom(\n image_array, zoom=compression_list, order=order, cval=0)\n return resized_image\n else:\n resized_image = ndimage.interpolation.zoom(\n image_array, zoom=compression_list, order=order, cval=0)\n return resized_image",
"def resample(img, newSize):\n # use float as datatype to preserve float precision\n # will convert to np.uint8 when display result\n # get the new dimensions\n nH, nW = newSize\n if np.ndim(img) == 2:\n H, W = img.shape\n res = np.zeros((nH, nW), dtype=np.float32)\n elif np.ndim(img) == 3:\n H, W, _ = img.shape\n res = np.zeros((nH, nW, _), dtype=np.float32)\n else:\n raise ValueError(\"input image has invalid dimension %s\" % (img.shape))\n\n # interpolate the value for the result\n for idx in range(nH * nW):\n i = idx // nW\n j = idx % nW\n orig_i = int((i * H) // nH)\n orig_j = int((j * W) // nW)\n res[i, j] = img[orig_i, orig_j]\n return res",
"def resample(source, target_size, transform):\n dtype = source.dtype\n dev = source.device\n\n height_, width_ = target_size\n ur_ = torch.arange(width_, dtype=dtype, device=dev) + 0.5\n vr_ = torch.arange(height_, dtype=dtype, device=dev) + 0.5\n\n height, weight = source.shape[2:]\n ur = 2 * ((ur_ + transform[0, 1]) / transform[0, 0]) / weight - 1\n vr = 2 * ((vr_ + transform[1, 1]) / transform[1, 0]) / height - 1\n\n v, u = torch.meshgrid(vr, ur)\n v = v.unsqueeze(2)\n u = u.unsqueeze(2)\n\n grid = torch.cat((u, v), dim=2)\n grid = grid.unsqueeze(0).expand(len(source), -1, -1, -1)\n\n return torch.nn.functional.grid_sample(source, grid)",
"def resample(image, target_affine, target_shape, interpolation='trilinear', pad=False, dtype=None, align_corners=True,\n margin=1e-6):\n if dtype:\n image = image.to(dtype)\n if (torch.all(torch.abs(image.affine - target_affine) < margin)\n and torch.all(torch.tensor(image.shape[-3:]) == torch.tensor(target_shape))):\n return image\n mode = monai_interpolation_mode(interpolation)\n resampler = SpatialResample(mode=mode, align_corners=align_corners)\n\n return resampler(img=image, dst_affine=target_affine, spatial_size=target_shape)",
"def thumbnail(self, size, resample=BICUBIC):\r\n # preserve aspect ratio\r\n x, y = self.size\r\n if x > size[0]:\r\n y = int(max(y * size[0] / x, 1))\r\n x = int(size[0])\r\n if y > size[1]:\r\n x = int(max(x * size[1] / y, 1))\r\n y = int(size[1])\r\n size = x, y\r\n if size == self.size:\r\n return\r\n self.draft(None, size)\r\n self._instance = self.resize(size, resample, image=self._instance)\r\n self.readonly = 0\r\n self.pyaccess = None",
"def resample_by(image_array, compression_factor_list, is_seg=False):\n original_dims_in_pixels = [image_array.shape[d]\n for d in range(len(image_array.shape))]\n target_pixel_dims_list = [int(math.floor(compression_factor_list[d] * original_dims_in_pixels[d]))\n for d in range(len(image_array.shape))]\n\n resized_image = resample(image_array, target_pixel_dims_list, is_seg)\n\n return(resized_image)",
"def image_resample(f, oversamp=1.0):\n img = imageio.imread(f)\n x = np.linspace(0,img.shape[0],img.shape[0])\n y = np.linspace(0,img.shape[1],img.shape[1])\n #\n # 2x oversample the image since we'll dither it.\n #\n xnew = np.linspace(0, img.shape[0], img.shape[0]*oversamp)\n ynew = np.linspace(0, img.shape[1], img.shape[1]*oversamp)\n from scipy import interpolate\n rc = interpolate.interp2d(x, y, img[:,:,0].flatten(), kind='linear')\n gc = interpolate.interp2d(x, y, img[:,:,1].flatten(), kind='linear')\n bc = interpolate.interp2d(x, y, img[:,:,2].flatten(), kind='linear')\n rgb_new = np.stack([rc(xnew.flatten(), ynew.flatten()),\n gc(xnew.flatten(), ynew.flatten()),\n bc(xnew.flatten(), ynew.flatten())],-1).transpose(1,0,2).astype(np.uint8)\n plt.imshow(rgb_new)\n return rgb_new",
"def resampled_to_img(self, target_image, interpolation=None):\n # IMPORTANT: Polymorphism can be implemented by walking the \n # MRO and finding a method that does not raise\n # NotImplementedError. \n raise NotImplementedError",
"def resample(self):\n pass",
"def upsample(x, name, size):\n with tf.name_scope(name):\n outputs = tf.image.resize_bilinear(x, size)\n # Return layer's output\n return outputs",
"def rescale_scan(ct_scan, origin, spacing, new_width, new_height, new_depth,\n normalize=True, interpolator=sitk.sitkLinear):\n if not np.all(origin == 0):\n raise ValueError(\"Please feed only resampled images to this function.\")\n if ct_scan.shape[2] != ct_scan.shape[1]:\n raise ValueError(\"Scans should have the same width and height.\")\n img = LungsLoader._get_itk_from_scan(ct_scan, origin, spacing)\n f = sitk.ResampleImageFilter()\n f.SetReferenceImage(img)\n f.SetOutputOrigin(img.GetOrigin())\n spacing_x, spacing_y, spacing_z = img.GetSpacing()\n size_x, size_y, size_z = img.GetSize()\n spacing_x /= (new_width / size_x)\n spacing_y /= (new_height / size_y)\n spacing_z /= (new_depth / size_z)\n size_x = new_width\n size_y = new_height\n size_z = new_depth\n f.SetOutputSpacing((spacing_x, spacing_y, spacing_z))\n f.SetSize((size_x, size_y, size_z))\n f.SetInterpolator(interpolator)\n result = f.Execute(img)\n new_scan = sitk.GetArrayFromImage(result)\n if normalize:\n new_scan = (new_scan - np.mean(new_scan)) / np.std(new_scan)\n return new_scan, result.GetOrigin(), result.GetSpacing()",
"def resample_filters(self):\n a = len(self.filtered_ids)\n b = len(self.orig_image_ids)\n imbalance_ratio = a / b\n min_ratio = 0.5\n if imbalance_ratio > min_ratio:\n return\n minr = min_ratio\n num_req = int((minr * b - a) / (1 - minr))\n new_ids = ((num_req) // a) * self.filtered_ids\n if num_req % a != 0:\n some_more = random.sample(self.filtered_ids, k=(num_req % a))\n new_ids += some_more\n self.image_ids = self.orig_image_ids + new_ids\n print(\"Resampled total:\", len(self.image_ids))",
"def Resampler(name):\n\n def resample_average(path, dsquery, dstile, image_format):\n for i in range(1, dstile.RasterCount+1):\n res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i), \"average\")\n if res != 0:\n raise ImageOutputException(\"RegenerateOverview() failed with error %d\" % res)\n\n gdal_write(path, dstile, image_format)\n\n def resample_antialias(path, dsquery, dstile, image_format):\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n\n array = numpy.zeros((querysize, querysize, 4), numpy.uint8)\n for i in range(dstile.RasterCount):\n array[:,:,i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i+1), 0, 0, querysize, querysize)\n im = Image.fromarray(array, 'RGBA') # Always four bands\n im1 = im.resize((tilesize,tilesize), Image.ANTIALIAS)\n\n if os.path.exists(path):\n im0 = Image.open(path)\n im1 = Image.composite(im1, im0, im1)\n\n ensure_dir_exists(path)\n\n if image_format == \"JPEG\":\n im1.save(path, image_format, quality=jpeg_quality)\n else:\n im1.save(path, image_format)\n\n\n if name == \"average\":\n return resample_average\n elif name == \"antialias\":\n return resample_antialias\n\n resampling_methods = {\n \"near\" : gdal.GRA_NearestNeighbour,\n \"bilinear\" : gdal.GRA_Bilinear,\n \"cubic\" : gdal.GRA_Cubic,\n \"cubicspline\" : gdal.GRA_CubicSpline,\n \"lanczos\" : gdal.GRA_Lanczos\n }\n\n resampling_method = resampling_methods[name]\n\n def resample_gdal(path, dsquery, dstile, image_format):\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n\n dsquery.SetGeoTransform( (0.0, tilesize / float(querysize), 0.0, 0.0, 0.0, tilesize / float(querysize)) )\n dstile.SetGeoTransform( (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) )\n\n res = gdal.ReprojectImage(dsquery, dstile, None, None, resampling_method)\n if res != 0:\n raise ImageOutputException(\"ReprojectImage() failed with error %d\" % res)\n\n gdal_write(path, dstile, image_format)\n\n return resample_gdal",
"def upsample_bilinear(input, size=None, scale_factor=None):\n return interpolate(input, size, scale_factor, 'linear', align_corners=True)",
"def random_stretch_squeeze(inputs,\n resample_offset,\n seed=None):\n if inputs.shape.rank != 2:\n raise ValueError('inputs.shape.rank:%d must be 2' % inputs.shape.rank)\n\n inputs_shape = inputs.shape.as_list()\n batch_size = inputs_shape[0]\n sequence_length = inputs_shape[1]\n\n image = tf.expand_dims(inputs, 2) # feature\n image = tf.expand_dims(image, 3) # channels\n\n resample = 1.0 # when it is equal to 1 - no stretching or squeezing\n time_stretch_squeeze = tf.random.uniform(\n shape=[batch_size],\n minval=resample - resample_offset,\n maxval=resample + resample_offset,\n dtype=tf.float32,\n seed=seed)\n tf.print(time_stretch_squeeze)\n print(time_stretch_squeeze)\n shape = tf.shape(inputs)\n outputs = tf.TensorArray(inputs.dtype, 0, dynamic_size=True)\n for i in tf.range(batch_size):\n image_resized = tf.image.resize(\n images=image[i],\n size=(tf.cast((tf.cast(shape[1], tf.float32) * time_stretch_squeeze[i]),\n tf.int32), 1),\n preserve_aspect_ratio=False)\n image_resized_cropped = tf.image.resize_with_crop_or_pad(\n image_resized,\n target_height=sequence_length,\n target_width=1,\n )\n\n outputs = outputs.write(i, image_resized_cropped)\n\n outputs = tf.squeeze(outputs.stack(), axis=[2, 3])\n outputs.set_shape(inputs_shape)\n return outputs",
"def downsample_image(img, output_path, ratio=2):\n assert ratio>=1, ratio\n if ratio == 1:\n return True\n old_im = Image.open(img)\n old_size = old_im.size\n new_size = (int(old_size[0]/ratio), int(old_size[1]/ratio))\n\n new_im = old_im.resize(new_size, PIL.Image.LANCZOS)\n new_im.save(output_path)\n return True",
"def upsample_2d(x, size=(2, 2)):\n h, w, _ = x.get_shape().as_list()[1:]\n size_x, size_y = size\n output_h = h * size_x\n output_w = w * size_y\n return tf.image.resize_bilinear(x, (output_h, output_w), align_corners=None, name='upsampling')",
"def resize(im, new_size, preserve_aspect_ratio=True, prefilter=True):\n factors = [new_size[i] / im.shape[i] for i in range(2)]\n\n #assert factors[0] == factors[1], \"Must have same factor for now\"\n f = factors[0] \n \n if f < 1:\n im2 = pyramid_reduce(im, downscale=1/f)\n elif f > 1:\n im2 = pyramid_expand(im, upscale=f)\n else:\n im2 = im\n\n assert im2.shape[:2] == tuple(new_size), \"{0} != {1} (original size: {2})\".format(im2.shape, new_size, im.shape)\n \n return im2",
"def upsampleImage( arr, kernelSize ):\n return scipy.ndimage.zoom( arr, kernelSize )",
"def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BILINEAR,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n size = get_size_dict(size)\n if \"height\" not in size or \"width\" not in size:\n raise ValueError(f\"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}\")\n output_size = (size[\"height\"], size[\"width\"])\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )"
] | [
"0.7943881",
"0.7643791",
"0.75856775",
"0.74093217",
"0.7148586",
"0.6985898",
"0.69473743",
"0.67742175",
"0.6648978",
"0.66368556",
"0.6539819",
"0.6505706",
"0.6414896",
"0.6389543",
"0.6281422",
"0.6270175",
"0.618115",
"0.6171765",
"0.61706066",
"0.6149616",
"0.6079704",
"0.60759836",
"0.6055361",
"0.6035711",
"0.5997403",
"0.599585",
"0.597461",
"0.59737337",
"0.59681654",
"0.5965529"
] | 0.78588027 | 1 |
Normalize image to unity sum | def imgNormalize(img):
constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())
return img/constant | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalise(image):",
"def normalize(img):\r\n return ((img / 255.0) - 0.5) / 0.5",
"def normalize(image):\r\n return image / 127.5 - 1.",
"def normalize(image):\n return image / 127.5 - 1.",
"def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg",
"def normalize_image(image):\n return image / 255.",
"def normalize(img):\r\n min = img.min()\r\n max = img.max()\r\n x = 2.0 * (img - min) / (max - min) - 1.0\r\n return x",
"def normalization(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))",
"def normalise_image(image, use_torch=True):\n if use_torch:\n image = torch.abs(image)\n else:\n image = np.abs(image)\n if (image.max() - image.min()) < 1e-5:\n return image - image.min() + 1e-5\n else:\n return (image - image.min()) / (image.max() - image.min())",
"def _normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2",
"def normalize(img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img / 255",
"def normalize(img):\n\n def normalize_pixel(x):\n return (x - 128) / 128\n\n normalize_vector = np.vectorize(normalize_pixel)\n return normalize_vector(img)",
"def normalization(img):\n max_val = img.max()\n min_val = img.min()\n\n return ((img-min_val)*255)/(max_val-min_val)",
"def normalization_brain(img, mask):\n zone1 = img[mask != 0]\n imge = img.copy()\n imge[mask != 0] = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n imge[mask == 0] = 0\n return imge",
"def normalize(self):\n self.image = rescale_intensity(self.image, out_range=(0, 255))",
"def normalize((image)):\n new = np.zeros((7,7))\n index = 0\n for i in [0, 4, 7, 10, 14, 17, 21]:\n for j in [0, 4, 7, 10, 14, 17, 21]:\n new[index / 7][index % 7] += float(np.sum(image[i:(i+6),j:(j+6)]))/49\n index += 1\n return new",
"def _normalize(image):\n return tf.multiply(tf.subtract(image, 0.5), 2.0)",
"def normalize_image(img):\n min_, max_ = float(np.min(img)), float(np.max(img))\n return (img - min_) / (max_ - min_)",
"def normalize_image(image):\n image = image.astype(np.float32) / 255.0\n\n return image",
"def preprocess(self, img):\n return img - np.mean(img)",
"def normalization_func(img):\n vmin, vmax = img.min(), img.max()\n if vmin != vmax:\n im = (img - vmin) / (vmax - vmin)\n else:\n im = np.ones(img.shape)\n return im",
"def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret",
"def normalize(im: np.ndarray) -> np.ndarray:\n im = im.astype(np.float32)\n return (im - im.min()) / (im.max() - im.min())",
"def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img",
"def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1",
"def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img",
"def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img",
"def normalize(img):\n norm = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_32F, 1)\n cvCopy(img, norm)\n cvNormalize(norm, norm, 1, 0, CV_MINMAX)\n norm_u = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_8U, 1)\n cvConvertScale(norm, norm_u, 255)\n return norm_u",
"def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))",
"def normalize_img(img):\n channel_mean = img.mean(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n channel_std = img.std(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n return (img - channel_mean) / channel_std"
] | [
"0.80221593",
"0.77209437",
"0.7649952",
"0.7572828",
"0.7504156",
"0.74678594",
"0.7401729",
"0.7398767",
"0.7394718",
"0.7377331",
"0.73535275",
"0.7282052",
"0.72608453",
"0.7258828",
"0.72510463",
"0.72449976",
"0.72304803",
"0.72275317",
"0.7222405",
"0.7201099",
"0.71781653",
"0.7117907",
"0.7115866",
"0.70907426",
"0.7085814",
"0.7036521",
"0.7025415",
"0.7011262",
"0.6928852",
"0.6892968"
] | 0.7762996 | 1 |
Sets up the global logger as configured in the `config` object. config The user-defined logging configuration | def setup_logging_with_config(config: DynaBox):
global logger
logger = setup_logging_threatbus(config, logger_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_logger(config):\n filename = config[\"LOGGER_FILE\"]\n log_dir = '/'.join(filename.split('/')[0:-1]) + \"/\"\n\n check_and_create_directory(log_dir)\n\n level = config[\"LOGGER_LOGLEVEL\"].upper()\n filemode = 'a'\n _format = '%(asctime)s %(name)8s %(module)15s %(funcName)12s %(' \\\n 'levelname)7s: %(message)s'\n _dateformat = '(%d.%m.%Y, %H:%M:%S)'\n\n logging.basicConfig(filename=filename, filemode=filemode, level=level,\n format=_format, datefmt=_dateformat)\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"werkzeug\").setLevel(logging.WARNING)\n\n # Display log simultaneously on console\n if config[\"CONSOLE_LOGGING\"]:\n add_terminal_logging(_format, level)",
"def _setup_logging(config):\n if config.debug:\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(message)s\", level=logging.DEBUG\n )\n else:\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(message)s\", level=logging.INFO\n )",
"def set_config(config):\n global _config\n logging.config.dictConfig(config)\n _configure_ulog_bridge()\n _config = config",
"def setup_logging():\n if not app.debug:\n if app.config.get('LOG_CFG'):\n # initialize the Flask logger (removes all handlers)\n _ = app.logger\n dictConfig(app.config.get('LOG_CFG'))\n else:\n # capability with previous config settings\n # Should have LOG_FILE and LOG_LEVEL set\n if app.config.get('LOG_FILE') is not None:\n handler = RotatingFileHandler(app.config.get('LOG_FILE'), maxBytes=10000000, backupCount=100)\n else:\n handler = StreamHandler(stream=sys.stderr)\n\n handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(app.config.get('LOG_LEVEL', DEBUG))\n app.logger.addHandler(handler)",
"def setup():\n config['global']['log.access_file'] = ''\n config['global']['log.error_file'] = ''\n config['global']['log.screen'] = False\n log_level = getattr(logging, config.log_level)\n logging.root.setLevel(logging.NOTSET)\n file_log.setLevel(log_level)\n logging.root.addHandler(file_log)\n if config.log_screen:\n console_log.setLevel(log_level)\n logging.root.addHandler(console_log)",
"def setup_logging():\r\n import ConfigParser # change this to configparser for Python 3\r\n # import logging\r\n import logging.config\r\n global logger\r\n\r\n try:\r\n \tlogging.config.fileConfig(\"celog.conf\")\r\n except ConfigParser.NoSectionError: \r\n\t# if there is no configuration file setup a default configuration\r\n logging.basicConfig(filename='code_extract.log',level= _logging_level,\r\n\t\t\tformat='%(asctime)s %(levelname)s - %(message)s',\r\n\t\t\tdatefmt='%Y %b %d, %a %H:%M:%S'\r\n\t\t\t)\r\n \r\n logger = logging.getLogger('%s' % __name__)\r\n\r\n logger.debug('logger ready')",
"def _configure_logging(config):\n # Initialize exception logging to Sentry with client DSN URL from SENTRY_DSN envvar;\n # does nothing if SENTRY_DSN does not exist, is empty, or is not recognized by Sentry\n sentry_sdk.init()\n if \"publisher\" in config[\"logging\"]:\n # Publish log messages to distributed logging aggregator\n logging_config = config[\"logging\"][\"publisher\"]\n logging_config[\"handlers\"][\"zmq_pub\"][\"context\"] = context\n host = config[\"zmq\"][\"host\"]\n port = config[\"zmq\"][\"ports\"][\"logging\"][NAME]\n addr = f\"tcp://*:{port}\"\n logging_config[\"handlers\"][\"zmq_pub\"][\"interface_or_socket\"] = addr\n logging.config.dictConfig(logging_config)\n for handler in logger.root.handlers:\n if isinstance(handler, zmq.log.handlers.PUBHandler):\n handler.root_topic = NAME\n handler.formatters = {\n logging.DEBUG: logging.Formatter(\"%(message)s\\n\"),\n logging.INFO: logging.Formatter(\"%(message)s\\n\"),\n logging.WARNING: logging.Formatter(\"%(message)s\\n\"),\n logging.ERROR: logging.Formatter(\"%(message)s\\n\"),\n logging.CRITICAL: logging.Formatter(\"%(message)s\\n\"),\n }\n # Not sure why, but we need a brief pause before we start logging\n # messages\n time.sleep(0.25)\n msg = f\"publishing logging messages to {addr}\"\n else:\n # Write log messages to local file system\n #\n # Replace logging RotatingFileHandlers with WatchedFileHandlers so\n # that we notice when log files are rotated and switch to writing to\n # the new ones\n logging_config = config[\"logging\"]\n logging_handlers = logging_config[\"handlers\"]\n rotating_handler = \"logging.handlers.RotatingFileHandler\"\n watched_handler = \"logging.handlers.WatchedFileHandler\"\n for handler in logging_handlers:\n if logging_handlers[handler][\"class\"] == rotating_handler:\n logging_handlers[handler][\"class\"] = watched_handler\n del logging_handlers[handler][\"backupCount\"]\n logging.config.dictConfig(logging_config)\n msg = \"writing logging messages to local file system\"\n return msg",
"def setup_logging(config: Any) -> Logger:\n green = \"\\033[32m\"\n reset = \"\\033[0m\"\n logger = setup_logger(\n name=f\"{green}[ignite]{reset}\",\n level=logging.DEBUG if config.debug else logging.INFO,\n format=\"%(name)s: %(message)s\",\n filepath=config.output_dir / \"training-info.log\",\n )\n return logger",
"def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)",
"def set_loggers(config):\n\n if not config:\n return\n\n logging.config.dictConfig(config)",
"def setup_logging( cfg ):\n global _LOGGING_FORMAT_, _DATE_FORMAT_\n format,date = _LOGGING_FORMAT_,_DATE_FORMAT_\n \n if not cfg.get('logging', True):\n logging.basicConfig(handler=logging.NullHandler)\n return\n \n #check passed in cfgs if formats changed\n if cfg.get('log_format', False):\n format = cfg.get('log_format')\n if cfg.get('log_date_format',False):\n date = cfg.get('log_date_format')\n \n if cfg.get('log_debug', False):\n logging.basicConfig(level=logging.DEBUG,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path', 'errors.log'))\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n logging.getLogger().addHandler(console)\n \n elif cfg.get('log_warnings', False):\n logging.basicConfig(level=logging.WARNING,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path','errors.log'))\n \n else:# Errors are always logged. deal.\n logging.basicConfig(level=logging.ERROR,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path','errors.log'))",
"def config_logging():\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('deepcomp').setLevel(logging.WARNING)\n logging.getLogger('deepcomp.main').setLevel(logging.INFO)\n logging.getLogger('deepcomp.util.simulation').setLevel(logging.INFO)\n # logging.getLogger('deepcomp.env.entities.user').setLevel(logging.DEBUG)\n # logging.getLogger('deepcomp.env.multi_ue.multi_agent').setLevel(logging.DEBUG)\n logging.getLogger('matplotlib').setLevel(logging.WARNING)\n logging.getLogger('tensorflow').setLevel(logging.ERROR)\n gym.logger.set_level(gym.logger.ERROR)\n # structlog.configure(logger_factory=LoggerFactory())\n structlog.configure(logger_factory=LoggerFactory(),\n processors=[\n structlog.stdlib.filter_by_level,\n FloatRounder(digits=LOG_ROUND_DIGITS, not_fields=['sinr', 'signal', 'interference']),\n structlog.dev.ConsoleRenderer()\n ])",
"def configure_logging(config):\n logging.basicConfig(level=logging.getLevelName(config.logging.level),\n format=config.logging.format)\n\n if config.subtask_debug:\n logging.getLogger('mercury.rpc.ping').setLevel(logging.DEBUG)\n logging.getLogger('mercury.rpc.ping2').setLevel(logging.DEBUG)\n logging.getLogger('mercury.rpc.jobs.monitor').setLevel(logging.DEBUG)\n\n if config.asyncio_debug:\n logging.getLogger('mercury.rpc.active_asyncio').setLevel(logging.DEBUG)",
"def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)",
"def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)",
"def _configure_logging(self):\n pass",
"def initialize_logging():\n\n print 'Setting up logging...'\n\n log_level = app.config['LOGGING_LEVEL']\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.info('Logging handler established')",
"def log_settings(config):\n LOGGER.propagate = False\n formatter = ViseronLogFormat(config.logging)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.addFilter(DuplicateFilter())\n LOGGER.addHandler(handler)\n\n LOGGER.setLevel(LOG_LEVELS[config.logging.level])\n logging.getLogger(\"apscheduler.scheduler\").setLevel(logging.ERROR)\n logging.getLogger(\"apscheduler.executors\").setLevel(logging.ERROR)",
"def configure_logging(config):\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"path\"]:\n logfile_path = os.path.expanduser(config[\"path\"])\n else:\n logfile_path = config[\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n log_level = get_logging_level(config.get(\"level\", \"info\"))\n rootlogger.setLevel(log_level)\n formatter_str = set_formatter_string(config)\n formatter = logging.Formatter(formatter_str)\n handler = None\n\n if config.get(\"rich\") is not False:\n handler = RichHandler(\n rich_tracebacks=True,\n show_time=config.get(\"timestamp\", True),\n show_path=config.get(\"extended\", True),\n )\n\n if logfile_path:\n file_handler = RotatingFileHandler(\n logfile_path, maxBytes=config.get(\"file-size\", 50e6)\n )\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n # If we are running in a non-interactive shell (without a tty)\n # then use simple logging instead of rich logging\n # Config value always overrides\n running_in_non_interactive_shell = False\n console = config.get(\"test_logging_console\", sys.stderr)\n if config.get(\"console\") is True:\n handler = logging.StreamHandler(stream=console)\n handler.setFormatter(formatter)\n else:\n if config.get(\"console\") is None and not console.isatty():\n running_in_non_interactive_shell = True\n handler = logging.StreamHandler(stream=console)\n handler.setFormatter(formatter)\n\n # If we still don't have the handler, we are assuming that\n # the user wants to switch off logging, let's log only\n # Critical errors\n if not handler:\n handler = logging.StreamHandler(stream=console)\n handler.setFormatter(formatter)\n log_level = get_logging_level(\"critical\")\n\n if config.get(\"filter\") and handler:\n handler.addFilter(ParsingFilter(config, config[\"filter\"]))\n if handler:\n handler.setLevel(log_level)\n rootlogger.addHandler(handler)\n\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"Started opsdroid %s.\"), __version__)\n if running_in_non_interactive_shell:\n _LOGGER.warning(\n \"Running in non-interactive shell - falling back to simple logging. You can override this using 'logging.config: false'\"\n )",
"def __init__(self, config):\n self._config = config\n self.logging = logging.getLogger(\"Settings\")\n self.logging.propagate = False\n level = logging.INFO\n if \"DEBUG\" in os.environ and (\n os.environ[\"DEBUG\"]\n or os.environ[\"DEBUG\"].lower() in (\"true\", \"t\", \"yes\", \"y\")\n ):\n level = logging.DEBUG\n self.logging.setLevel(level)\n handler = logging.StreamHandler()\n handler.setLevel(level)\n handler.setFormatter(logging.Formatter(\"%(asctime)s [Settings] %(message)s\"))\n self.logging.addHandler(handler)\n self.logging.debug(\"Running in debug mode.\")",
"def config_logger( self, ):\r\n logger = logging.getLogger( self.logger_id )\r\n\r\n logger.handlers = []\r\n logger.setLevel( self.parameters.logging_level ) # DEBUG , INFO WARNING ERROR CRITICAL\r\n\r\n # create the logging file handler.....\r\n fh = logging.FileHandler( self.parameters.pylogging_fn )\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n fh.setFormatter( formatter )\r\n logger.addHandler( fh )\r\n\r\n msg = \"Done config_logger\"\r\n print( msg )\r\n logger.info( msg ) # .debug .info .warn .error\r\n AppGlobal.set_logger( logger )\r\n\r\n return logger",
"def init_logger(config_path, verbosity):\n logging.config.fileConfig(config_path)\n logger = logging.getLogger()\n if verbosity:\n logger.setLevel(logging.DEBUG)",
"def setup_logging(log_dir: Optional[str] = None) -> None:\n config: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"formatters\": {\"console\": {\"format\": \"%(asctime)s:\\t%(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"WARNING\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n LOG_NAME: {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": False}\n },\n }\n if log_dir is not None:\n config[\"loggers\"][LOG_NAME][\"handlers\"].append(\"file\")\n config[\"formatters\"][\"file\"] = {\n \"format\": \"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\n }\n config[\"handlers\"][\"file\"] = {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"file\",\n \"filename\": os.path.join(log_dir, LOG_NAME + \".log\"),\n \"maxBytes\": 1000000,\n \"backupCount\": 3,\n }\n logging.config.dictConfig(config)",
"def setup_logging(log_file):\n\tglobal logger\n\tif log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n\tlogger = logging.getLogger('default')",
"def init():\n global logger\n\n with open(\"/app/log.json\", \"r\") as fd:\n logging.config.dictConfig(json.load(fd))\n\n logger = logging.getLogger()",
"def init_logging():\n global logger\n logger = logging.getLogger('autogen_quartus')",
"def setup_class(cls):\n if os.path.exists(logfilename):\n os.remove(logfilename)\n log = logutils.get_logger(__name__)\n log.root.handlers = []\n logutils.config(mode='standard', console_lvl='stdinfo',\n file_name=logfilename)",
"def _configure_logger():\n try:\n log_dir = os.environ['AUTOMINE_LOG_DIR']\n log_name = _log_name()\n cfg_path = os.path.join(log_dir, 'logging_config.json')\n with open(cfg_path) as src:\n cfg = json.load(src)\n handlers = cfg.get('handlers')\n for handler in iter(handlers.values()):\n filename = handler.get('filename')\n if filename:\n filename = filename.replace('{{AUTOMINE_LOG_DIR}}',\n log_dir)\n filename = filename.replace('{{__name__}}', log_name)\n handler['filename'] = filename\n loggers = cfg.get('loggers')\n if '__name__' in loggers:\n loggers[log_name] = loggers.pop('__name__')\n\n # add logging to the console if env var is set\n log_to_console = 'AUTOMINE_LOG_TO_CONSOLE' in os.environ\n if log_to_console and 'console' in handlers:\n logger_handlers = loggers[log_name].get('handlers')\n if logger_handlers:\n logger_handlers.append('console')\n\n dictConfig(cfg)\n except Exception as err: # pylint: disable=broad-except\n logging.basicConfig()\n raise err",
"def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()",
"def _setup_logging(self, config, channel):\r\n\r\n logfile = getattr(config, '%s_logfile' % channel)\r\n if not logfile:\r\n return\r\n\r\n maxbytes = getattr(config, '%s_logfile_maxbytes' % channel)\r\n backups = getattr(config, '%s_logfile_backups' % channel)\r\n fmt = '%(message)s'\r\n if logfile == 'syslog':\r\n warnings.warn(\"Specifying 'syslog' for filename is deprecated. \"\r\n \"Use %s_syslog instead.\" % channel, DeprecationWarning)\r\n fmt = ' '.join((config.name, fmt))\r\n self.mainlog = loggers.handle_file(\r\n config.options.getLogger(),\r\n filename=logfile,\r\n fmt=fmt,\r\n rotating=not not maxbytes, # optimization\r\n maxbytes=maxbytes,\r\n backups=backups)\r\n\r\n if getattr(config, '%s_syslog' % channel, False):\r\n fmt = config.name + ' %(message)s'\r\n loggers.handle_syslog(self.mainlog, fmt)"
] | [
"0.81296",
"0.7842135",
"0.7503782",
"0.7463685",
"0.7462076",
"0.74387175",
"0.7358102",
"0.7341398",
"0.7239315",
"0.72150725",
"0.70655876",
"0.70594084",
"0.70494735",
"0.7005609",
"0.6990049",
"0.69833773",
"0.698039",
"0.6976895",
"0.6956055",
"0.6928867",
"0.69224036",
"0.6916429",
"0.68983173",
"0.6866957",
"0.6859466",
"0.6827646",
"0.68244046",
"0.68219256",
"0.6814425",
"0.68020785"
] | 0.84035206 | 0 |
Cancels all async tasks. | async def cancel_async_tasks():
global async_tasks
for task in async_tasks:
if task is not asyncio.current_task():
task.cancel()
del task
async_tasks = []
return await asyncio.gather(*async_tasks) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cancel_async_tasks():\n tornado.ioloop.IOLoop.instance().stop()",
"async def cancel_tasks(self) -> None:\n if self._qos_task:\n self._qos_task.cancel()\n try:\n await self._qos_task\n except asyncio.CancelledError:\n pass\n self._qos_task = None\n if self._expire_task:\n self._expire_task.cancel()\n try:\n await self._expire_task\n except asyncio.CancelledError:\n pass\n self._expire_task = None",
"async def _exit_tasks() -> None:\n current_task = asyncio.Task.current_task()\n all_tasks = asyncio.Task.all_tasks()\n not_current_tasks = [task for task in all_tasks if task is not current_task]\n\n for task in not_current_tasks:\n task.cancel()",
"def cancel_tasks(self):\n # tasks will be removed by the done callback.\n for t in self._tasks.values():\n t.cancel()",
"def clear_tasks(self):\n LOGGER.info(\"Removing tasks from event loop `%s` (on thread `%s`)\",\n self.ident, self.thread.ident)\n for task in asyncio.Task.all_tasks(loop=self.loop):\n task.cancel()",
"async def cancel_spider_tasks(self):\r\n tasks = [t for t in asyncio.all_tasks(\r\n ) if t is not asyncio.current_task() and 'coro=<Spider.' in str(t)]\r\n [t.cancel() for t in tasks]\r\n logger.info(f\"Cancelling {len(tasks)} outstanding tasks.\")\r\n return await asyncio.gather(*tasks, return_exceptions=True)",
"async def async_cleanup(self) -> None:\n self._async_cancel_timer()\n if not self._task:\n return\n self._task.cancel()\n try:\n await self._task\n except asyncio.CancelledError:\n pass\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Error cleaning up task\")",
"def shutdown(self, loop):\n\n tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]\n\n for task in tasks:\n task.cancel()\n\n asyncio.gather(*tasks)\n loop.stop()",
"def cancel_all():\n\twhile _running:\n\t\t_running[0].cancel(noerror=True)",
"def cancel_tasks(self) -> None:\n for group in self._queue.values():\n for expected_response in group.values():\n expected_response.set(None)\n self._queue = defaultdict(OrderedDict)",
"async def cancel(self):\n\n await self.cb_0.cancel()\n await self.cb_1.cancel()",
"async def cancel_tasks_by_name(name: str):\n\n tasks = [tasks for tasks in asyncio.all_tasks() if tasks.get_name() == name]\n for task in tasks:\n task.cancel()\n with suppress(asyncio.CancelledError):\n await task",
"async def _exit_jobs() -> None:\n logger.info(f\"Exiting {len(tasks)} active jobs.\")\n\n for task in tasks:\n task.cancel()\n\n await asyncio.gather(*tasks, return_exceptions=True)",
"def do_cancel_all_jobs(self, args):\n self._leet.cancel_all_jobs()",
"def cancel_tasks(tasks: Iterable[asyncio.Future]) -> asyncio.Future:\n\n future = asyncio.get_event_loop().create_future()\n future.set_result(None)\n\n if not tasks:\n return future\n\n cancelled_tasks = []\n exc = asyncio.CancelledError()\n\n for task in tasks:\n if task.done():\n continue\n\n if isinstance(task, asyncio.Task):\n task.cancel()\n cancelled_tasks.append(task)\n\n elif isinstance(task, asyncio.Future):\n task.set_exception(exc)\n\n else:\n log.warning(\n \"Skipping object %r because it's not a Task or Future\", task,\n )\n\n if not cancelled_tasks:\n return future\n\n waiter = asyncio.ensure_future(\n asyncio.gather(\n *cancelled_tasks, return_exceptions=True\n ),\n )\n\n return waiter",
"def cancel_all(self):\n for timer in self._timers:\n timer.Stop()",
"async def clear_all(self) -> None:",
"async def async_cancel(self):\n raise NotImplementedError",
"def cleanup_loop(self, timeout=None):\n\n if timeout is None:\n timeout_string = \"no\"\n timeout = -1\n else:\n timeout_string = \"{0} s\".format(timeout)\n\n logger.info(\"Cleaning up all futures with {0} timeout\"\n .format(timeout_string))\n\n t0 = time.Time.now().unix\n badstatuslist = ['cancelled', 'error', 'lost']\n while len(self.futures):\n elapsedtime = time.Time.now().unix - t0\n if (elapsedtime > timeout) and (timeout >= 0):\n badstatuslist += ['pending']\n self.cleanup(badstatuslist=badstatuslist)\n sleep(10)",
"def hard_cancel(self, exec_info: ExecutionInfo) -> None:\n for task in exec_info.tasks.values():\n if not task.done():\n task.cancel()",
"def abort_everything(self, ensure_ready=True):\n with self.waiting_futures.lock:\n self.waiting_futures.futures.clear()\n while not self.waiting_futures.queue.empty():\n self.waiting_futures.queue.get()",
"def cancel(self) -> asyncio.Future:\n pass # pragma: no cover",
"async def cancel():\n await asyncio.get_running_loop().run_in_executor(None, cancel_inner)",
"def stop(self):\n for task in self._tasks:\n task.stop()",
"def clear_tasks(self):\n self.last_task = None\n self.tasks = []",
"async def stop(self) -> None:\n self._acme_task.cancel()\n self._acme_task = None",
"def cancel(self):\n import googleapiclient\n\n # projects.locations.operations/cancel\n operations = self._api.projects().locations().operations()\n\n for job in self.active_jobs:\n request = operations.cancel(name=job.jobname)\n logger.debug(\"Cancelling operation {}\".format(job.jobid))\n try:\n self._retry_request(request)\n except (Exception, BaseException, googleapiclient.errors.HttpError):\n continue\n\n self.shutdown()",
"def cancel(self):\n self._task.cancel()",
"async def cleanup(self):\n if self.preparing_task:\n self.preparing_task.cancel()",
"def cancel_workers(self):\n pass"
] | [
"0.7555089",
"0.7543023",
"0.7509475",
"0.7426591",
"0.73742604",
"0.7083023",
"0.7019093",
"0.6982878",
"0.68172",
"0.6789454",
"0.6785657",
"0.67286825",
"0.67189366",
"0.6642451",
"0.6610406",
"0.6495576",
"0.64625335",
"0.637788",
"0.6308285",
"0.63072467",
"0.6290157",
"0.6251773",
"0.62453663",
"0.62418395",
"0.622269",
"0.61990094",
"0.61482084",
"0.6127265",
"0.611339",
"0.61093277"
] | 0.80176604 | 0 |
Periodically writes metrics to a file. every: the interval to write metrics, in seconds. to: the filepath to write to. | async def write_metrics(every: int, to: str):
while True:
line = f"pyvast-threatbus,host={socket.gethostname()} "
start_length = len(line)
for m in metrics:
if not m.is_set:
continue
if type(m) is Gauge or type(m) is InfiniteGauge:
if len(line) > start_length:
line += ","
line += f"{m.name}={m.value}"
if type(m) is Summary:
if len(line) > start_length:
line += ","
line += (
f"{m.name}_min={m.min},{m.name}_max={m.max},{m.name}_avg={m.avg}"
)
m.reset()
if len(line) > start_length:
# only update the file if there were metrics collected.
line += f" {time.time_ns()}" # append current nanoseconds ts
with open(to, "a") as f:
f.write(line + "\n")
await asyncio.sleep(every) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writefile():\n\n print(\"Writing to file...\")\n\n # Open the heartbeat file in append mode and save the current time.\n with open(settings.ROOT_DIR + \"/heartbeat\", \"a\") as f:\n f.write(str(time()))",
"def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())",
"def collect_to_file(sensor):\n temperature_settings = settings.SENSORS.get(\"TEMPERATURE\")\n\n frequency = float(temperature_settings[1][1])\n period = float( temperature_settings[2][1])\n last_collection_time = temperature_settings[4][1]\n\n while 1: \n s = []\n count = 0 \n logger.info(\"collecting\")\n \n while(count <= period):\n s.append(os.path.join(time.strftime(\"%Y_%j_%H_%M_%S_\"),str(sensor.readTemperature())))\n time.sleep(1)\n count = count + 1\n print count\n \n write_to_file(s)\n logger.info(\"done counting\")\n last_collection_time = datetime.datetime.utcnow()\n logger.info( last_collection_time)\n time.sleep(frequency)\n\n return True",
"def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')",
"def write_metrics(metrics, db_path):\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n c.execute('DELETE FROM metrics')\n for metric in metrics:\n c.execute(\n 'INSERT INTO metrics '\n '(timestamp, callerid, uniqueid, channel, channel_extension, name) '\n 'VALUES (datetime(?),?,?,?,?,?)',\n (metric['timestamp'],\n metric['callerid'],\n metric['uniqueid'],\n metric['channel'],\n metric['channel_extension'],\n metric['name']))\n conn.commit()\n conn.close()",
"def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)",
"def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))",
"def saveStateRegularly(self, fname, frequency=600):\n loop = LoopingCall(self.saveState, fname)\n loop.start(frequency)\n return loop",
"def write_metrics(log_dir, params):\n metrics_file = os.path.join(log_dir, 'metrics.json')\n with open(metrics_file, 'w') as f:\n f.write(json.dumps(params))",
"def writeMeasurementsToFile(self, location, paths):\n logger.info(\"Measuring the values of paths on the%ssimulator...\" %\n (\" \" if self.name == \"\" else (\" %s \" % self.name)))\n\n try:\n measurementsFileHandler = open(location, \"w\")\n except EnvironmentError as e:\n errMsg = (\"Error writing the path values to the file located \"\n \"at %s: %s\" % (location, e))\n raise GameTimeError(errMsg)\n else:\n with measurementsFileHandler:\n measurements = self.measurePaths(paths)\n for pathNum, value in enumerate(measurements):\n measurementsFileHandler.write(\"%d\\t%d\\n\" %\n ((pathNum + 1), value))\n\n logger.info(\"Measurement of all path values complete.\")",
"def __write_measurement(self, measurement):\n with self.__filename.open(mode='a') as history_file:\n history_file.write(measurement + '\\n')",
"def _writer(path: str, sink: Queue):\n writer = Write2File(path)\n logger = settings.LOGGER\n count = 0\n while True:\n article = sink.get()\n if article == 'EXIT':\n logger.info(f'All {count} articles saved to {path}.')\n return\n writer(article)\n count += 1\n if count % 10000 == 0:\n logger.info(f'{count} articles processed.')",
"def time_write_to_file(file, write_mode):\n outfile = open(file, write_mode)\n new_data = time.localtime(time.time())\n for data in new_data:\n outfile.write(str(data) + ' ')\n outfile.write('\\n')\n outfile.close()",
"def update_freq_dist(filename):\r\n pass",
"def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)",
"def log_tofile(self, inst):\n self._tick += 1\n if self._tick >= self._second:\n self.logger.log(inst)\n self._tick = 0",
"def write(self, fname):\n pass",
"def monitor(self, logdir, model, interval=10):\n self._watch_file = True\n self.experiment.log_other(\"model_uid\", model.uid)\n self.thread = threading.Thread(\n target=self._monitor_log_file,\n args=(\n os.path.join(logdir, f\"{model.uid}.log\"),\n interval,\n ),\n )\n self.thread.start()",
"def writeProfile(fname,prof):\n t = np.linspace(0,1,prof.shape[0],endpoint=False)\n fh = open(fname,'w')\n for x in range(prof.shape[0]):\n fh.write('%.7e %.7e\\n' % (t[x],prof[x]))\n fh.close()",
"def write_file(self, filename, fileformat=\"json\"):\n if self.df_avg is None:\n self.collect_stats()\n if fileformat == \"json\":\n self.write_json(filename)\n elif fileformat == \"excel\":\n self.write_excel(filename)",
"def _update_counters(self, filepath, step):\n\n counters = {}\n\n # Load\n if os.path.exists(self.counters_file):\n with open(self.counters_file) as f:\n counters = json.load(f)\n\n counters[filepath] = dict(step=step)\n\n # Save\n with open(self.counters_file, \"w\") as f:\n json.dump(counters, f, indent=4)",
"def write_to_file(self, filename: str) -> None:",
"def _write_output(\n all_metrics,\n output_path,\n):\n records = []\n for file_pair, metrics in all_metrics.items():\n records.append(\n collections.OrderedDict(\n **file_pair.as_dict(),\n **metrics.as_dict(),\n )\n )\n\n if output_path is None:\n s = _records_to_string(records)\n logging.info(s)\n print(s)\n return\n\n write_metrics_fn = {\n '.txt': _write_txt,\n '.json': _write_json,\n '.tsv': _write_tsv,\n }.get(output_path.suffix)\n if write_metrics_fn is None:\n raise ValueError(\n f'Output path `{output_path}` has unsupported suffix '\n f'`{output_path.suffix}`. Supported values are \".txt\", \".json\", and '\n '\".tsv\".'\n )\n write_metrics_fn(output_path, records)",
"def write_timed(\n self, data: AnyWritableBuf, freq: int | Timer, /, *, mode: int = NORMAL\n ) -> None:",
"def timerAction():\n timer = threading.Timer(30.0, timerAction)\n timer.daemon = True\n timer.start()\n save()",
"def notification_to_file(filename, notification):\n outfile = open(filename, 'a')\n timestamp = time.localtime(time.time())\n for data in timestamp:\n outfile.write(str(data) + ' ') \n outfile.write(notification + '\\n')\n outfile.close()",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def save_ttest_metrics(self, ttest_metrics, fname, no_genes=20):\n\n top_genes = self.fetch_gene_descriptions(ttest_metrics, nih_fetch_num=no_genes, printme=False)\n eids = [int(i[0]) for i in top_genes]\n myfig = self.effect_size_distr(ttest_metrics, genes_of_interest=eids[0:no_genes], return_fig=True)\n plt.savefig(fname+'.png')\n\n with open(fname+'.csv', 'wb') as csvfile:\n writer = csv.writer(csvfile)\n for i in top_genes:\n writer.writerow([i[0], i[3], i[1], i[2], i[4]])",
"def log(self, interval=600, flag='w', filename=None):\n\n flag=flag.strip().lower()\n if flag not in ['a', 'w']:\n raise ValueError(\"Unsupported file opening flag!\")\n\n if filename is None:\n filename = \"{:s}.log\".format(datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n\n eprint(\"Logging into file {:s}{:s}\".format(filename, \n \"...\" if flag == 'w' else ' with appending mode...'))\n\n with open(filename, flag) as f:\n if f.tell() == 0:\n f.write(\"Time, Unix Time, Elapsed time (min), Input H2O Temp (C), Output H2O Temp (C), Helium Temp (C), Oil Temp (C), High Side Pressure (PSIA), Low Side Pressure (PSIA)\\n\")\n f.flush()\n\n # signal handling.\n # stores the original signals\n original_sigint = signal.getsignal(signal.SIGINT)\n original_sighup = signal.getsignal(signal.SIGHUP)\n original_sigterm = signal.getsignal(signal.SIGTERM)\n\n # set the new signal handlers\n signal.signal(signal.SIGINT, lambda s, f: self.set_signal())\n signal.signal(signal.SIGHUP, lambda s, f: self.set_signal())\n signal.signal(signal.SIGTERM, lambda s, f: self.set_signal())\n\n while not self.log_sig:\n try:\n now = time.strftime(\"\\\"%d %b %Y %H:%M:%S\\\"\", time.localtime())\n unix_now = int(time.time())\n\n # elapsed time: minutes\n elapsed = self.readvar('\\x45\\x4c', 0)\n\n # input water temperature: converted from 0.1 degree C to degree C\n inH2O_t = self.readvar('\\x0d\\x8f', 0)/10.0\n\n # output water temperature: degree C\n outH2O_t = self.readvar('\\x0d\\x8f', 1)/10.0\n\n # helium temperature: degree C\n he_t = self.readvar('\\x0d\\x8f', 2)/10.0\n\n # Oil temperature: degree C\n oil_t = self.readvar('\\x0d\\x8f', 3)/10.0\n\n # High side pressure: PSIA\n hi_p = self.readvar('\\xaa\\x50', 0)/10.0\n\n # Low side pressure: PSIA\n lo_p = self.readvar('\\xaa\\x50', 1)/10.0\n\n # Diode volt: micro volts\n # diode_v = self.readvar('\\x8e\\xea',0)\n #\n # # diode temperature: K\n # # this is a more problematic one since two numbers are used\n # diode_t_l, diode_t_h = self.readvar('\\x58\\x13', 0), self.readvar('\\x58\\x13', 1)\n # diode_t = (diode_t_h << 8 + diode_t_l)/100\n\n logstr = \"{:s}, {:d}, {:d}, {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}\\n\".\\\n format(now, unix_now, elapsed, inH2O_t, outH2O_t, he_t, oil_t, hi_p, lo_p)\n eprint(logstr, end='')\n f.write(logstr)\n f.flush()\n\n time.sleep(interval)\n\n except IOError:\n eprint(\"WARNING: status reading failed for a parameter. Retrying...\")\n time.sleep(1)\n continue\n ### END WHILE \n \n # restore the original handlers\n signal.signal(signal.SIGINT, original_sigint)\n signal.signal(signal.SIGHUP, original_sighup)\n signal.signal(signal.SIGTERM, original_sigterm)\n self.log_sig = False\n f.flush()"
] | [
"0.6465442",
"0.63257843",
"0.61773896",
"0.6038639",
"0.5976731",
"0.593824",
"0.592454",
"0.59220535",
"0.58738124",
"0.5758625",
"0.56762564",
"0.5554106",
"0.55432475",
"0.5534424",
"0.5518398",
"0.5488493",
"0.54417235",
"0.54338413",
"0.5410348",
"0.5400014",
"0.535765",
"0.5333409",
"0.5299535",
"0.529697",
"0.5274314",
"0.5255913",
"0.52521235",
"0.52521235",
"0.5249236",
"0.5244523"
] | 0.75694364 | 0 |
Starts a zmq subscriber on the given endpoint and listens for new messages that are published on the given topic (zmq prefix matching). Depending on the topic suffix, Indicators are enqueued to the indicator_queue. | async def receive(pub_endpoint: str, topic: str, indicator_queue: asyncio.Queue):
global logger
socket = zmq.Context().socket(zmq.SUB)
socket.connect(f"tcp://{pub_endpoint}")
socket.setsockopt(zmq.SUBSCRIBE, topic.encode())
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
logger.info(f"Receiving via ZMQ on topic {pub_endpoint}/{topic}")
while True:
socks = dict(
poller.poll(timeout=100)
) # note that smaller timeouts may increase CPU load
if socket in socks and socks[socket] == zmq.POLLIN:
try:
topic, msg = socket.recv().decode().split(" ", 1)
except Exception as e:
logger.error(f"Error decoding message: {e}")
continue
# the topic is suffixed with the message type
if not topic.endswith("indicator"):
# pyvast-threatbus is not (yet) interested in Sightings or SnapshotRequests
logger.debug(f"Skipping unsupported message: {msg}")
continue
await indicator_queue.put(msg)
else:
await asyncio.sleep(0.05) # free event loop for other tasks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subscribe_to_ticks_publisher(topic):\n ConfigFile = \"../config/kuber.conf\"\n config = configparser.ConfigParser()\n config.read(ConfigFile)\n\n zmq_conf = config['ZMQ CONFIGURATION']\n publish_port = zmq_conf['publish_port']\n\n print(\"Subscribing to topic %s at %s\" % (topic, publish_port))\n sub = TopicSubscriber()\n\n try: \n sub.init(topic, publish_port)\n except Exception as e:\n print(\"\"\"\n Subscriber init failed: {}\n \"\"\".format(e))\n sys.exit(0)\n\n # Return the subscriber context.\n return sub",
"def subscribe(self, topic):\n self.topic = topic\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n\n self.client.loop_start()",
"def _subscribe_to_peers(self):\n if not self.config['PEERS']:\n return\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.setsockopt(zmq.SUBSCRIBE, '')\n\n for ip, pub_port, api_port in self.config['PEERS']:\n if not self._is_self(ip, pub_port):\n address = '%s:%s' % (ip, pub_port)\n self.logger.debug('Subscribing to peer at: %s' % address)\n socket.connect('tcp://%s' % address)\n\n def new_msg_handler(sender, msg=None):\n topic, delimiter, packed = msg.partition(' ')\n topic = int(topic)\n message_dict = msgpack.unpackb(packed)\n #self.logger.debug('News for topic %s:%s arrived' %\n # (topic, constants.topics.get(topic)))\n self._handle_topic(topic, message_dict)\n\n sig = signal(constants.NEW_MESSAGE_TOPIC)\n sig.connect(new_msg_handler, weak=False)\n\n while True:\n msg = socket.recv()\n sig.send(self, msg=msg)\n gevent.sleep(.1)",
"def subscribe(endpoint: str, topic: str, snapshot: int, timeout: int = 5):\n global logger\n logger.info(f\"Subscribing to topic '{topic}'...\")\n action = {\"action\": \"subscribe\", \"topic\": topic, \"snapshot\": snapshot}\n return send_manage_message(endpoint, action, timeout)",
"def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)",
"def start_listener(self, input_topic, output_topic, publish_topic):\n try:\n # Create publisher to publish the speed measure\n self._publisher = self.node.create_publisher(\n Int64,\n publish_topic,\n qos_profile=QoSProfile(depth=1)\n )\n\n # Get type of the input topic and subscribe to it\n input_topic_type = get_msg_class(self.node, input_topic,\n blocking=True)\n self._sub_input_topic = self.node.create_subscription(\n input_topic_type,\n input_topic,\n self.input_topic_callback,\n qos_profile=QoSProfile(depth=1)\n )\n\n # Get type of the output topic and subscribe to it\n output_topic_type = get_msg_class(self.node, output_topic,\n blocking=True)\n self._sub_output_topic = self.node.create_subscription(\n output_topic_type,\n output_topic,\n self.output_topic_callback,\n qos_profile=QoSProfile(depth=1)\n )\n except Exception as e:\n error(self.node, \"%s\" % str(e))\n return False\n\n return True",
"def subscribe( self, topic ):\n logging.info( \"Subscribing to topic %s\" %topic )\n try:\n self.client.subscribe( topic )\n except Exception as error:\n print( error )",
"def subscribe_mqtt(self, topic):\n if topic not in self.subscriptions:\n self.subscriptions.append(topic)\n self.mqtt.subscribe(topic)",
"def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)",
"def _subscribe(topic:str) -> None:\n\t\t\tif topic in self.subscribedTopics:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.WARNING, f'MQTT: topic already subscribed: {topic}')\n\t\t\t\treturn\n\t\t\tif (r := self.mqttClient.subscribe(topic))[0] == 0:\n\t\t\t\tt = MQTTTopic(topic = topic, mid=r[1], callback=callback, callbackArgs=kwargs)\n\t\t\t\tself.subscribedTopics[topic] = t\n\t\t\telse:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.ERROR, f'MQTT: cannot subscribe: {r[0]}')",
"def subscribe(self, topic: str, call_back):\n self.callback_dict[topic] = call_back\n self.client.subscribe(topic)",
"def setup_subscriber(publisher_address):\n print(\"Subscribing to server on {}\".format(publisher_address))\n context = zmq.Context()\n socket = context.socket(zmq.SUB)\n socket.connect(publisher_address)\n filter = \"\"\n # the following two lines are for Python2 compatability\n if isinstance(filter, bytes):\n filter = filter.decode(\"ascii\")\n socket.setsockopt_string(zmq.SUBSCRIBE, filter)\n return socket",
"def listen_to_streaming_inference(\n dir,\n output_topic_name,\n type='SQS',\n aws_role=None,\n external_id=None\n):\n config = _read_config(dir)\n streaming_inference_client = StreamingInferenceClient(\n aws_profile=config.aws_profile,\n aws_region=config.aws_region,\n aws_role=aws_role,\n external_id=external_id\n )\n\n return streaming_inference_client.listen_to_streaming_inference(\n output_topic_name=output_topic_name,\n type=type\n )",
"def subscribe(self, broker):\n if self.subscribed == False:\n for attr in self.parm_list:\n if attr.direction == attr.SUB:\n self.logging.debug(\"Subscribing: \"+attr.label)\n self.mqtt_client.subscribe(attr.topic)\n self.subscribed = True\n else:\n self.logging.debug(\"Already subscribed ... ignoring\")",
"def listen(agent, config):\n base_topic = config['base_topic']\n short_topics = ['cpu_percent', 'memory_percent', 'disk_percent']\n topics = [base_topic + '/' + x for x in short_topics]\n seen_topics = set()\n\n def add_topic(peer, sender, bus, topic, headers, messages):\n seen_topics.add(topic)\n\n agent.vip.pubsub.subscribe('pubsub', base_topic,\n callback=add_topic)\n\n max_wait = 1 + max([value for key, value in _test_config.items()\n if key.endswith('_interval')])\n\n all_topics_seen = lambda: set(topics) <= seen_topics\n\n assert poll_gevent_sleep(max_wait, all_topics_seen)",
"def topic_listener(\n topic,\n bootstrap_servers: str,\n offset_reset: str = \"earliest\",\n group: str = None,\n test: bool = False,\n):\n\n # Configure dask client\n dask_client = dask.distributed.Client(\n address=f\"{config['dask_pgir']['host']}:{config['dask_pgir']['scheduler_port']}\"\n )\n\n # init each worker with AlertWorker instance\n worker_initializer = WorkerInitializer()\n dask_client.register_worker_plugin(worker_initializer, name=\"worker-init\")\n\n # Configure consumer connection to Kafka broker\n conf = {\n \"bootstrap.servers\": bootstrap_servers,\n \"default.topic.config\": {\"auto.offset.reset\": offset_reset},\n }\n if group is not None:\n conf[\"group.id\"] = group\n else:\n conf[\"group.id\"] = os.environ.get(\"HOSTNAME\", \"kowalski\")\n\n # make it unique:\n conf[\n \"group.id\"\n ] = f\"{conf['group.id']}_{datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S.%f')}\"\n\n # Start alert stream consumer\n stream_reader = PGIRAlertConsumer(topic, dask_client, instrument=\"PGIR\", **conf)\n\n while True:\n try:\n # poll!\n stream_reader.poll()\n\n except EopError as e:\n # Write when reaching end of partition\n log(e.message)\n if test:\n # when testing, terminate once reached end of partition:\n sys.exit()\n except IndexError:\n log(\"Data cannot be decoded\\n\")\n except UnicodeDecodeError:\n log(\"Unexpected data format received\\n\")\n except KeyboardInterrupt:\n log(\"Aborted by user\\n\")\n sys.exit()\n except Exception as e:\n log(str(e))\n _err = traceback.format_exc()\n log(_err)\n sys.exit()",
"def start_listening(self):\n assert not self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n self._recv_socket = ctx.socket(zmq.SUB)\n self._recv_poller = zmq.Poller()\n self._recv_socket.setsockopt(zmq.SUBSCRIBE, b\"\")\n self._recv_poller.register(self._recv_socket, zmq.POLLIN)\n for i in range(N):\n if i != self.ID:\n address = NODE_INFOS[i].address\n port = NODE_INFOS[i].port\n self._recv_socket.connect(f\"tcp://{address}:{port}\")\n self.listening = True",
"async def subscribe(self, topic: str, callback: aiowamp.SubscriptionHandler, *,\n match_policy: aiowamp.MatchPolicy = None,\n node_key: str = None,\n options: aiowamp.WAMPDict = None) -> int:\n ...",
"def subscribe_broker(self, subscriber):\n # Register given feed callback\n self._broker_subscribers.add(subscriber)",
"def main(connection_file):\n\n ctx = zmq.Context.instance()\n\n with open(connection_file) as f:\n cfg = json.loads(f.read())\n\n reg_url = cfg['interface']\n iopub_port = cfg['iopub']\n iopub_url = f\"{reg_url}:{iopub_port}\"\n\n session = Session(key=cfg['key'].encode('ascii'))\n sub = ctx.socket(zmq.SUB)\n\n # This will subscribe to all messages:\n sub.SUBSCRIBE = b''\n # replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout\n # 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes\n # to everything from engine 1, but there is no way to subscribe to\n # just stdout from everyone.\n # multiple calls to subscribe will add subscriptions, e.g. to subscribe to\n # engine 1's stderr and engine 2's stdout:\n # sub.SUBSCRIBE = b'engine.1.stderr'\n # sub.SUBSCRIBE = b'engine.2.stdout'\n sub.connect(iopub_url)\n while True:\n try:\n idents, msg = session.recv(sub, mode=0)\n except KeyboardInterrupt:\n return\n # ident always length 1 here\n topic = idents[0].decode('utf8', 'replace')\n if msg['msg_type'] == 'stream':\n # stdout/stderr\n # stream names are in msg['content']['name'], if you want to handle\n # them differently\n print(\"{}: {}\".format(topic, msg['content']['text']))\n elif msg['msg_type'] == 'error':\n # Python traceback\n c = msg['content']\n print(topic + ':')\n for line in c['traceback']:\n # indent lines\n print(' ' + line)\n elif msg['msg_type'] == 'error':\n # Python traceback\n c = msg['content']\n print(topic + ':')\n for line in c['traceback']:\n # indent lines\n print(' ' + line)",
"def mqtt_sub():\n global args\n args = parse_args()\n init(args)\n mqtt_connection = setup_connection(args)\n\n connect_future = mqtt_connection.connect()\n # Future.result() waits until a result is available\n connect_future.result()\n print(\"Connected!\")\n\n # Subscribe\n print(f\"Subscribing to topic '{args.subscribe_topic}'...\")\n subscribe_future, packet_id = mqtt_connection.subscribe(\n topic=args.subscribe_topic,\n qos=mqtt.QoS.AT_LEAST_ONCE,\n callback=on_message_received,\n )\n\n subscribe_result = subscribe_future.result()\n print(\"Subscribed with {}\".format(str(subscribe_result[\"qos\"])))\n\n # Wait for all messages to be received.\n # This waits forever if count was set to 0.\n if args.count != 0 and not received_all_event.is_set():\n print(\"Waiting for all messages to be received...\")\n\n received_all_event.wait()\n print(f\"{received_count} message(s) received.\")\n\n # Disconnect\n print(\"Disconnecting...\")\n disconnect_future = mqtt_connection.disconnect()\n disconnect_future.result()\n print(\"Disconnected!\")",
"def listen(self, topics):\n logging.debug(f'Listen to {list(map(lambda x: x.name, topics))}')\n\n for topic in map(lambda x: x.name, topics):\n try:\n self.subscribe(topic)\n logging.debug(f'Subscribed the {topic} topic')\n except Exception:\n logging.debug(f\"Can't subscribe the {topic} topic\")",
"def _create_subscriber(self, topic_name):\n if self._sub:\n self._sub.unregister()\n self._sub = rospy.Subscriber(topic_name, Image, self._image_callback)\n rospy.loginfo(\"Listening to %s -- spinning ..\" % self._sub.name)\n self._widget.setWindowTitle(\"Label plugin, listening to (%s)\" % self._sub.name)",
"def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)",
"def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)",
"def __init__(self,sub_topic=\"\",pub_topic=\"\",data_type=None,tag=\"\",alt_type=None):\n self.sub_topic=sub_topic;\n self.pub_topic=pub_topic;\n self.data_type=data_type;\n self.alt_type=alt_type;\n self.tag=tag;\n self.subscriber=rospy.Subscriber(self.sub_topic+self.tag,self.data_type, self.callback_function,queue_size=20);\n self.message_publisher=None;",
"def subscribe(self, topic, protocol, endpoint):\r\n params = {'ContentType' : 'JSON',\r\n 'TopicArn' : topic,\r\n 'Protocol' : protocol,\r\n 'Endpoint' : endpoint}\r\n response = self.make_request('Subscribe', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)",
"def on_next(self, msg):\n # publish the message to the topics\n retain = msg.retain if hasattr(msg, 'retain') else False\n for (topic, qos) in self.topics:\n self.client.publish(topic, msg, qos, retain)",
"def declare_queue(self, topic):\n #from trove.rpc.impl_kombu import Connection\n from trove.openstack.common.rpc import create_connection\n with create_connection() as conn:\n consumer = conn.declare_topic_consumer(topic=topic)",
"def subscribe(host, mqtt_port, rest_port, topic):\n click.echo(\"Subscribing to topic: \" + topic + \".\")\n service = Service(host, mqtt_port, rest_port)\n if service.mqtt_client.is_connected:\n if not service.subscribe(topic):\n click.secho(\"There was an error subscribing to this topic!\",\n fg=\"red\", bold=True)\n else:\n click.secho(\"The client was unable to connect to the mqtt broker!\",\n fg=\"red\", bold=True)"
] | [
"0.61110973",
"0.5609849",
"0.55908597",
"0.55545825",
"0.5521547",
"0.5483101",
"0.5444674",
"0.54024506",
"0.5349773",
"0.5335479",
"0.52988833",
"0.5185165",
"0.5182671",
"0.51694936",
"0.5152659",
"0.51308894",
"0.512426",
"0.5122229",
"0.50869435",
"0.5054334",
"0.5046267",
"0.5043559",
"0.50409484",
"0.50288296",
"0.50288296",
"0.5023645",
"0.50131434",
"0.5000122",
"0.49848938",
"0.49816984"
] | 0.682294 | 0 |
Turns the given STIX2 Indicator into a valid VAST query and forwards all query results (sightings) to the sightings_queue. vast_binary: the VAST binary command to use with PyVAST | async def retro_match_vast(
vast_binary: str,
vast_endpoint: str,
retro_match_max_events: int,
retro_match_timeout: float,
indicator: Indicator,
sightings_queue: asyncio.Queue,
):
start = time.time()
query = indicator_to_vast_query(indicator)
if not query:
g_retro_match_backlog.dec()
return
global logger, max_open_tasks
async with max_open_tasks:
vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)
kwargs = {}
if retro_match_max_events > 0:
kwargs["max_events"] = retro_match_max_events
proc = await vast.export(**kwargs).json(query).exec()
retro_result = None
try:
retro_result = await asyncio.wait_for(
proc.communicate(),
timeout=retro_match_timeout if retro_match_timeout > 0 else None,
)
except asyncio.TimeoutError:
proc.terminate()
logger.error(
f"Timeout after {retro_match_timeout}s in retro-query for indicator {indicator}"
)
if not retro_result or len(retro_result) != 2:
g_retro_match_backlog.dec()
return
reported = 0
stdout = retro_result[0]
for line in stdout.decode().split("\n"):
line = line.rstrip()
if line:
sighting = query_result_to_sighting(line, indicator)
if not sighting:
logger.error(f"Could not parse VAST query result: {line}")
continue
reported += 1
await sightings_queue.put(sighting)
logger.debug(f"Retro-matched {reported} sighting(s) for indicator: {indicator}")
s_retro_matches_per_ioc.observe(reported)
s_retro_query_time_s_per_ioc.observe(time.time() - start)
g_retro_match_backlog.dec() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def match_intel(\n vast_binary: str,\n vast_endpoint: str,\n indicator_queue: asyncio.Queue,\n sightings_queue: asyncio.Queue,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n):\n global logger, open_tasks\n while True:\n msg = await indicator_queue.get()\n try:\n indicator = parse(msg, allow_custom=True)\n except Exception as e:\n logger.warning(f\"Failed to decode STIX-2 Indicator item {msg}: {e}\")\n continue\n if type(indicator) is not Indicator:\n logger.warning(\n f\"Ignoring unknown message type, expected STIX-2 Indicator: {type(indicator)}\"\n )\n continue\n if (\n ThreatBusSTIX2Constants.X_THREATBUS_UPDATE.value\n in indicator.object_properties()\n and indicator.x_threatbus_update == Operation.REMOVE.value\n ):\n g_iocs_removed.inc()\n if live_match:\n asyncio.create_task(\n remove_vast_ioc(vast_binary, vast_endpoint, indicator)\n )\n else:\n # add new Indicator to matcher / query Indicator retrospectively\n g_iocs_added.inc()\n if retro_match:\n g_retro_match_backlog.inc()\n asyncio.create_task(\n retro_match_vast(\n vast_binary,\n vast_endpoint,\n retro_match_max_events,\n retro_match_timeout,\n indicator,\n sightings_queue,\n )\n )\n if live_match:\n asyncio.create_task(\n ingest_vast_ioc(vast_binary, vast_endpoint, indicator)\n )\n indicator_queue.task_done()",
"async def ingest_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):\n global logger\n vast_ioc = indicator_to_vast_matcher_ioc(indicator)\n if not vast_ioc:\n logger.error(\n f\"Unable to convert STIX-2 Indicator to VAST compatible IoC. Is it a point IoC? {indicator}\"\n )\n return\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n proc = await vast.import_(type=\"intel.indicator\").json().exec(stdin=vast_ioc)\n await proc.wait()\n logger.debug(f\"Ingested indicator for VAST live matching: {indicator}\")",
"async def live_match_vast(\n vast_binary: str, vast_endpoint: str, sightings_queue: asyncio.Queue\n):\n global logger, matcher_name\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n matcher_name = \"threatbus-\" + \"\".join(random.choice(letters) for i in range(10))\n proc = await vast.matcher().start(name=matcher_name).exec()\n # returncode is None as long as the process did not terminate yet\n while proc.returncode is None:\n data = await proc.stdout.readline()\n if not data:\n if not await vast.test_connection():\n logger.error(\"Lost connection to VAST, cannot live-match\")\n # TODO reconnect\n continue\n vast_sighting = data.decode(\"utf-8\").rstrip()\n sighting = matcher_result_to_sighting(vast_sighting)\n if not sighting:\n logger.error(f\"Cannot parse sighting-output from VAST: {vast_sighting}\")\n continue\n g_live_matcher_sightings.inc()\n await sightings_queue.put(sighting)\n stderr = await proc.stderr.read()\n if stderr:\n logger.error(\n \"VAST matcher process exited with message: {}\".format(stderr.decode())\n )\n logger.critical(\"Unexpected exit of VAST matcher process.\")",
"async def start(\n vast_binary: str,\n vast_endpoint: str,\n zmq_endpoint: str,\n snapshot: int,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n max_open_files: int,\n metrics_interval: int,\n metrics_filename: str,\n transform_cmd: str = None,\n sink: str = None,\n):\n global logger, async_tasks, p2p_topic, max_open_tasks, metrics\n # needs to be created inside the same eventloop where it is used\n max_open_tasks = asyncio.Semaphore(max_open_files)\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n assert await vast.test_connection() is True, \"Cannot connect to VAST\"\n\n logger.debug(f\"Calling Threat Bus management endpoint {zmq_endpoint}\")\n reply = subscribe(zmq_endpoint, \"stix2/indicator\", snapshot)\n if not reply_is_success(reply):\n logger.error(\"Subscription failed\")\n return\n pub_endpoint = reply.get(\"pub_endpoint\", None)\n sub_endpoint = reply.get(\"sub_endpoint\", None)\n topic = reply.get(\"topic\", None)\n if not pub_endpoint or not sub_endpoint or not topic:\n logger.error(\"Subscription failed\")\n return\n logger.info(f\"Subscription successful. New p2p_topic: {topic}\")\n if p2p_topic:\n # The 'start' function is called as result of a restart\n # Unsubscribe the old topic as soon as we get a working connection\n logger.info(\"Cleaning up old p2p_topic subscription ...\")\n unsubscribe(zmq_endpoint, p2p_topic)\n atexit.unregister(unsubscribe)\n p2p_topic = topic\n atexit.register(unsubscribe, zmq_endpoint, topic)\n\n async_tasks.append(\n asyncio.create_task(heartbeat(zmq_endpoint, p2p_topic, interval=5))\n )\n\n indicator_queue = asyncio.Queue()\n sightings_queue = asyncio.Queue()\n async_tasks.append(\n asyncio.create_task(\n report_sightings(sub_endpoint, sightings_queue, transform_cmd, sink)\n )\n )\n\n async_tasks.append(\n asyncio.create_task(receive(pub_endpoint, p2p_topic, indicator_queue))\n )\n\n async_tasks.append(\n asyncio.create_task(\n match_intel(\n vast_binary,\n vast_endpoint,\n indicator_queue,\n sightings_queue,\n live_match,\n retro_match,\n retro_match_max_events,\n retro_match_timeout,\n )\n )\n )\n\n if retro_match:\n # add metrics for retro-matching to the metric output\n metrics += [\n s_retro_matches_per_ioc,\n s_retro_query_time_s_per_ioc,\n g_retro_match_backlog,\n ]\n if live_match:\n # add metrics for live-matching to the metric output\n metrics.append(g_live_matcher_sightings)\n async_tasks.append(\n asyncio.create_task(\n live_match_vast(vast_binary, vast_endpoint, sightings_queue)\n )\n )\n\n if metrics_interval:\n async_tasks.append(\n asyncio.create_task(write_metrics(metrics_interval, metrics_filename))\n )\n\n loop = asyncio.get_event_loop()\n for s in [signal.SIGHUP, signal.SIGTERM, signal.SIGINT]:\n loop.add_signal_handler(s, lambda: asyncio.create_task(stop_signal()))\n return await asyncio.gather(*async_tasks)",
"async def remove_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):\n global logger, matcher_name\n type_and_value = get_vast_type_and_value(indicator.pattern)\n if not type_and_value:\n logger.debug(f\"Cannot remove IoC from VAST. Is it a point IoC? {indicator}\")\n return None\n (vast_type, ioc_value) = type_and_value\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n # TODO pass matcher_name once VAST supports more fine-grained deletion\n await vast.matcher().intel().remove(ioc_value, vast_type).exec()\n logger.debug(f\"Removed indicator from VAST live matching: {indicator}\")",
"def iast_binary_svp(\n isotherms,\n mole_fractions,\n pressures,\n branch=\"ads\",\n warningoff=False,\n adsorbed_mole_fraction_guess=None,\n verbose=False,\n ax=None\n):\n\n # Parameter checks\n if len(isotherms) != 2 or len(mole_fractions) != 2:\n raise ParameterError(\n \"The selectivity calculation can only take two components as parameters.\"\n )\n if sum(mole_fractions) != 1:\n raise ParameterError(\"Mole fractions do not add up to unity\")\n if any(iso.pressure_mode.startswith(\"relative\") for iso in isotherms):\n raise ParameterError(\"IAST only runs with isotherms on an absolute pressure basis.\")\n\n # Convert to numpy arrays just in case\n pressures = numpy.asarray(pressures)\n mole_fractions = numpy.asarray(mole_fractions)\n\n # Generate the array of partial pressures\n component_loadings = numpy.zeros((len(pressures), 2))\n\n for index, pressure in enumerate(pressures):\n component_loadings[index, :] = iast_point_fraction(\n isotherms,\n mole_fractions,\n pressure,\n branch=branch,\n warningoff=warningoff,\n adsorbed_mole_fraction_guess=adsorbed_mole_fraction_guess\n )\n\n selectivities = [(x[0] / mole_fractions[0]) / (x[1] / mole_fractions[1])\n for x in component_loadings]\n\n if verbose:\n plot_iast_svp(\n pressures,\n selectivities,\n isotherms[0].adsorbate,\n isotherms[1].adsorbate,\n mole_fractions[0],\n isotherms[0].pressure_unit,\n ax=ax\n )\n\n return dict(\n pressure=pressures,\n selectivity=selectivities,\n )",
"def run(version=1):\n\n # scan header to define our graph parameters\n try:\n header = input(\"Enter graph header:\")\n edges_count, start_edge, finish_edge = header.split(\" \")\n edges_count = int(edges_count)\n logger.debug(\"Scanned edges count: {}; Start:{}, End:{}\".format(\n edges_count, start_edge, finish_edge))\n except ValueError:\n raise ValueError(\"Input data parsing error, \"\n \"the format should be like \\\"3 a b\\\"\")\n\n # scan edges\n edges = scan_edges(edges_count)\n logger.debug(\"Scanned edges: {}\".format(edges))\n\n optimize(edges, start_edge, finish_edge)\n\n print_output(edges)",
"def show_sver(ibs, aid1, aid2, chipmatch_FILT=None, aid2_svtup=None, **kwargs):\n print('\\n[show_sver] ====================== [show_sver]')\n #print(utool.func_str(show_sv, kwargs=locals()))\n if chipmatch_FILT is None or aid2_svtup is None:\n chipmatch_FILT, aid2_svtup = _compute_svvars(ibs, aid1)\n sv_vartup = _get_sv_vartup_for_plottool(ibs, aid1, aid2, chipmatch_FILT, aid2_svtup)\n (chip1, chip2, kpts1, kpts2, fm, homog_tup, aff_tup) = sv_vartup\n if WRITE_SV_DEBUG:\n keys = ('chip1', 'chip2', 'kpts1', 'kpts2', 'fm', 'homog_tup', 'aff_tup')\n utool.save_testdata(*keys)\n print('[vizsv] write test info')\n utool.qflag()\n draw_sv.show_sv(chip1, chip2, kpts1, kpts2, fm, homog_tup=homog_tup, aff_tup=aff_tup, **kwargs)",
"def parse (self, line):\n result = self.program.parseString (line)\n return TranQL_AST (result.asList (), self.backplane)",
"def check_script(vouts):\n for vout in [v for v in vouts[::-1] if v['hex'].startswith('6a')]:\n verb = BlockchainSpider.decode_op_return(vout['hex'])\n action = Spoolverb.from_verb(verb).action\n if action in Spoolverb.supported_actions:\n return verb\n raise Exception(\"Invalid ascribe transaction\")",
"def convert_xml_using_saxon(source_file, template_file):\n if not os.path.isabs(template_file):\n template_file = CFG_BIBCONVERT_XSL_PATH + os.sep + template_file\n source_directory = os.path.dirname(source_file)\n command = \"cd %s && saxon9he-xslt -s:%s -xsl:%s -dtd:off\" % \\\n (source_directory, source_file, template_file)\n exit_code, stdout_buffer, stderr_buffer = run_shell_command(cmd=command)\n if exit_code or stdout_buffer or stderr_buffer:\n # Error may have happened\n raise APSHarvesterConversionError(\"%s: %s\\nOut:%s\" %\n (exit_code,\n stderr_buffer,\n stdout_buffer))",
"def test_vasp_immigrant(immigrant_with_builder):\n immigrant, inputs = immigrant_with_builder\n\n # We need to set the parser explicitly\n inputs.metadata['options']['parser_name'] = 'vasp.vasp'\n result, node = run.get_node(immigrant, **inputs)\n assert node.exit_status == 0\n\n expected_output_nodes = {'misc', 'remote_folder', 'retrieved'}\n assert expected_output_nodes.issubset(set(result))",
"def parse_voting(\n aragon_voting, abi_storage: CachedStorage,\n vote_number: int\n) -> List[Union[Call, str]]:\n script_code = str(aragon_voting.getVote(vote_number)[-1])\n return decode_evm_script(script_code, abi_storage)",
"def main():\n parser = argparse.ArgumentParser(description=\"Tracks adult fish\")\n # add options for argument parser\n parser.add_argument(\"in_path\",\n help=\"Path to the video directory.\")\n parser.add_argument(\"out_path\",\n help=\"Directory for results. Should be empty.\")\n parser.add_argument(\"-x\", \"--keep_temp\", action=\"store_true\",\n help=\"Keep temporary folder after execution.\")\n parser.add_argument(\"--visual\", action=\"store_true\",\n help=\"shows a visual representation of the tracking progress.\")\n\n # parse arguments from command line\n args = parser.parse_args()\n # get all file names and directories ready\n out_dir, temp_dir, video_bases, videos = housekeeping(args)\n borders = []\n for i in range(len(videos)):\n v = videos[i]\n get_borders(borders, temp_dir, v)\n\n for i in range(len(videos)):\n vbn = video_bases[i]\n v = videos[i]\n scaled_video = \"scaled_\" + vbn + \".avi\"\n ffmpeg = Ffmpeg(v, os.path.join(temp_dir, scaled_video))\n ffmpeg.f = \"avi\"\n ffmpeg.vcodec = \"libx264rgb\"\n ffmpeg.width = 480\n ffmpeg.run()\n\n for i in range(len(videos)):\n vbn = video_bases[i]\n pts = tracker(args, temp_dir, vbn)\n border = borders[i]\n tracks_lower, tracks_upper = split_tracks(border, pts)\n analysis = Analysis(tracks_lower, tracks_upper, px_size=0.06)\n analysis.analyze(os.path.join(out_dir, 'stats.txt'), vbn, vel=True)\n\n if not args.keep_temp:\n shutil.rmtree(temp_dir)",
"def build_ibb_graph_from( ea_source, sourcenode, reachgraph ):\r\n\tflowgraph = create_flowgraph_from( 0x4423D0 )\r\n\tadd_disasm_lines_to_flowgraph( flowgraph )\r\n\tflowgraph.write_VCG_File(\"C:\\\\test.vcg\")",
"def execute(input_file, op_exec):\n if op_exec == \"ts2db\":\n # print(prep.ts2db(input_file, None))\n return prep.ts2db(input_file)",
"def scan_report_command():\n # 1. Get input scan id and extended_info flag from Demisto\n scanid = demisto.args().get('scanid')\n extended_info = demisto.args().get('extended_info')\n # 2. Get the scan report from SlashNext API\n response = scan_report(scanid=scanid)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n url_data = response.get('urlData')\n scanid = url_data.get('scanId')\n\n snx_ioc_cont, dbot_score_cont, url_cont = get_snx_url_ioc_context(url_data, is_scan=True)\n\n ec = {\n 'SlashNext.URL(val.Value === obj.Value)': snx_ioc_cont[0],\n 'DBotScore': dbot_score_cont,\n 'URL': url_cont\n }\n\n title = 'SlashNext Phishing Incident Response - Scan Report\\n'\\\n '##### url = {}'.format(url_data.get('url'))\n\n if response.get('normalizeData').get('normalizeStatus') == 1:\n title += ' *\\n*' + response.get('normalizeData').get('normalizeMessage')\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ScanID',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)\n\n if extended_info == 'true' and response.get('swlData') is None:\n # Download Screenshot, HTML and Text Section\n if url_data.get('landingUrl') is None:\n if url_data.get('finalUrl') is not None and url_data.get('finalUrl') != 'N/A':\n tag = 'Final URL = {}'.format(url_data.get('finalUrl'))\n else:\n tag = 'Scanned URL = {}'.format(url_data.get('url'))\n else:\n tag = 'Redirected URL = {}'.format(url_data.get('landingUrl').get('url'))\n\n download_forensics_data(scanid=scanid, tag=tag, screenshot=True, html=True, txt=True)",
"def execute(self, source):\n tree = ast.parse(source=source)\n self._execute(body=tree.body[:-1], mode=\"exec\")\n self._execute(body=tree.body[-1:], mode=\"single\")",
"def scan_rack(csv_file: str, motor: object, RE: Callable, xrun: Callable) -> None:\n df = pd.read_csv(csv_file)\n go_on = ask_for_confirmation(df)\n if go_on:\n print(r'Start the scan ...')\n carry_out_plan(df, motor, RE, xrun)\n else:\n print(r'The scan is rejected.')\n return",
"def optimize(spv_bin):\n\n tmp_dir = utils.tempdir()\n tmp_in = tmp_dir.relpath(\"input.spv\")\n tmp_out = tmp_dir.relpath(\"output.spv\")\n with open(tmp_in, \"wb\") as out_file:\n out_file.write(bytes(spv_bin))\n\n sdk = os.environ.get(\"VULKAN_SDK\", None)\n cmd = os.path.join(sdk, \"bin/spirv-opt\") if sdk else \"spirv-opt\"\n args = [cmd, \"-O\", tmp_in, \"-o\", tmp_out]\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n (out, _) = proc.communicate()\n\n if proc.returncode != 0:\n msg = \"Opitmizationerror using spirv-opt:\\n\"\n msg += py_str(out)\n raise RuntimeError(msg)\n\n return bytearray(open(tmp_out, \"rb\").read())",
"def relay_casttag(c, x, tag):\n assert tag.is_constant(int)\n rtag = get_union_ctr(tag.value, x.abstract.options.get(tag.value))\n v = relay.Var(\"v\")\n clause = adt.Clause(adt.PatternConstructor(rtag, [adt.PatternVar(v)]), v)\n return adt.Match(c.ref(x), [clause], complete=False)",
"async def infernal_search(sequence, job_id):\n sequence = sequence.replace('T', 'U').upper()\n\n params = {\n 'query': os.path.join(INFERNAL_QUERY_DIR, '%s' % job_id),\n 'output': os.path.join(INFERNAL_RESULTS_DIR, '%s' % job_id),\n 'tblout': os.path.join(INFERNAL_RESULTS_DIR, '%s.tblout' % job_id),\n 'rfam_cm': settings.RFAM_CM,\n 'cmscan': settings.CMSCAN_EXECUTABLE,\n 'cpu': 4,\n }\n\n # write out query in fasta format\n with open(params['query'], 'w') as f:\n f.write('>query\\n')\n f.write(sequence)\n f.write('\\n')\n\n command = ('{cmscan} '\n '--notextw ' # unlimit ASCII text output line width\n '--cut_ga ' # use CM's GA gathering cutoffs as reporting thresholds\n '--rfam ' # set heuristic filters at Rfam-level (fast)\n '--nohmmonly ' # never run HMM-only mode, not even for models with 0 basepairs\n '-o {output} ' # direct output to file\n '--tblout {tblout} ' # save parseable table of hits to file\n '--acc ' # prefer accessions over names in output\n '--cpu {cpu} ' # number of CPUs to use\n '{rfam_cm} ' # Rfam.cm file\n '{query} ' # query file\n ).format(**params)\n\n process = await asyncio.subprocess.create_subprocess_exec(\n *shlex.split(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE\n )\n\n return process, params['output']",
"def _compute_svvars(ibs, aid1):\n from ibeis.model.hots import query_helpers\n qaids = [aid1]\n qcomp = query_helpers.get_query_components(ibs, qaids)\n qaid2_chipmatch_FILT = qcomp['qaid2_chipmatch_FILT']\n qaid2_svtups = qcomp['qaid2_svtups']\n chipmatch_FILT = qaid2_chipmatch_FILT[aid1]\n aid2_svtup = qaid2_svtups[aid1]\n return chipmatch_FILT, aid2_svtup",
"def _parse_binary(self, bin):\n packets = self._split_packets(bin) # split data to individual ticks packet\n data = []\n print_message(\"_parse_binary \")\n for packet in packets:\n instrument_token = self._unpack_int(packet, 0, 4)\n segment = instrument_token & 0xff # Retrive segment constant from instrument_token\n\n divisor = 10000000.0 if segment == self.EXCHANGE_MAP[\"cds\"] else 100.0\n _stream_data = {}\n # Parse index packets.\n if segment in self.INDICES:\n d = {}\n if len(packet) == 8:\n _stream_data = {\n \"tradeable\": False,\n \"mode\": self.MODE_LTP,\n \"instrument_token\": instrument_token,\n \"last_price\": self._unpack_int(packet, 4, 8) / divisor\n }\n self.__insert_into_db(self.MODE_LTP, _stream_data)\n data.append(_stream_data)\n elif len(packet) == 28:\n _stream_data = {\n \"tradeable\": False,\n \"mode\": self.MODE_QUOTE,\n \"instrument_token\": instrument_token,\n \"last_price\": self._unpack_int(packet, 4, 8) / divisor,\n \"ohlc\": {\n \"high\": self._unpack_int(packet, 8, 12) / divisor,\n \"low\": self._unpack_int(packet, 12, 16) / divisor,\n \"open\": self._unpack_int(packet, 16, 20) / divisor,\n \"close\": self._unpack_int(packet, 20, 24) / divisor\n },\n \"change\": self._unpack_int(packet, 24, 28) / divisor,\n }\n self.__insert_into_db(self.MODE_QUOTE, _stream_data)\n data.append(_stream_data)\n\n continue\n\n # Parse non-index packets.\n if len(packet) == 8:\n _stream_data = {\n \"tradeable\": True,\n \"mode\": self.MODE_LTP,\n \"instrument_token\": instrument_token,\n \"last_price\": self._unpack_int(packet, 4, 8) / divisor\n }\n self.__insert_into_db(self.MODE_LTP, _stream_data)\n data.append(_stream_data)\n elif len(packet) > 8:\n d = {\n \"tradeable\": True,\n \"mode\": self.MODE_QUOTE,\n \"instrument_token\": instrument_token,\n \"last_price\": self._unpack_int(packet, 4, 8) / divisor,\n \"last_quantity\": self._unpack_int(packet, 8, 12),\n \"average_price\": self._unpack_int(packet, 12, 16) / divisor,\n \"volume\": self._unpack_int(packet, 16, 20),\n \"buy_quantity\": self._unpack_int(packet, 20, 24),\n \"sell_quantity\": self._unpack_int(packet, 24, 28),\n \"ohlc\": {\n \"open\": self._unpack_int(packet, 28, 32) / divisor,\n \"high\": self._unpack_int(packet, 32, 36) / divisor,\n \"low\": self._unpack_int(packet, 36, 40) / divisor,\n \"close\": self._unpack_int(packet, 40, 44) / divisor\n }\n }\n\n # Compute the change price.\n d[\"change\"] = 0\n if (d[\"ohlc\"][\"close\"] != 0):\n d[\"change\"] = (d[\"last_price\"] - d[\"ohlc\"][\"close\"]) * 100 / d[\"ohlc\"][\"close\"]\n\n # Market depth entries.\n depth = {\n \"buy\": [],\n \"sell\": []\n }\n\n if len(packet) > 44:\n # Compile the market depth lists.\n for i, p in enumerate(range(44, len(packet), 12)):\n depth[\"sell\" if i >= 5 else \"buy\"].append({\n \"quantity\": self._unpack_int(packet, p, p + 4),\n \"price\": self._unpack_int(packet, p + 4, p + 8) / divisor,\n # Byte format is unsigned short for orders field\n \"orders\": self._unpack_int(packet, p + 8, p + 10, byte_format=\"H\")\n })\n\n d[\"depth\"] = depth\n self.__insert_into_db(self.MODE_QUOTE, d)\n data.append(d)\n\n return data",
"def execute_statement(self, bql_statement_ast, pretty=True, timing=False, plots=None, yes=False,\n debug=False, pandas_df=None, pandas_output=True, key_column=None,\n return_raw_result=False, force_output=False):\n if timing:\n start_time = time.time()\n\n parser_out = None\n # TODO move pyparsing objects out of client into parser\n if debug:\n parser_out = self.parser.parse_single_statement(bql_statement_ast)\n else:\n try:\n parser_out = self.parser.parse_single_statement(bql_statement_ast)\n except Exception as e:\n raise utils.BayesDBParseError(str(e))\n if parser_out is None:\n print(\"Could not parse command. Try typing 'help' for a list of all commands.\")\n return\n elif not parser_out:\n return\n\n method_name, args_dict, client_dict = parser_out\n if client_dict is None:\n client_dict = {}\n\n # Do stuff now that you know the user's command, but before passing it to engine.\n if method_name == 'execute_file':\n return dict(message='execute_file', bql_string=open(args_dict['filename'], 'r').read())\n elif method_name == 'update_codebook':\n _, codebook_rows = data_utils.read_csv(client_dict['codebook_path'], has_header=True)\n # TODO: require specific codebook_header values? Or don't require a header,\n # and if the first value in the header is actually a data column name, assume\n # the first row is codebook data, not a header.\n\n # Create a dict indexed by column name\n codebook = dict()\n for codebook_row in codebook_rows:\n codebook[codebook_row[0]] = dict(zip(['short_name', 'description', 'value_map'],\n codebook_row[1:]))\n\n args_dict['codebook'] = codebook\n elif (method_name == 'drop_btable') and (not yes):\n # If dropping something, ask for confirmation.\n print(\"Are you sure you want to permanently delete this btable, and all associated \"\n \"models, without any way to get them back? Enter 'y' if yes.\")\n user_confirmation = raw_input()\n if 'y' != user_confirmation.strip():\n return dict(message=\"Operation canceled by user.\")\n elif (method_name == 'drop_models') and (not yes):\n # If dropping something, ask for confirmation.\n print(\"Are you sure you want to permanently delete model(s), without any way to get \"\n \"them back? Enter 'y' if yes.\")\n user_confirmation = raw_input()\n if 'y' != user_confirmation.strip():\n return dict(message=\"Operation canceled by user.\")\n elif method_name == 'load_models':\n pklpath = client_dict['pkl_path']\n try:\n model_data = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath), 'rb'))\n except IOError as e:\n if pklpath[-7:] != '.pkl.gz':\n if pklpath[-4:] == '.pkl':\n model_data = pickle.load(open(self.parser.get_absolute_path(pklpath), 'rb'))\n else:\n pklpath = pklpath + \".pkl.gz\"\n model_data = pickle.load(gzip.open(self.parser.get_absolute_path(pklpath),\n 'rb'))\n else:\n raise utils.BayesDBError('Models file %s could not be found.' % pklpath)\n # This is the more recent version, where schema is stored with models.\n if 'schema' in model_data.keys():\n args_dict['models'] = model_data['models']\n args_dict['model_schema'] = model_data['schema']\n # This support older saved models, where only the model info was stored.\n else:\n args_dict['models'] = model_data\n args_dict['model_schema'] = None\n\n # Older versions of model_schema just had a str cctype as the dict items.\n # Newest version has a dict of cctype and parameters. 
Use this values to\n # test the recency of the models.\n model_schema = args_dict['model_schema']\n if model_schema:\n model_schema_itemtype = type(model_schema[model_schema.keys()[0]])\n else:\n model_schema_itemtype = None\n\n if model_schema is None or model_schema_itemtype != dict:\n args_dict['model_schema'] = None\n if not yes:\n print \"\"\"WARNING! The models you are currently importing were saved without a schema\n or without detailed column parameters (probably from a previous version).\n\n If you are loading models into the same table from which you created them, problems\n are unlikely, unless you have dropped models and then updated the schema.\n\n If you are loading models into a different table from which you created them, you\n should verify that the table schemas are the same.\n\n Please use \"SAVE MODELS FROM <btable> TO <filename.pkl.gz>\" to create an updated copy of your models.\n\n Are you sure you want to load these model(s)?\n \"\"\"\n user_confirmation = raw_input()\n if 'y' != user_confirmation.strip():\n return dict(message=\"Operation canceled by user.\")\n elif method_name == 'create_btable':\n if pandas_df is None:\n header, rows = data_utils.read_csv(client_dict['csv_path'])\n else:\n header, rows = data_utils.read_pandas_df(pandas_df)\n args_dict['header'] = header\n args_dict['raw_T_full'] = rows\n args_dict['key_column'] = key_column\n args_dict['subsample'] = False\n\n if 'codebook_path' in client_dict:\n _, codebook_rows = data_utils.read_csv(client_dict['codebook_path'],\n has_header=True)\n # TODO: require specific codebook_header values? Or don't require a header,\n # and if the first value in the header is actually a data column name, assume\n # the first row is codebook data, not a header.\n\n # Create a dict indexed by column name\n codebook = dict()\n for codebook_row in codebook_rows:\n codebook[codebook_row[0]] = dict(zip(['short_name', 'description', 'value_map'],\n codebook_row[1:]))\n args_dict['codebook'] = codebook\n else:\n warning = dedent(\"\"\"\n WARNING!\n\n You are creating a btable without a codebook, which will make interpretation\n of results more difficult. Codebooks should be in CSV format with each row\n corresponding to one column of the original data. The codebook should have four\n columns:\n\n 1. actual column name\n 2. short column description\n 3. long column description\n 4. value map (optional, only used for categorical columns - should be in JSON\n format)\n \"\"\")\n print(warning)\n\n # Display warning messages and get confirmation if btable is too large.\n # Ask user if they want to turn on subsampling.\n max_columns = 200\n max_rows = 1000\n max_cells = 100000\n message = None\n if not yes:\n if len(rows[0]) > max_columns:\n message = \"The btable you are uploading has %d columns, but BayesDB is \" \\\n \"currently designed to support only %d columns. If you proceed, \" \\\n \"performance may suffer unless you set many columns' datatypes to \" \\\n \"'ignore'. Would you like to continue? Enter 'y' if yes.\" \\\n % (len(rows[0]), max_columns)\n if len(rows) > max_rows:\n message = \"The btable you are uploading has %d rows, but BayesDB is currently \"\\\n \"designed to support only %d rows. If you proceed, performance may \"\\\n \"suffer. Would you like to continue? 
Enter 'y' to continue without \"\\\n \"subsampling, 'n' to abort, 's' to continue by subsampling %d rows, \"\\\n \"or a positive integer to specify the number of rows to be \"\\\n \"subsampled.\" % (len(rows), max_rows, max_rows)\n if len(rows[0])*len(rows) > max_cells:\n message = \"The btable you are uploading has %d cells, but BayesDB is currently\"\\\n \" designed to support only %d cells. If you proceed, performance may\"\\\n \" suffer unless you enable subsampling. Enter 'y' to continue \"\\\n \" without subsampling, 'n' to abort, 's' to continue by subsampling \"\\\n \"%d rows, or a positive integer to specify the number of rows to be \"\\\n \"subsampled.\" % (len(rows)*len(rows[0]), max_cells, max_rows)\n if message is not None:\n print(message)\n user_confirmation = raw_input()\n if 'y' == user_confirmation.strip():\n pass\n elif 'n' == user_confirmation.strip():\n return dict(message=\"Operation canceled by user.\")\n elif 's' == user_confirmation.strip():\n args_dict['subsample'] = min(max_rows, len(rows))\n elif utils.is_int(user_confirmation.strip()):\n args_dict['subsample'] = int(user_confirmation.strip())\n else:\n return dict(message=\"Operation canceled by user.\")\n elif method_name in ['label_columns', 'update_metadata']:\n if client_dict['source'] == 'file':\n header, rows = data_utils.read_csv(client_dict['csv_path'])\n args_dict['mappings'] = {key: value for key, value in rows}\n\n # Call engine.\n result = self.call_bayesdb_engine(method_name, args_dict, debug)\n\n # If error occurred, exit now.\n if 'error' in result and result['error']:\n if pretty:\n print(result['message'])\n if force_output:\n return result\n else:\n return result['message']\n else:\n return result\n\n # Do stuff now that engine has given you output, but before printing the result.\n result = self.callback(method_name, args_dict, client_dict, result)\n\n if return_raw_result:\n raw_result = {\n 'result': result,\n 'method_name': method_name,\n 'client_dict': client_dict}\n print(\"returning raw result for %s\" % (method_name))\n return raw_result\n\n assert type(result) != int\n\n if timing:\n end_time = time.time()\n print('Elapsed time: %.2f seconds.' 
% (end_time - start_time))\n\n if plots is None:\n plots = 'DISPLAY' in os.environ.keys()\n\n if 'matrix' in result and (plots or client_dict['filename']):\n # Plot matrices\n plotting_utils.plot_matrix(result['matrix'], result['column_names'], result['title'],\n client_dict['filename'])\n if pretty:\n if 'column_lists' in result:\n print(self.pretty_print(dict(column_lists=result['column_lists'])))\n\n if force_output:\n return result\n else:\n return self.pretty_print(result)\n else:\n return result\n if ('plot' in client_dict and client_dict['plot']):\n if (plots or client_dict['filename']):\n # Plot generalized histograms or scatterplots\n\n try:\n plotting_M_c = result['metadata_full']['M_c_full']\n except KeyError:\n plotting_M_c = result['M_c']\n\n plot_remove_key = method_name in ['select', 'infer']\n plotting_utils.plot_general_histogram(result['column_names'], result['data'],\n plotting_M_c, result['schema_full'],\n client_dict['filename'],\n client_dict['scatter'],\n remove_key=plot_remove_key)\n return self.pretty_print(result)\n else:\n if 'message' not in result:\n result['message'] = \"\"\n result['message'] = \"Your query indicates that you would like to make a plot, but \"\\\n \"in order to do so, you must either enable plotting in a \"\\\n \"window or specify a filename to save to by appending 'SAVE \"\\\n \"TO <filename>' to this command.\\n\" + result['message']\n\n if pretty:\n pp = self.pretty_print(result)\n print(pp)\n\n # Print warnings last so they're readable without scrolling backwards.\n if 'warnings' in result:\n \"\"\" Pretty-print warnings. \"\"\"\n for warning in result['warnings']:\n print('WARNING: %s' % warning)\n\n if pandas_output and 'data' in result and 'column_labels' in result:\n result_pandas_df = data_utils.construct_pandas_df(result)\n return result_pandas_df\n else:\n return result",
"def execute(self,s,v):\n self.cur.execute(s,v)",
"def test_vs_scoring_vina():\n vs = virtualscreening(n_cpu=1)\n vs.load_ligands('sdf', os.path.join(test_data_dir, 'data/dude/xiap/crystal_ligand.sdf'))\n vs.score(function='autodock_vina',\n protein=os.path.join(test_data_dir, 'data/dude/xiap/receptor_rdkit.pdb'))\n mols = list(vs.fetch())\n assert_equal(len(mols), 1)\n mol_data = mols[0].data\n assert_in('vina_affinity', mol_data)\n assert_in('vina_gauss1', mol_data)\n assert_in('vina_gauss2', mol_data)\n assert_in('vina_hydrogen', mol_data)\n assert_in('vina_hydrophobic', mol_data)\n assert_in('vina_repulsion', mol_data)",
"def run_traffic_item(self, device, command, *argv, **kwarg):\n if not IxnetworkIxiaClientImpl.ixnet:\n return 0, \"Ixia not connected\"\n ############# Implement me ################\n if command == \"set_traffic\":\n params = kwarg[\"params\"]\n if not params or not params[0]:\n return 0, \"Need to specify the packet data\"\n param = params[0]\n type = param[\"pkt_data\"].get(\"type\", \"ipv4\")\n if type == \"ipv4\":\n self.set_ipv4_traffic(device, param[\"name\"], param[\"pkt_data\"], traffic_type=\"ipv4\")\n elif type == \"bgp\":\n self.set_ipv4_traffic(device, param[\"name\"], param[\"pkt_data\"], traffic_type=\"bgp\")\n elif type == \"ethernet\":\n self.set_ethernet_traffic(\n device, param[\"name\"], param[\"pkt_data\"], traffic_type=\"ethernetVlan\"\n )\n elif type == \"ethernetVlan\":\n self.set_ethernet_traffic(\n device, param[\"name\"], param[\"pkt_data\"], traffic_type=\"ethernetVlan\"\n )\n elif type == \"raw\":\n self.set_ethernet_traffic(\n device, param[\"name\"], param[\"pkt_data\"], traffic_type=\"raw\"\n )\n elif command == \"start_traffic\":\n device.applog.info(\"Starting Traffic\")\n IxnetworkIxiaClientImpl.ixnet.Traffic.Start()\n elif command == \"stop_traffic\":\n device.applog.info(\"Stopping Traffic\")\n IxnetworkIxiaClientImpl.ixnet.Traffic.Stop()\n elif command == \"get_stats\":\n device.applog.info(\"Getting Stats\")\n stats_type = \"Port Statistics\"\n params = kwarg[\"params\"]\n if params or params[0]:\n stats_type = params[0].get(\"stats_type\", stats_type)\n stats = SVA(IxnetworkIxiaClientImpl.ixnet, stats_type)\n # device.applog.info(stats)\n return 0, stats\n elif command == \"clear_stats\":\n device.applog.info(\"Clear Stats\")\n IxnetworkIxiaClientImpl.ixnet.ClearStats()\n return 0, \"\"",
"def url_scan_command():\n # 1. Get input url and extended_info from Demisto\n url = demisto.args().get('url')\n extended_info = demisto.args().get('extended_info')\n # 2. Get the url scan from SlashNext API\n response = url_scan(url=url)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n url_data = response.get('urlData')\n scanid = url_data.get('scanId')\n\n snx_ioc_cont, dbot_score_cont, url_cont = get_snx_url_ioc_context(url_data, is_scan=True)\n\n ec = {\n 'SlashNext.URL(val.Value === obj.Value)': snx_ioc_cont[0],\n 'DBotScore': dbot_score_cont,\n 'URL': url_cont\n }\n\n title = 'SlashNext Phishing Incident Response - URL Scan\\n'\\\n '##### url = {}'.format(url_data.get('url'))\n\n if response.get('normalizeData').get('normalizeStatus') == 1:\n title += ' *\\n*' + response.get('normalizeData').get('normalizeMessage')\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ScanID',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)\n\n if extended_info == 'true' and response.get('swlData') is None:\n # Download Screenshot, HTML and Text Section\n if url_data.get('landingUrl') is None:\n if url_data.get('finalUrl') is not None and url_data.get('finalUrl') != 'N/A':\n tag = 'Final URL = {}'.format(url_data.get('finalUrl'))\n else:\n tag = 'Scanned URL = {}'.format(url_data.get('url'))\n else:\n tag = 'Redirected URL = {}'.format(url_data.get('landingUrl').get('url'))\n\n download_forensics_data(scanid=scanid, tag=tag, screenshot=True, html=True, txt=True)",
"def accept(visitor):"
] | [
"0.6584765",
"0.6489582",
"0.63159645",
"0.5070378",
"0.49106118",
"0.48129866",
"0.48059142",
"0.46778646",
"0.46212944",
"0.44722167",
"0.44278392",
"0.43826163",
"0.43719858",
"0.4344309",
"0.43253714",
"0.43091223",
"0.4265866",
"0.42530748",
"0.4211338",
"0.41965106",
"0.41959673",
"0.4170969",
"0.41616535",
"0.41548726",
"0.4134935",
"0.4124194",
"0.40886745",
"0.40789413",
"0.4070462",
"0.40608463"
] | 0.68324536 | 0 |
Converts the given STIX-2 Indicator to a VAST-compatible IoC and ingests it via a VAST matcher. vast_binary The vast binary command to use with PyVAST | async def ingest_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):
global logger
vast_ioc = indicator_to_vast_matcher_ioc(indicator)
if not vast_ioc:
logger.error(
f"Unable to convert STIX-2 Indicator to VAST compatible IoC. Is it a point IoC? {indicator}"
)
return
vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)
proc = await vast.import_(type="intel.indicator").json().exec(stdin=vast_ioc)
await proc.wait()
logger.debug(f"Ingested indicator for VAST live matching: {indicator}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def match_intel(\n vast_binary: str,\n vast_endpoint: str,\n indicator_queue: asyncio.Queue,\n sightings_queue: asyncio.Queue,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n):\n global logger, open_tasks\n while True:\n msg = await indicator_queue.get()\n try:\n indicator = parse(msg, allow_custom=True)\n except Exception as e:\n logger.warning(f\"Failed to decode STIX-2 Indicator item {msg}: {e}\")\n continue\n if type(indicator) is not Indicator:\n logger.warning(\n f\"Ignoring unknown message type, expected STIX-2 Indicator: {type(indicator)}\"\n )\n continue\n if (\n ThreatBusSTIX2Constants.X_THREATBUS_UPDATE.value\n in indicator.object_properties()\n and indicator.x_threatbus_update == Operation.REMOVE.value\n ):\n g_iocs_removed.inc()\n if live_match:\n asyncio.create_task(\n remove_vast_ioc(vast_binary, vast_endpoint, indicator)\n )\n else:\n # add new Indicator to matcher / query Indicator retrospectively\n g_iocs_added.inc()\n if retro_match:\n g_retro_match_backlog.inc()\n asyncio.create_task(\n retro_match_vast(\n vast_binary,\n vast_endpoint,\n retro_match_max_events,\n retro_match_timeout,\n indicator,\n sightings_queue,\n )\n )\n if live_match:\n asyncio.create_task(\n ingest_vast_ioc(vast_binary, vast_endpoint, indicator)\n )\n indicator_queue.task_done()",
"async def remove_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):\n global logger, matcher_name\n type_and_value = get_vast_type_and_value(indicator.pattern)\n if not type_and_value:\n logger.debug(f\"Cannot remove IoC from VAST. Is it a point IoC? {indicator}\")\n return None\n (vast_type, ioc_value) = type_and_value\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n # TODO pass matcher_name once VAST supports more fine-grained deletion\n await vast.matcher().intel().remove(ioc_value, vast_type).exec()\n logger.debug(f\"Removed indicator from VAST live matching: {indicator}\")",
"async def retro_match_vast(\n vast_binary: str,\n vast_endpoint: str,\n retro_match_max_events: int,\n retro_match_timeout: float,\n indicator: Indicator,\n sightings_queue: asyncio.Queue,\n):\n start = time.time()\n query = indicator_to_vast_query(indicator)\n if not query:\n g_retro_match_backlog.dec()\n return\n global logger, max_open_tasks\n async with max_open_tasks:\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n kwargs = {}\n if retro_match_max_events > 0:\n kwargs[\"max_events\"] = retro_match_max_events\n proc = await vast.export(**kwargs).json(query).exec()\n retro_result = None\n try:\n retro_result = await asyncio.wait_for(\n proc.communicate(),\n timeout=retro_match_timeout if retro_match_timeout > 0 else None,\n )\n except asyncio.TimeoutError:\n proc.terminate()\n logger.error(\n f\"Timeout after {retro_match_timeout}s in retro-query for indicator {indicator}\"\n )\n if not retro_result or len(retro_result) != 2:\n g_retro_match_backlog.dec()\n return\n reported = 0\n stdout = retro_result[0]\n for line in stdout.decode().split(\"\\n\"):\n line = line.rstrip()\n if line:\n sighting = query_result_to_sighting(line, indicator)\n if not sighting:\n logger.error(f\"Could not parse VAST query result: {line}\")\n continue\n reported += 1\n await sightings_queue.put(sighting)\n logger.debug(f\"Retro-matched {reported} sighting(s) for indicator: {indicator}\")\n s_retro_matches_per_ioc.observe(reported)\n s_retro_query_time_s_per_ioc.observe(time.time() - start)\n g_retro_match_backlog.dec()",
"async def live_match_vast(\n vast_binary: str, vast_endpoint: str, sightings_queue: asyncio.Queue\n):\n global logger, matcher_name\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n matcher_name = \"threatbus-\" + \"\".join(random.choice(letters) for i in range(10))\n proc = await vast.matcher().start(name=matcher_name).exec()\n # returncode is None as long as the process did not terminate yet\n while proc.returncode is None:\n data = await proc.stdout.readline()\n if not data:\n if not await vast.test_connection():\n logger.error(\"Lost connection to VAST, cannot live-match\")\n # TODO reconnect\n continue\n vast_sighting = data.decode(\"utf-8\").rstrip()\n sighting = matcher_result_to_sighting(vast_sighting)\n if not sighting:\n logger.error(f\"Cannot parse sighting-output from VAST: {vast_sighting}\")\n continue\n g_live_matcher_sightings.inc()\n await sightings_queue.put(sighting)\n stderr = await proc.stderr.read()\n if stderr:\n logger.error(\n \"VAST matcher process exited with message: {}\".format(stderr.decode())\n )\n logger.critical(\"Unexpected exit of VAST matcher process.\")",
"def iast_binary_svp(\n isotherms,\n mole_fractions,\n pressures,\n branch=\"ads\",\n warningoff=False,\n adsorbed_mole_fraction_guess=None,\n verbose=False,\n ax=None\n):\n\n # Parameter checks\n if len(isotherms) != 2 or len(mole_fractions) != 2:\n raise ParameterError(\n \"The selectivity calculation can only take two components as parameters.\"\n )\n if sum(mole_fractions) != 1:\n raise ParameterError(\"Mole fractions do not add up to unity\")\n if any(iso.pressure_mode.startswith(\"relative\") for iso in isotherms):\n raise ParameterError(\"IAST only runs with isotherms on an absolute pressure basis.\")\n\n # Convert to numpy arrays just in case\n pressures = numpy.asarray(pressures)\n mole_fractions = numpy.asarray(mole_fractions)\n\n # Generate the array of partial pressures\n component_loadings = numpy.zeros((len(pressures), 2))\n\n for index, pressure in enumerate(pressures):\n component_loadings[index, :] = iast_point_fraction(\n isotherms,\n mole_fractions,\n pressure,\n branch=branch,\n warningoff=warningoff,\n adsorbed_mole_fraction_guess=adsorbed_mole_fraction_guess\n )\n\n selectivities = [(x[0] / mole_fractions[0]) / (x[1] / mole_fractions[1])\n for x in component_loadings]\n\n if verbose:\n plot_iast_svp(\n pressures,\n selectivities,\n isotherms[0].adsorbate,\n isotherms[1].adsorbate,\n mole_fractions[0],\n isotherms[0].pressure_unit,\n ax=ax\n )\n\n return dict(\n pressure=pressures,\n selectivity=selectivities,\n )",
"async def start(\n vast_binary: str,\n vast_endpoint: str,\n zmq_endpoint: str,\n snapshot: int,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n max_open_files: int,\n metrics_interval: int,\n metrics_filename: str,\n transform_cmd: str = None,\n sink: str = None,\n):\n global logger, async_tasks, p2p_topic, max_open_tasks, metrics\n # needs to be created inside the same eventloop where it is used\n max_open_tasks = asyncio.Semaphore(max_open_files)\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n assert await vast.test_connection() is True, \"Cannot connect to VAST\"\n\n logger.debug(f\"Calling Threat Bus management endpoint {zmq_endpoint}\")\n reply = subscribe(zmq_endpoint, \"stix2/indicator\", snapshot)\n if not reply_is_success(reply):\n logger.error(\"Subscription failed\")\n return\n pub_endpoint = reply.get(\"pub_endpoint\", None)\n sub_endpoint = reply.get(\"sub_endpoint\", None)\n topic = reply.get(\"topic\", None)\n if not pub_endpoint or not sub_endpoint or not topic:\n logger.error(\"Subscription failed\")\n return\n logger.info(f\"Subscription successful. New p2p_topic: {topic}\")\n if p2p_topic:\n # The 'start' function is called as result of a restart\n # Unsubscribe the old topic as soon as we get a working connection\n logger.info(\"Cleaning up old p2p_topic subscription ...\")\n unsubscribe(zmq_endpoint, p2p_topic)\n atexit.unregister(unsubscribe)\n p2p_topic = topic\n atexit.register(unsubscribe, zmq_endpoint, topic)\n\n async_tasks.append(\n asyncio.create_task(heartbeat(zmq_endpoint, p2p_topic, interval=5))\n )\n\n indicator_queue = asyncio.Queue()\n sightings_queue = asyncio.Queue()\n async_tasks.append(\n asyncio.create_task(\n report_sightings(sub_endpoint, sightings_queue, transform_cmd, sink)\n )\n )\n\n async_tasks.append(\n asyncio.create_task(receive(pub_endpoint, p2p_topic, indicator_queue))\n )\n\n async_tasks.append(\n asyncio.create_task(\n match_intel(\n vast_binary,\n vast_endpoint,\n indicator_queue,\n sightings_queue,\n live_match,\n retro_match,\n retro_match_max_events,\n retro_match_timeout,\n )\n )\n )\n\n if retro_match:\n # add metrics for retro-matching to the metric output\n metrics += [\n s_retro_matches_per_ioc,\n s_retro_query_time_s_per_ioc,\n g_retro_match_backlog,\n ]\n if live_match:\n # add metrics for live-matching to the metric output\n metrics.append(g_live_matcher_sightings)\n async_tasks.append(\n asyncio.create_task(\n live_match_vast(vast_binary, vast_endpoint, sightings_queue)\n )\n )\n\n if metrics_interval:\n async_tasks.append(\n asyncio.create_task(write_metrics(metrics_interval, metrics_filename))\n )\n\n loop = asyncio.get_event_loop()\n for s in [signal.SIGHUP, signal.SIGTERM, signal.SIGINT]:\n loop.add_signal_handler(s, lambda: asyncio.create_task(stop_signal()))\n return await asyncio.gather(*async_tasks)",
"def vsi_path(path): # -> Any | str:\n ...",
"def init_VI():\n\n\tprint 'Setting VI'\n\tvi = UsbVehicleInterface(payload_format=\"json\")\n\n\treturn vi",
"def show_sver(ibs, aid1, aid2, chipmatch_FILT=None, aid2_svtup=None, **kwargs):\n print('\\n[show_sver] ====================== [show_sver]')\n #print(utool.func_str(show_sv, kwargs=locals()))\n if chipmatch_FILT is None or aid2_svtup is None:\n chipmatch_FILT, aid2_svtup = _compute_svvars(ibs, aid1)\n sv_vartup = _get_sv_vartup_for_plottool(ibs, aid1, aid2, chipmatch_FILT, aid2_svtup)\n (chip1, chip2, kpts1, kpts2, fm, homog_tup, aff_tup) = sv_vartup\n if WRITE_SV_DEBUG:\n keys = ('chip1', 'chip2', 'kpts1', 'kpts2', 'fm', 'homog_tup', 'aff_tup')\n utool.save_testdata(*keys)\n print('[vizsv] write test info')\n utool.qflag()\n draw_sv.show_sv(chip1, chip2, kpts1, kpts2, fm, homog_tup=homog_tup, aff_tup=aff_tup, **kwargs)",
"def build_ibb_graph_from( ea_source, sourcenode, reachgraph ):\r\n\tflowgraph = create_flowgraph_from( 0x4423D0 )\r\n\tadd_disasm_lines_to_flowgraph( flowgraph )\r\n\tflowgraph.write_VCG_File(\"C:\\\\test.vcg\")",
"def convert_xml_using_saxon(source_file, template_file):\n if not os.path.isabs(template_file):\n template_file = CFG_BIBCONVERT_XSL_PATH + os.sep + template_file\n source_directory = os.path.dirname(source_file)\n command = \"cd %s && saxon9he-xslt -s:%s -xsl:%s -dtd:off\" % \\\n (source_directory, source_file, template_file)\n exit_code, stdout_buffer, stderr_buffer = run_shell_command(cmd=command)\n if exit_code or stdout_buffer or stderr_buffer:\n # Error may have happened\n raise APSHarvesterConversionError(\"%s: %s\\nOut:%s\" %\n (exit_code,\n stderr_buffer,\n stdout_buffer))",
"def open_interfaces(ipadd):\n global the_vx_ifc #pylint: disable=W0603,C0103\n print('opening VXI-11 at %s ...'%ipadd, end=' ')\n the_vx_ifc = vxi11.Instrument(ipadd)\n print('done')",
"def visit_binary(spec):",
"def sox_convert(input_path: str, output_path: str, check=True,\n verbose_level=0):\n os.system('sox -V{} {} {}'.format(verbose_level, input_path, output_path))\n if check:\n if not os.path.isfile(output_path):\n raise RuntimeError('Not able to convert file', input_path,\n output_path)",
"def cast(*args):\n return _itkEdgePotentialImageFilterPython.itkEdgePotentialImageFilterICVF22IF2_cast(*args)",
"def relay_casttag(c, x, tag):\n assert tag.is_constant(int)\n rtag = get_union_ctr(tag.value, x.abstract.options.get(tag.value))\n v = relay.Var(\"v\")\n clause = adt.Clause(adt.PatternConstructor(rtag, [adt.PatternVar(v)]), v)\n return adt.Match(c.ref(x), [clause], complete=False)",
"def cast(*args):\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIF2IF2_cast(*args)",
"def cast_to(ibuilder, data_amounts, src_buf, dst_buf):\n src_dtype = src_buf.dtype\n dst_dtype = dst_buf.dtype\n if src_dtype == \"float16\" and dst_dtype == \"int32\":\n vconv_instr = \"vconv_f162s32f\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float32\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_f322f16\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float32\" and dst_dtype == \"int32\":\n vconv_instr = \"vconv_f322s32f\"\n vconv_compute_num = VEC_NUMS_HALF\n # vconv_s322f32 only support cloud_v100\n elif src_dtype == \"int32\" and dst_dtype == \"float32\":\n vconv_instr = \"vconv_s322f32\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"int8\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_s82f16\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"uint8\" and dst_dtype == \"float16\":\n vconv_instr = \"vconv_u82f16\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"float16\" and dst_dtype == \"float32\":\n vconv_instr = \"vconv_f162f32\"\n vconv_compute_num = VEC_NUMS_HALF\n elif src_dtype == \"float16\" and dst_dtype == \"int8\":\n vconv_instr = \"vconv_f162s8f\"\n vconv_compute_num = VEC_NUMS\n elif src_dtype == \"float16\" and dst_dtype == \"uint8\":\n vconv_instr = \"vconv_f162u8f\"\n vconv_compute_num = VEC_NUMS\n\n def compute_stride(src_type, dst_type, vconv_num):\n \"\"\"\n Calculated stride value\n \"\"\"\n perblock_nums_a = compute_perblock_nums(src_type)\n perblock_nums_b = compute_perblock_nums(dst_type)\n src_stride = vconv_num // perblock_nums_a\n dst_stride = vconv_num // perblock_nums_b\n\n return src_stride, dst_stride\n\n src_strides, dst_strides = compute_stride(src_dtype, dst_dtype, vconv_compute_num)\n\n # recheck vconv_instr support\n if not tbe_platform.cce_conf.intrinsic_check_support(\"Intrinsic_vconv\", \\\n vconv_instr.split('_')[1]):\n raise RuntimeError(\"This product don't support Intrinsic_vconv \" + \\\n vconv_instr)\n\n repeats = int(data_amounts // vconv_compute_num)\n remain = int(data_amounts % vconv_compute_num)\n init_times = int(repeats // UINT8_MAX)\n init_remain = int(repeats % UINT8_MAX)\n with ibuilder.if_scope(repeats != 0):\n if init_times != 0:\n with ibuilder.for_range(0, init_times) as rch:\n with ibuilder.new_scope():\n reset_mask_insn(\n ibuilder, dst_buf.dtype, bits=vconv_compute_num)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=rch * UINT8_MAX\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=rch * UINT8_MAX\n * vconv_compute_num), \\\n 255, 1, 1, dst_strides, src_strides))\n if init_remain != 0:\n with ibuilder.new_scope():\n reset_mask_insn(ibuilder, dst_buf.dtype, bits=vconv_compute_num)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=init_times * UINT8_MAX\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=init_times * UINT8_MAX\n * vconv_compute_num), \\\n init_remain, 1, 1, dst_strides, src_strides))\n\n with ibuilder.if_scope(remain != 0):\n with ibuilder.new_scope():\n mask_len = remain\n reset_mask_insn(ibuilder, dst_buf.dtype, bits=mask_len)\n ibuilder.emit(tvm.call_extern(dst_buf.dtype, vconv_instr, \\\n dst_buf.access_ptr('w', offset=repeats\n * vconv_compute_num), \\\n src_buf.access_ptr('r', offset=repeats\n * vconv_compute_num), \\\n 1, 1, 1, 0, 0))",
"def ssi(parser, token):\n bits = token.split_contents()\n parsed = False\n if len(bits) not in (2, 3):\n raise TemplateSyntaxError(\"'ssi' tag takes one argument: the path to\"\n \" the file to be included\")\n if len(bits) == 3:\n if bits[2] == 'parsed':\n parsed = True\n else:\n raise TemplateSyntaxError(\"Second (optional) argument to %s tag\"\n \" must be 'parsed'\" % bits[0])\n filepath = parser.compile_filter(bits[1])\n return SsiNode(filepath, parsed, legacy_filepath=False)",
"def itkBinaryContourImageFilterISS2ISS2_cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterISS2ISS2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterISS2ISS2_cast(obj)",
"def itkVectorExpandImageFilterIVF22IVF22_cast(obj: 'itkLightObject') -> \"itkVectorExpandImageFilterIVF22IVF22 *\":\n return _itkVectorExpandImageFilterPython.itkVectorExpandImageFilterIVF22IVF22_cast(obj)",
"def cast(*args):\n return _itkCosImageFilterPython.itkCosImageFilterIF2IF2_cast(*args)",
"def itkBinaryContourImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIF2IF2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIF2IF2_cast(obj)",
"def readSVFile(self, frame, symbol = '?'):\n time_ini = self.configs[self.TIME_INI_KEY] \n t = time_ini + frame\n binary_path = corrTIFPath(self.configs[self.BINATY_PATH_KEY], symbol, t)\n\n return readSuperVoxelFromFile(binary_path)",
"def cast(*args):\n return _itkEdgePotentialImageFilterPython.itkEdgePotentialImageFilterICVF22ID2_cast(*args)",
"def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks",
"def parse_voting(\n aragon_voting, abi_storage: CachedStorage,\n vote_number: int\n) -> List[Union[Call, str]]:\n script_code = str(aragon_voting.getVote(vote_number)[-1])\n return decode_evm_script(script_code, abi_storage)",
"def cast(*args):\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIUC2IUC2_cast(*args)",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterISS2ISS2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterISS2ISS2_cast(obj)",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIF2IF2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIF2IF2_cast(obj)"
] | [
"0.6255441",
"0.6119161",
"0.5682917",
"0.5192349",
"0.5060265",
"0.493398",
"0.48183864",
"0.4803378",
"0.47764373",
"0.46372145",
"0.46292838",
"0.46243283",
"0.45121062",
"0.4508024",
"0.45072028",
"0.45029947",
"0.44965547",
"0.4473348",
"0.44625607",
"0.4462195",
"0.4459072",
"0.4445787",
"0.44406956",
"0.44335225",
"0.43766493",
"0.4371787",
"0.43625647",
"0.43596268",
"0.4346733",
"0.43251938"
] | 0.81019396 | 0 |
Converts the given STIX-2 Indicator to a VAST-compatible IoC and removes it from the VAST matcher. vast_binary The vast binary command to use with PyVAST | async def remove_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):
global logger, matcher_name
type_and_value = get_vast_type_and_value(indicator.pattern)
if not type_and_value:
logger.debug(f"Cannot remove IoC from VAST. Is it a point IoC? {indicator}")
return None
(vast_type, ioc_value) = type_and_value
vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)
# TODO pass matcher_name once VAST supports more fine-grained deletion
await vast.matcher().intel().remove(ioc_value, vast_type).exec()
logger.debug(f"Removed indicator from VAST live matching: {indicator}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def ingest_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):\n global logger\n vast_ioc = indicator_to_vast_matcher_ioc(indicator)\n if not vast_ioc:\n logger.error(\n f\"Unable to convert STIX-2 Indicator to VAST compatible IoC. Is it a point IoC? {indicator}\"\n )\n return\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n proc = await vast.import_(type=\"intel.indicator\").json().exec(stdin=vast_ioc)\n await proc.wait()\n logger.debug(f\"Ingested indicator for VAST live matching: {indicator}\")",
"async def match_intel(\n vast_binary: str,\n vast_endpoint: str,\n indicator_queue: asyncio.Queue,\n sightings_queue: asyncio.Queue,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n):\n global logger, open_tasks\n while True:\n msg = await indicator_queue.get()\n try:\n indicator = parse(msg, allow_custom=True)\n except Exception as e:\n logger.warning(f\"Failed to decode STIX-2 Indicator item {msg}: {e}\")\n continue\n if type(indicator) is not Indicator:\n logger.warning(\n f\"Ignoring unknown message type, expected STIX-2 Indicator: {type(indicator)}\"\n )\n continue\n if (\n ThreatBusSTIX2Constants.X_THREATBUS_UPDATE.value\n in indicator.object_properties()\n and indicator.x_threatbus_update == Operation.REMOVE.value\n ):\n g_iocs_removed.inc()\n if live_match:\n asyncio.create_task(\n remove_vast_ioc(vast_binary, vast_endpoint, indicator)\n )\n else:\n # add new Indicator to matcher / query Indicator retrospectively\n g_iocs_added.inc()\n if retro_match:\n g_retro_match_backlog.inc()\n asyncio.create_task(\n retro_match_vast(\n vast_binary,\n vast_endpoint,\n retro_match_max_events,\n retro_match_timeout,\n indicator,\n sightings_queue,\n )\n )\n if live_match:\n asyncio.create_task(\n ingest_vast_ioc(vast_binary, vast_endpoint, indicator)\n )\n indicator_queue.task_done()",
"async def retro_match_vast(\n vast_binary: str,\n vast_endpoint: str,\n retro_match_max_events: int,\n retro_match_timeout: float,\n indicator: Indicator,\n sightings_queue: asyncio.Queue,\n):\n start = time.time()\n query = indicator_to_vast_query(indicator)\n if not query:\n g_retro_match_backlog.dec()\n return\n global logger, max_open_tasks\n async with max_open_tasks:\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n kwargs = {}\n if retro_match_max_events > 0:\n kwargs[\"max_events\"] = retro_match_max_events\n proc = await vast.export(**kwargs).json(query).exec()\n retro_result = None\n try:\n retro_result = await asyncio.wait_for(\n proc.communicate(),\n timeout=retro_match_timeout if retro_match_timeout > 0 else None,\n )\n except asyncio.TimeoutError:\n proc.terminate()\n logger.error(\n f\"Timeout after {retro_match_timeout}s in retro-query for indicator {indicator}\"\n )\n if not retro_result or len(retro_result) != 2:\n g_retro_match_backlog.dec()\n return\n reported = 0\n stdout = retro_result[0]\n for line in stdout.decode().split(\"\\n\"):\n line = line.rstrip()\n if line:\n sighting = query_result_to_sighting(line, indicator)\n if not sighting:\n logger.error(f\"Could not parse VAST query result: {line}\")\n continue\n reported += 1\n await sightings_queue.put(sighting)\n logger.debug(f\"Retro-matched {reported} sighting(s) for indicator: {indicator}\")\n s_retro_matches_per_ioc.observe(reported)\n s_retro_query_time_s_per_ioc.observe(time.time() - start)\n g_retro_match_backlog.dec()",
"async def live_match_vast(\n vast_binary: str, vast_endpoint: str, sightings_queue: asyncio.Queue\n):\n global logger, matcher_name\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n matcher_name = \"threatbus-\" + \"\".join(random.choice(letters) for i in range(10))\n proc = await vast.matcher().start(name=matcher_name).exec()\n # returncode is None as long as the process did not terminate yet\n while proc.returncode is None:\n data = await proc.stdout.readline()\n if not data:\n if not await vast.test_connection():\n logger.error(\"Lost connection to VAST, cannot live-match\")\n # TODO reconnect\n continue\n vast_sighting = data.decode(\"utf-8\").rstrip()\n sighting = matcher_result_to_sighting(vast_sighting)\n if not sighting:\n logger.error(f\"Cannot parse sighting-output from VAST: {vast_sighting}\")\n continue\n g_live_matcher_sightings.inc()\n await sightings_queue.put(sighting)\n stderr = await proc.stderr.read()\n if stderr:\n logger.error(\n \"VAST matcher process exited with message: {}\".format(stderr.decode())\n )\n logger.critical(\"Unexpected exit of VAST matcher process.\")",
"def rm_find_instr(bv: BinaryView, addr: int):\n\n # Remove instruction highlight\n clear_highlight(bv, addr)\n\n # Remove the instruction to the list associated with the current view\n bv.session_data.mui_find.remove(addr)",
"def dvi_to_svg(dvi_file, regen_if_exists=False):\n result = dvi_file.replace(\".dvi\", \".svg\")\n if not os.path.exists(result):\n commands = [\n \"dvisvgm\",\n dvi_file,\n \"-n\",\n \"-v\",\n \"0\",\n \"-o\",\n result,\n \">\",\n get_null()\n ]\n os.system(\" \".join(commands))\n return result",
"def iast_binary_svp(\n isotherms,\n mole_fractions,\n pressures,\n branch=\"ads\",\n warningoff=False,\n adsorbed_mole_fraction_guess=None,\n verbose=False,\n ax=None\n):\n\n # Parameter checks\n if len(isotherms) != 2 or len(mole_fractions) != 2:\n raise ParameterError(\n \"The selectivity calculation can only take two components as parameters.\"\n )\n if sum(mole_fractions) != 1:\n raise ParameterError(\"Mole fractions do not add up to unity\")\n if any(iso.pressure_mode.startswith(\"relative\") for iso in isotherms):\n raise ParameterError(\"IAST only runs with isotherms on an absolute pressure basis.\")\n\n # Convert to numpy arrays just in case\n pressures = numpy.asarray(pressures)\n mole_fractions = numpy.asarray(mole_fractions)\n\n # Generate the array of partial pressures\n component_loadings = numpy.zeros((len(pressures), 2))\n\n for index, pressure in enumerate(pressures):\n component_loadings[index, :] = iast_point_fraction(\n isotherms,\n mole_fractions,\n pressure,\n branch=branch,\n warningoff=warningoff,\n adsorbed_mole_fraction_guess=adsorbed_mole_fraction_guess\n )\n\n selectivities = [(x[0] / mole_fractions[0]) / (x[1] / mole_fractions[1])\n for x in component_loadings]\n\n if verbose:\n plot_iast_svp(\n pressures,\n selectivities,\n isotherms[0].adsorbate,\n isotherms[1].adsorbate,\n mole_fractions[0],\n isotherms[0].pressure_unit,\n ax=ax\n )\n\n return dict(\n pressure=pressures,\n selectivity=selectivities,\n )",
"def itkBinaryContourImageFilterISS2ISS2_cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterISS2ISS2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterISS2ISS2_cast(obj)",
"def vsi_path(path): # -> Any | str:\n ...",
"def itkBinaryContourImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIF2IF2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIF2IF2_cast(obj)",
"async def start(\n vast_binary: str,\n vast_endpoint: str,\n zmq_endpoint: str,\n snapshot: int,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n max_open_files: int,\n metrics_interval: int,\n metrics_filename: str,\n transform_cmd: str = None,\n sink: str = None,\n):\n global logger, async_tasks, p2p_topic, max_open_tasks, metrics\n # needs to be created inside the same eventloop where it is used\n max_open_tasks = asyncio.Semaphore(max_open_files)\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n assert await vast.test_connection() is True, \"Cannot connect to VAST\"\n\n logger.debug(f\"Calling Threat Bus management endpoint {zmq_endpoint}\")\n reply = subscribe(zmq_endpoint, \"stix2/indicator\", snapshot)\n if not reply_is_success(reply):\n logger.error(\"Subscription failed\")\n return\n pub_endpoint = reply.get(\"pub_endpoint\", None)\n sub_endpoint = reply.get(\"sub_endpoint\", None)\n topic = reply.get(\"topic\", None)\n if not pub_endpoint or not sub_endpoint or not topic:\n logger.error(\"Subscription failed\")\n return\n logger.info(f\"Subscription successful. New p2p_topic: {topic}\")\n if p2p_topic:\n # The 'start' function is called as result of a restart\n # Unsubscribe the old topic as soon as we get a working connection\n logger.info(\"Cleaning up old p2p_topic subscription ...\")\n unsubscribe(zmq_endpoint, p2p_topic)\n atexit.unregister(unsubscribe)\n p2p_topic = topic\n atexit.register(unsubscribe, zmq_endpoint, topic)\n\n async_tasks.append(\n asyncio.create_task(heartbeat(zmq_endpoint, p2p_topic, interval=5))\n )\n\n indicator_queue = asyncio.Queue()\n sightings_queue = asyncio.Queue()\n async_tasks.append(\n asyncio.create_task(\n report_sightings(sub_endpoint, sightings_queue, transform_cmd, sink)\n )\n )\n\n async_tasks.append(\n asyncio.create_task(receive(pub_endpoint, p2p_topic, indicator_queue))\n )\n\n async_tasks.append(\n asyncio.create_task(\n match_intel(\n vast_binary,\n vast_endpoint,\n indicator_queue,\n sightings_queue,\n live_match,\n retro_match,\n retro_match_max_events,\n retro_match_timeout,\n )\n )\n )\n\n if retro_match:\n # add metrics for retro-matching to the metric output\n metrics += [\n s_retro_matches_per_ioc,\n s_retro_query_time_s_per_ioc,\n g_retro_match_backlog,\n ]\n if live_match:\n # add metrics for live-matching to the metric output\n metrics.append(g_live_matcher_sightings)\n async_tasks.append(\n asyncio.create_task(\n live_match_vast(vast_binary, vast_endpoint, sightings_queue)\n )\n )\n\n if metrics_interval:\n async_tasks.append(\n asyncio.create_task(write_metrics(metrics_interval, metrics_filename))\n )\n\n loop = asyncio.get_event_loop()\n for s in [signal.SIGHUP, signal.SIGTERM, signal.SIGINT]:\n loop.add_signal_handler(s, lambda: asyncio.create_task(stop_signal()))\n return await asyncio.gather(*async_tasks)",
"def remove_instr_resp(ac, station, startev): \n \n if station == 'FUR':\n ac.detrend(type='linear')\n ac.taper(max_percentage=0.05) \n\n paz_sts2_vel = {'poles': [(-0.0367429 + 0.036754j),\n (-0.0367429 - 0.036754j)],\n 'sensitivity': 0.944019640, \n 'zeros': [0j,0j], \n 'gain': 1.0}\n \n ac.simulate(paz_remove=paz_sts2_vel, remove_sensitivity=True) \n\n else:\n print 'Incorrect station call'\n\n return ac",
"def build_ibb_graph_from( ea_source, sourcenode, reachgraph ):\r\n\tflowgraph = create_flowgraph_from( 0x4423D0 )\r\n\tadd_disasm_lines_to_flowgraph( flowgraph )\r\n\tflowgraph.write_VCG_File(\"C:\\\\test.vcg\")",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIF2IF2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIF2IF2_cast(obj)",
"def show_sver(ibs, aid1, aid2, chipmatch_FILT=None, aid2_svtup=None, **kwargs):\n print('\\n[show_sver] ====================== [show_sver]')\n #print(utool.func_str(show_sv, kwargs=locals()))\n if chipmatch_FILT is None or aid2_svtup is None:\n chipmatch_FILT, aid2_svtup = _compute_svvars(ibs, aid1)\n sv_vartup = _get_sv_vartup_for_plottool(ibs, aid1, aid2, chipmatch_FILT, aid2_svtup)\n (chip1, chip2, kpts1, kpts2, fm, homog_tup, aff_tup) = sv_vartup\n if WRITE_SV_DEBUG:\n keys = ('chip1', 'chip2', 'kpts1', 'kpts2', 'fm', 'homog_tup', 'aff_tup')\n utool.save_testdata(*keys)\n print('[vizsv] write test info')\n utool.qflag()\n draw_sv.show_sv(chip1, chip2, kpts1, kpts2, fm, homog_tup=homog_tup, aff_tup=aff_tup, **kwargs)",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterISS2ISS2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterISS2ISS2_cast(obj)",
"def unplug_vifs(self, instance, network_info):\n raise NotImplementedError()",
"def itkVectorExpandImageFilterIVF22IVF22_cast(obj: 'itkLightObject') -> \"itkVectorExpandImageFilterIVF22IVF22 *\":\n return _itkVectorExpandImageFilterPython.itkVectorExpandImageFilterIVF22IVF22_cast(obj)",
"def remove_oov(analogy_path, _log, encoding='utf-8', lower=True):\n vocab = read_vocab()\n _log.info('Processing analogies from %s', analogy_path)\n with open(analogy_path, encoding=encoding) as f:\n for line in f:\n if line.startswith(':'):\n # Found a section title\n print(line.rstrip())\n else:\n should_print = True\n\n for ws in line.split():\n # Handle synonyms separated by slash (/)\n for w in ws.split('/'):\n if lower:\n w = w.lower()\n if w not in vocab:\n should_print = False\n break\n if not should_print:\n break\n\n if should_print:\n print(line.rstrip())",
"def remove_sriov_adapter(self, network_obj):\n\n task = self.remove_network_adapter(network_obj)\n return task",
"def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks",
"def rm_avoid_instr(bv: BinaryView, addr: int):\n\n # Remove instruction highlight\n clear_highlight(bv, addr)\n\n # Remove the instruction to the list associated with the current view\n bv.session_data.mui_avoid.remove(addr)",
"def relay_casttag(c, x, tag):\n assert tag.is_constant(int)\n rtag = get_union_ctr(tag.value, x.abstract.options.get(tag.value))\n v = relay.Var(\"v\")\n clause = adt.Clause(adt.PatternConstructor(rtag, [adt.PatternVar(v)]), v)\n return adt.Match(c.ref(x), [clause], complete=False)",
"def cast(obj: 'itkLightObject') -> \"itkVectorExpandImageFilterIVF22IVF22 *\":\n return _itkVectorExpandImageFilterPython.itkVectorExpandImageFilterIVF22IVF22_cast(obj)",
"def parse_voting(\n aragon_voting, abi_storage: CachedStorage,\n vote_number: int\n) -> List[Union[Call, str]]:\n script_code = str(aragon_voting.getVote(vote_number)[-1])\n return decode_evm_script(script_code, abi_storage)",
"def itkBinaryContourImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIUC2IUC2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIUC2IUC2_cast(obj)",
"def remove_vizant(tree):\n for childpath in [\".//target[@name='graph.init']\", \".//target[@name='graph.all']\", \".//target[@name='graph.sabbus']\"]:\n child = tree.find(childpath)\n parent = tree.find(\"%s/..\" % childpath)\n parent.remove(child)",
"def cast(obj: 'itkLightObject') -> \"itkBinaryContourImageFilterIUC2IUC2 *\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIUC2IUC2_cast(obj)",
"def itkSubtractImageFilterIF2IF2IF2_cast(*args):\n return _itkSubtractImageFilterPython.itkSubtractImageFilterIF2IF2IF2_cast(*args)",
"def cast(*args):\n return _itkEdgePotentialImageFilterPython.itkEdgePotentialImageFilterICVF22IF2_cast(*args)"
] | [
"0.6753386",
"0.56779706",
"0.5470638",
"0.48409158",
"0.44988817",
"0.44799104",
"0.44298604",
"0.43801382",
"0.4329384",
"0.43279293",
"0.4323277",
"0.42903358",
"0.42787653",
"0.42719406",
"0.42589614",
"0.42512602",
"0.4224931",
"0.42167395",
"0.41993895",
"0.41924408",
"0.4180672",
"0.4119734",
"0.41055134",
"0.41037855",
"0.41033873",
"0.40936762",
"0.40856126",
"0.40832436",
"0.40814906",
"0.40676564"
] | 0.7576261 | 0 |
Reads from the indicator_queue and matches all IoCs, either via VAST's live-matching or retro-matching. vast_binary: The VAST binary command to use with PyVAST | async def match_intel(
vast_binary: str,
vast_endpoint: str,
indicator_queue: asyncio.Queue,
sightings_queue: asyncio.Queue,
live_match: bool,
retro_match: bool,
retro_match_max_events: int,
retro_match_timeout: float,
):
global logger, open_tasks
while True:
msg = await indicator_queue.get()
try:
indicator = parse(msg, allow_custom=True)
except Exception as e:
logger.warning(f"Failed to decode STIX-2 Indicator item {msg}: {e}")
continue
if type(indicator) is not Indicator:
logger.warning(
f"Ignoring unknown message type, expected STIX-2 Indicator: {type(indicator)}"
)
continue
if (
ThreatBusSTIX2Constants.X_THREATBUS_UPDATE.value
in indicator.object_properties()
and indicator.x_threatbus_update == Operation.REMOVE.value
):
g_iocs_removed.inc()
if live_match:
asyncio.create_task(
remove_vast_ioc(vast_binary, vast_endpoint, indicator)
)
else:
# add new Indicator to matcher / query Indicator retrospectively
g_iocs_added.inc()
if retro_match:
g_retro_match_backlog.inc()
asyncio.create_task(
retro_match_vast(
vast_binary,
vast_endpoint,
retro_match_max_events,
retro_match_timeout,
indicator,
sightings_queue,
)
)
if live_match:
asyncio.create_task(
ingest_vast_ioc(vast_binary, vast_endpoint, indicator)
)
indicator_queue.task_done() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def retro_match_vast(\n vast_binary: str,\n vast_endpoint: str,\n retro_match_max_events: int,\n retro_match_timeout: float,\n indicator: Indicator,\n sightings_queue: asyncio.Queue,\n):\n start = time.time()\n query = indicator_to_vast_query(indicator)\n if not query:\n g_retro_match_backlog.dec()\n return\n global logger, max_open_tasks\n async with max_open_tasks:\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n kwargs = {}\n if retro_match_max_events > 0:\n kwargs[\"max_events\"] = retro_match_max_events\n proc = await vast.export(**kwargs).json(query).exec()\n retro_result = None\n try:\n retro_result = await asyncio.wait_for(\n proc.communicate(),\n timeout=retro_match_timeout if retro_match_timeout > 0 else None,\n )\n except asyncio.TimeoutError:\n proc.terminate()\n logger.error(\n f\"Timeout after {retro_match_timeout}s in retro-query for indicator {indicator}\"\n )\n if not retro_result or len(retro_result) != 2:\n g_retro_match_backlog.dec()\n return\n reported = 0\n stdout = retro_result[0]\n for line in stdout.decode().split(\"\\n\"):\n line = line.rstrip()\n if line:\n sighting = query_result_to_sighting(line, indicator)\n if not sighting:\n logger.error(f\"Could not parse VAST query result: {line}\")\n continue\n reported += 1\n await sightings_queue.put(sighting)\n logger.debug(f\"Retro-matched {reported} sighting(s) for indicator: {indicator}\")\n s_retro_matches_per_ioc.observe(reported)\n s_retro_query_time_s_per_ioc.observe(time.time() - start)\n g_retro_match_backlog.dec()",
"async def live_match_vast(\n vast_binary: str, vast_endpoint: str, sightings_queue: asyncio.Queue\n):\n global logger, matcher_name\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n matcher_name = \"threatbus-\" + \"\".join(random.choice(letters) for i in range(10))\n proc = await vast.matcher().start(name=matcher_name).exec()\n # returncode is None as long as the process did not terminate yet\n while proc.returncode is None:\n data = await proc.stdout.readline()\n if not data:\n if not await vast.test_connection():\n logger.error(\"Lost connection to VAST, cannot live-match\")\n # TODO reconnect\n continue\n vast_sighting = data.decode(\"utf-8\").rstrip()\n sighting = matcher_result_to_sighting(vast_sighting)\n if not sighting:\n logger.error(f\"Cannot parse sighting-output from VAST: {vast_sighting}\")\n continue\n g_live_matcher_sightings.inc()\n await sightings_queue.put(sighting)\n stderr = await proc.stderr.read()\n if stderr:\n logger.error(\n \"VAST matcher process exited with message: {}\".format(stderr.decode())\n )\n logger.critical(\"Unexpected exit of VAST matcher process.\")",
"async def ingest_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):\n global logger\n vast_ioc = indicator_to_vast_matcher_ioc(indicator)\n if not vast_ioc:\n logger.error(\n f\"Unable to convert STIX-2 Indicator to VAST compatible IoC. Is it a point IoC? {indicator}\"\n )\n return\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n proc = await vast.import_(type=\"intel.indicator\").json().exec(stdin=vast_ioc)\n await proc.wait()\n logger.debug(f\"Ingested indicator for VAST live matching: {indicator}\")",
"async def remove_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):\n global logger, matcher_name\n type_and_value = get_vast_type_and_value(indicator.pattern)\n if not type_and_value:\n logger.debug(f\"Cannot remove IoC from VAST. Is it a point IoC? {indicator}\")\n return None\n (vast_type, ioc_value) = type_and_value\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n # TODO pass matcher_name once VAST supports more fine-grained deletion\n await vast.matcher().intel().remove(ioc_value, vast_type).exec()\n logger.debug(f\"Removed indicator from VAST live matching: {indicator}\")",
"async def start(\n vast_binary: str,\n vast_endpoint: str,\n zmq_endpoint: str,\n snapshot: int,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n max_open_files: int,\n metrics_interval: int,\n metrics_filename: str,\n transform_cmd: str = None,\n sink: str = None,\n):\n global logger, async_tasks, p2p_topic, max_open_tasks, metrics\n # needs to be created inside the same eventloop where it is used\n max_open_tasks = asyncio.Semaphore(max_open_files)\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n assert await vast.test_connection() is True, \"Cannot connect to VAST\"\n\n logger.debug(f\"Calling Threat Bus management endpoint {zmq_endpoint}\")\n reply = subscribe(zmq_endpoint, \"stix2/indicator\", snapshot)\n if not reply_is_success(reply):\n logger.error(\"Subscription failed\")\n return\n pub_endpoint = reply.get(\"pub_endpoint\", None)\n sub_endpoint = reply.get(\"sub_endpoint\", None)\n topic = reply.get(\"topic\", None)\n if not pub_endpoint or not sub_endpoint or not topic:\n logger.error(\"Subscription failed\")\n return\n logger.info(f\"Subscription successful. New p2p_topic: {topic}\")\n if p2p_topic:\n # The 'start' function is called as result of a restart\n # Unsubscribe the old topic as soon as we get a working connection\n logger.info(\"Cleaning up old p2p_topic subscription ...\")\n unsubscribe(zmq_endpoint, p2p_topic)\n atexit.unregister(unsubscribe)\n p2p_topic = topic\n atexit.register(unsubscribe, zmq_endpoint, topic)\n\n async_tasks.append(\n asyncio.create_task(heartbeat(zmq_endpoint, p2p_topic, interval=5))\n )\n\n indicator_queue = asyncio.Queue()\n sightings_queue = asyncio.Queue()\n async_tasks.append(\n asyncio.create_task(\n report_sightings(sub_endpoint, sightings_queue, transform_cmd, sink)\n )\n )\n\n async_tasks.append(\n asyncio.create_task(receive(pub_endpoint, p2p_topic, indicator_queue))\n )\n\n async_tasks.append(\n asyncio.create_task(\n match_intel(\n vast_binary,\n vast_endpoint,\n indicator_queue,\n sightings_queue,\n live_match,\n retro_match,\n retro_match_max_events,\n retro_match_timeout,\n )\n )\n )\n\n if retro_match:\n # add metrics for retro-matching to the metric output\n metrics += [\n s_retro_matches_per_ioc,\n s_retro_query_time_s_per_ioc,\n g_retro_match_backlog,\n ]\n if live_match:\n # add metrics for live-matching to the metric output\n metrics.append(g_live_matcher_sightings)\n async_tasks.append(\n asyncio.create_task(\n live_match_vast(vast_binary, vast_endpoint, sightings_queue)\n )\n )\n\n if metrics_interval:\n async_tasks.append(\n asyncio.create_task(write_metrics(metrics_interval, metrics_filename))\n )\n\n loop = asyncio.get_event_loop()\n for s in [signal.SIGHUP, signal.SIGTERM, signal.SIGINT]:\n loop.add_signal_handler(s, lambda: asyncio.create_task(stop_signal()))\n return await asyncio.gather(*async_tasks)",
"def test_find_vlans_conflicting_paths(self):\n push_vlan2 = Instructions()\n push_vlan2.goto_table = 1\n push_vlan2.apply_actions.append(\"PUSH_VLAN\", 0x8100)\n push_vlan2.apply_actions.append(\"SET_FIELD\", (\"VLAN_VID\", 2))\n push_vlan1 = Instructions()\n push_vlan1.goto_table = 1\n push_vlan1.apply_actions.append(\"PUSH_VLAN\", 0x8100)\n push_vlan1.apply_actions.append(\"SET_FIELD\", (\"VLAN_VID\", 1))\n trunk10 = Instructions()\n trunk10.write_actions.append(\"OUTPUT\", 11)\n access11 = Instructions()\n access11.write_actions.append(\"POP_VLAN\", None)\n access11.write_actions.append(\"OUTPUT\", 11)\n # Note: Set VLAN applies the present bit mask so must included it\n ruleset_a = [\n Rule(priority=10, table=0,\n match=Match([('IN_PORT', 1, None), ('VLAN_VID', 0x1001, None)]),\n instructions=goto1),\n Rule(priority=10, table=0,\n match=Match([('IN_PORT', 2, None)]),\n instructions=push_vlan2),\n Rule(priority=10, table=0,\n match=Match([('IN_PORT', 3, None)]),\n instructions=push_vlan1),\n Rule(priority=10, table=0,\n match=Match([('IN_PORT', 4, None), ('VLAN_VID', 0x1002, None)]),\n instructions=goto1),\n Rule(priority=0, table=0),\n Rule(priority=10, table=1, match=Match([('TCP_SRC', 80, None)])),\n Rule(priority=0, table=1, instructions=goto2),\n Rule(priority=10, table=2, match=Match([('VLAN_VID', 0x1001, None)]),\n instructions=Instructions(dup=trunk10)),\n Rule(priority=10, table=2, match=Match([('VLAN_VID', 0x1002, None)]),\n instructions=Instructions(dup=access11)),\n Rule(priority=0, table=2),\n ]\n ruleset_b = [\n Rule(priority=10, table=0,\n match=Match([('IN_PORT', 1, None), ('VLAN_VID', 0x1001, None)]),\n instructions=goto1),\n Rule(priority=10, table=0,\n match=Match([('IN_PORT', 2, None)]),\n instructions=push_vlan2),\n Rule(priority=10, table=0,\n match=Match([('IN_PORT', 3, None)]),\n instructions=push_vlan1),\n Rule(priority=10, table=0,\n match=Match([('IN_PORT', 4, None), ('VLAN_VID', 0x1002, None)]),\n instructions=goto1),\n Rule(priority=0, table=0),\n Rule(priority=10, table=1, match=Match([('VLAN_VID', 0x1001, None)]),\n instructions=Instructions(dup=access11)),\n Rule(priority=10, table=1, match=Match([('VLAN_VID', 0x1002, None)]),\n instructions=Instructions(dup=trunk10)),\n Rule(priority=0, table=1)]\n ruleset_c = [\n Rule(priority=0, table=0)\n ]\n single_a = to_single_table(ruleset_a)\n single_b = to_single_table(ruleset_b)\n single_c = to_single_table(ruleset_c)\n norm_a = normalise(single_a)\n norm_b = normalise(single_b)\n norm_c = normalise(single_c)\n\n # Make sure the frozensets are made after to_single_table which changes\n # priorities which changes the Rule's hash in the frozenset\n result_ab = {\n (ruleset_a[0], ruleset_a[5]):\n frozenset([(ruleset_b[0], ruleset_b[5])]),\n (ruleset_a[0], ruleset_a[6], ruleset_a[7]):\n frozenset([(ruleset_b[0], ruleset_b[5])]),\n (ruleset_a[1], ruleset_a[5]):\n frozenset([(ruleset_b[1], ruleset_b[6])]),\n (ruleset_a[1], ruleset_a[6], ruleset_a[8]):\n frozenset([(ruleset_b[1], ruleset_b[6])]),\n (ruleset_a[2], ruleset_a[5]):\n frozenset([(ruleset_b[2], ruleset_b[5])]),\n (ruleset_a[2], ruleset_a[6], ruleset_a[7]):\n frozenset([(ruleset_b[2], ruleset_b[5])]),\n (ruleset_a[3], ruleset_a[5]):\n frozenset([(ruleset_b[3], ruleset_b[6])]),\n (ruleset_a[3], ruleset_a[6], ruleset_a[8]):\n frozenset([(ruleset_b[3], ruleset_b[6])]),\n }\n result_ba = {\n (ruleset_b[0], ruleset_b[5]):\n frozenset([(ruleset_a[0], ruleset_a[5]),\n (ruleset_a[0], ruleset_a[6], ruleset_a[7])]),\n (ruleset_b[1], ruleset_b[6]):\n 
frozenset([(ruleset_a[1], ruleset_a[5]),\n (ruleset_a[1], ruleset_a[6], ruleset_a[8])]),\n (ruleset_b[2], ruleset_b[5]):\n frozenset([(ruleset_a[2], ruleset_a[5]),\n (ruleset_a[2], ruleset_a[6], ruleset_a[7])]),\n (ruleset_b[3], ruleset_b[6]):\n frozenset([(ruleset_a[3], ruleset_a[5]),\n (ruleset_a[3], ruleset_a[6], ruleset_a[8])]),\n }\n result_ca = {\n (ruleset_c[0],):\n frozenset([(ruleset_a[0], ruleset_a[6], ruleset_a[7]),\n (ruleset_a[1], ruleset_a[6], ruleset_a[8]),\n (ruleset_a[2], ruleset_a[6], ruleset_a[7]),\n (ruleset_a[3], ruleset_a[6], ruleset_a[8])])\n }\n equal_ab, diff_ab = check_equal(norm_a, norm_b, diff=True)\n equal_ca, diff_ca = check_equal(norm_c, norm_a, diff=True)\n self.assertFalse(equal_ab)\n self.assertFalse(equal_ca)\n\n paths_ab = find_conflicting_paths(diff_ab, single_a, single_b)\n paths_ba = find_conflicting_paths(diff_ab, single_b, single_a)\n paths_ca = find_conflicting_paths(diff_ca, single_c, single_a)\n self.assertEqual(paths_ab, result_ab)\n self.assertNotEqual(paths_ab, result_ba) # Sanity check\n self.assertEqual(paths_ba, result_ba)\n self.assertEqual(paths_ca, result_ca)",
"def find_instr(bv: BinaryView, addr: int):\n\n # Highlight the instruction in green\n highlight_instr(bv, addr, HighlightStandardColor.GreenHighlightColor)\n\n # Add the instruction to the list associated with the current view\n bv.session_data.mui_find.add(addr)",
"def visit_binary(spec):",
"def docking_vina(self, ligand_file, docking_pdbqt_file, docking_log_file):\n\n run_line = '%s' % self.docking_program\n run_line += ' --config %s' % self.dock_config_file\n run_line += ' --ligand %s' % ligand_file\n run_line += ' --out %s' % docking_pdbqt_file\n if self.output_save:\n run_line += ' --log %s' % (docking_log_file)\n e = None\n try:\n result = subprocess.check_output(run_line.split(),\n stderr=subprocess.STDOUT,\n timeout=self.timeout_dock,\n universal_newlines=True)\n except Exception as e:\n return [99.999], e\n\n result_lines = result.split('\\n')\n\n check_result = False\n affinity_list = list()\n for result_line in result_lines:\n if result_line.startswith('-----+'):\n check_result = True\n continue\n if not check_result:\n continue\n if result_line.startswith('Writing output'):\n break\n if result_line.startswith('Refine time'):\n break\n lis = result_line.strip().split()\n if not lis[0].isdigit():\n break\n# mode = int(lis[0])\n affinity = float(lis[1])\n affinity_list += [affinity]\n if len(affinity_list) == 0:\n e = 'WARNING: Could not find any conformations.'\n return [99.999], e\n return affinity_list, e",
"def _read_rs(self, process, append):\n print('read_rs thread started')\n for line in iter(process.stdout.readline, \"\"):\n if 'value 1' in line.decode('utf-8'):\n self.vol_up()\n if 'value -1' in line.decode('utf-8'):\n self.vol_down()\n print('read_rs thread stopped')",
"def scan(self):\n try:\n assert self.text_to_match\n except AssertionError:\n print('Please introduce an operation to calculate.')\n exit(1)\n\n for line in self.text_to_match: # taking each line from the code to match\n i = 0\n while i < len(line): # looping till the end of the string is reached\n string_segment = self.try_match(i, line)\n\n try:\n assert string_segment.token\n i += string_segment.end_string # set the new index to take the unmatched string\n self.list_tokens.append(string_segment) # append the found goods\n except AssertionError:\n print(f'SyntaxError: Unmatched Syntax -{line[i]}- at line: '\n f'\\n{line}')\n exit(1)",
"def check_script(vouts):\n for vout in [v for v in vouts[::-1] if v['hex'].startswith('6a')]:\n verb = BlockchainSpider.decode_op_return(vout['hex'])\n action = Spoolverb.from_verb(verb).action\n if action in Spoolverb.supported_actions:\n return verb\n raise Exception(\"Invalid ascribe transaction\")",
"async def infernal_search(sequence, job_id):\n sequence = sequence.replace('T', 'U').upper()\n\n params = {\n 'query': os.path.join(INFERNAL_QUERY_DIR, '%s' % job_id),\n 'output': os.path.join(INFERNAL_RESULTS_DIR, '%s' % job_id),\n 'tblout': os.path.join(INFERNAL_RESULTS_DIR, '%s.tblout' % job_id),\n 'rfam_cm': settings.RFAM_CM,\n 'cmscan': settings.CMSCAN_EXECUTABLE,\n 'cpu': 4,\n }\n\n # write out query in fasta format\n with open(params['query'], 'w') as f:\n f.write('>query\\n')\n f.write(sequence)\n f.write('\\n')\n\n command = ('{cmscan} '\n '--notextw ' # unlimit ASCII text output line width\n '--cut_ga ' # use CM's GA gathering cutoffs as reporting thresholds\n '--rfam ' # set heuristic filters at Rfam-level (fast)\n '--nohmmonly ' # never run HMM-only mode, not even for models with 0 basepairs\n '-o {output} ' # direct output to file\n '--tblout {tblout} ' # save parseable table of hits to file\n '--acc ' # prefer accessions over names in output\n '--cpu {cpu} ' # number of CPUs to use\n '{rfam_cm} ' # Rfam.cm file\n '{query} ' # query file\n ).format(**params)\n\n process = await asyncio.subprocess.create_subprocess_exec(\n *shlex.split(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE\n )\n\n return process, params['output']",
"def _read_pb(self, process, append):\n print('read_pb thread started')\n for line in iter(process.stdout.readline, \"\"):\n if 'value 1' in line.decode('utf-8'):\n self.vol_mute()\n print('read_pb thread stopped')",
"def wait_for_solution(m,lit_to_clauses):\n i = 2\n for line in sys.stdin:\n if (line.startswith(\"v \")): \n handle_solution_line(line,i)\n i += 1",
"def run_parse(dump_dir):\n global EV, SQL\n\n def gv(port, key):\n \"\"\" get value or none \"\"\"\n if not port:\n return None\n else:\n return None if not key in port else str(port[key])\n \n def find_cables(port1, port2):\n \"\"\" Find any cable in db that match guid/port pairs \n Warning: this may find crossed cables into existing ports\n Warning: will not find any removed cables\n port1: ib_diagnostics formatted port\n port2: ib_diagnostics formatted port\n \"\"\"\n\n vlog(5, 'find_cable %s/P%s %s/P%s' % (gv(port1,'guid'), gv(port1,'port'), gv(port2,'guid'), gv(port2,'port')))\n if not port1 and port2:\n port1 = port2\n port2 = None\n\n if port2 and int(port1['guid'], 16) > int(port2['guid'], 16):\n #always order the ports by largest gid as port 2\n #order doesnt matter as long as it is stable\n port1, port2 = port2, port1\n\n if not port1:\n vlog(1, 'Error: attempt to find cable no ports? %s %s' % (port1, port2))\n return None\n\n #Attempt to find the any matching cable by the ports\n SQL.execute('''\n SELECT \n cables.cid as cid,\n max(cp1.hca, cp2.hca) as has_hca,\n cables.SN as SN,\n cables.PN as PN,\n cables.state as state\n\n FROM \n cables\n \n INNER JOIN\n cable_ports as cp1\n ON\n cables.cid = cp1.cid and\n cp1.guid = ? and\n cp1.port = ?\n\n LEFT OUTER JOIN\n cable_ports as cp2\n ON\n ? IS NOT NULL and\n cables.cid = cp2.cid and\n cp2.guid = ? and\n cp2.port = ? \n\n WHERE\n cables.state != 'removed'\n\n GROUP BY cables.cid\n ORDER BY cables.ctime DESC\n ''',(\n convert_guid_intstr(port1['guid']),\n int(port1['port']),\n '1' if port2 else None,\n convert_guid_intstr(port2['guid']) if port2 else None, \n int(port2['port']) if port2 else None,\n ))\n\n cables = []\n for row in SQL.fetchall():\n cid = int(row['cid'])\n if not cid is None and cid > 0:\n cables.append({ \n 'cid': row['cid'],\n 'has_hca': True if row['has_hca'] == 1 else False,\n 'SN': row['SN'],\n 'PN': row['PN']\n })\n else:\n vlog(4, 'Unexpected invalid cable c%s: %s' % (cid, row))\n\n return cables\n\n def insert_cable(port1, port2, timestamp):\n \"\"\" insert new cable into db \"\"\"\n\n plabel = None\n port_plabel = { 'port1': None, 'port2': None }\n\n SQL.execute('''\n INSERT INTO \n cables \n (\n ctime,\n length,\n SN,\n PN,\n state,\n suspected,\n flabel,\n plabel\n ) VALUES (\n ?, ?, ?, ?, ?, ?, ?, ?\n );''', (\n timestamp,\n gv(port1,'LengthDesc'),\n gv(port1,'SN'),\n gv(port1,'PN'), \n 'watch', #watching all cables by default\n 0, #new cables havent been suspected yet\n '%s <--> %s' % (ib_diagnostics.port_pretty(port1), ib_diagnostics.port_pretty(port2)),\n plabel\n ));\n cid = SQL.lastrowid\n\n #insert the ports\n for key,port in list({'port1': port1, 'port2': port2}.items()):\n if port:\n SQL.execute('''\n INSERT INTO cable_ports (\n cid,\n guid,\n name,\n port,\n hca,\n plabel,\n flabel\n ) VALUES (\n ?, ?, ?, ?, ?, ?, ?\n );\n ''', (\n cid,\n convert_guid_intstr(port['guid']),\n port['name'],\n int(port['port']),\n port['type'] == \"CA\",\n port_plabel[key],\n ib_diagnostics.port_pretty(port), \n ))\n cpid = SQL.lastrowid\n\n vlog(5, 'create cable(%s) %s <--> %s' % (cid, ib_diagnostics.port_pretty(port1),ib_diagnostics.port_pretty(port2)))\n return cid\n\n\n \n #ports from ib_diagnostics should not leave this function\n ports = []\n #dict to hold the issues found\n issues = []\n #timestamp to apply to the cables and issues \n timestamp = time.time()\n\n with open('%s/%s' % (dump_dir,'timestamp.txt') , 'r') as fds:\n for line in fds:\n try:\n timestamp = int(line.strip())\n except:\n pass\n\n 
vlog(5, 'parse dir timestamp: %s = %s' % (timestamp, datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')))\n\n with open('%s/%s' % (dump_dir,'ibnetdiscover.log') , 'r') as fds:\n ib_diagnostics.parse_ibnetdiscover_cables(ports, fds.read()) \n\n with open('%s/ibdiagnet2.db_csv' % (dump_dir), 'r') as fds:\n ib_diagnostics.parse_ibdiagnet_csv(ports, issues, fds)\n\n with open('%s/%s' % (dump_dir,'ibdiagnet2.log') , 'r') as fds:\n ib_diagnostics.parse_ibdiagnet(ports, issues, fds.read()) \n\n if os.path.isfile('%s/%s' % (dump_dir,'ibdiagnet2.cables')):\n with open('%s/%s' % (dump_dir,'ibdiagnet2.cables'), 'r') as fds:\n ib_diagnostics.parse_ibdiagnet_cables(ports, issues, fds.read()) \n\n p_ibcv2 = '%s/%s' % (dump_dir,'sgi-ibcv2.log') #optional\n if os.path.isfile(p_ibcv2):\n with open(p_ibcv2, 'r') as fds:\n ib_diagnostics.parse_sgi_ibcv2(ports, issues, fds.read()) \n pass\n\n ibsp = cluster_info.get_ib_speed()\n ib_diagnostics.find_underperforming_cables ( ports, issues, ibsp['speed'], ibsp['width'])\n\n #add every known cable to database\n #slow but keeps sane list of all cables forever for issue tracking\n known_cables=[] #track every that is found\n hca_cables=[] #track every that has an hca\n all_replaced_cables=set() #every cable replaced so far\n for port in ports:\n if 'cable_id' in port and port['cable_id']:\n #ignore duplicate found cables\n continue \n\n port1 = port\n port2 = port['connection']\n\n cid = None\n hca_found = None\n replaced_cables=set()\n cables = find_cables(port1, port2)\n #If the current existing cable has a SN/PN, then favor the cable that matches\n if gv(port1, 'SN') and gv(port1, 'PN'):\n #find the newest matching cable\n for cable in cables:\n if not cable['cid'] in all_replaced_cables:\n if cable['SN'] == gv(port1, 'SN') and \\\n cable['PN'] == gv(port1, 'PN') and \\\n (cid is None or cable['cid'] > cid) :\n cid = cable['cid']\n hca_found = cable['has_hca']\n vlog(5, 'Found matching cable c%s' % (cid))\n else:\n vlog(5, 'Non-matching cable c%s rejected SN=%s PN=%s' % (cable['cid'],cable['SN'],cable['PN']))\n\n #mark all other cables as replaced\n if cid:\n for cable in cables:\n if not cable['cid'] in all_replaced_cables and cable['cid'] != cid:\n #found old cable w/ different SN/PN\n replaced_cables.add(cable['cid'])\n vlog(5, 'Found replaced cable c%s' % (cable['cid']))\n else: \n #if the detected live cable lacks a SN/PN, favor the cable that has one\n #ignore any replacements since current cable is unplugged or half-dead\n #always favor newer cables\n cid_nosn = None\n cid_nosn_hca_found = None\n for cable in cables:\n if not cable['cid'] in all_replaced_cables:\n if cable['SN'] and cable['PN'] and (cid is None or cable['cid'] > cid):\n cid = cable['cid']\n hca_found = cable['has_hca']\n vlog(5, 'Found matching cable c%s with serial: %s product: %s' % (cid,cable['SN'],cable['PN']))\n else:\n #record newest cable found if no SN/PN are found\n if cid_nosn is None or cable['cid'] > cid_nosn:\n cid_nosn = cable['cid']\n cid_nosn_hca_found = cable['has_hca']\n vlog(5, 'Found matching cable c%s without serial' % (cid_nosn))\n\n if cid is None and cid_nosn:\n #unable to find a cable with a SN/PN, just take first cable found\n cid = cid_nosn \n hca_found = cid_nosn_hca_found \n vlog(5, 'Found matching cable c%s without serial' % (cid))\n \n if cid is None: #create the cable\n vlog(5, 'Unable to find matching cable. 
Creating new cable')\n cid = insert_cable(port1, port2, timestamp)\n hca_found = port1['type'] == \"CA\" or (port2 and port2['type'] == \"CA\") \n\n #mark all the replaced cables (hope its not more than one...)\n if replaced_cables:\n for rcid in replaced_cables:\n if not rcid in all_replaced_cables:\n all_replaced_cables.add(rcid)\n mark_replaced_cable(\n rcid, \n cid, \n 'Detected new cable c%s in same physical location' % (cid)\n )\n\n #record cid in each port to avoid relookup\n port1['cable_id'] = cid\n if port2:\n port2['cable_id'] = cid\n\n known_cables.append(cid)\n if hca_found:\n hca_cables.append(cid)\n\n #Find any cables that are known but not parsed this time around (aka went dark)\n #ignore any cables in a disabled state\n missing_cables = set()\n disabled_cables = set()\n removed_cables = set()\n SQL.execute('''\n SELECT \n cid,\n state\n FROM \n cables\n ''')\n for row in SQL.fetchall():\n if row['state'] == 'disabled':\n disabled_cables.add(int(row['cid'])) \n if row['state'] == 'removed':\n removed_cables.add(int(row['cid'])) \n \n if row['cid'] in known_cables:\n #cable found, mark it online and when\n SQL.execute('''\n UPDATE\n cables \n SET\n online = 1,\n onlineTime = ?\n WHERE\n cid = ?\n ;''', (\n int(timestamp),\n row['cid']\n ));\n \n else: #cable not found\n if not int(row['cid']) in disabled_cables:\n #only note cables if they should not be missing\n missing_cables.add(int(row['cid']))\n\n #Mark cable offline\n SQL.execute('''\n UPDATE\n cables \n SET\n online = 0\n WHERE\n cid = ?\n ;''', (\n row['cid'],\n ));\n\n for cid in missing_cables:\n #Verify missing cables actually matter: ignore single port cables (aka unconnected)\n #ignore missing cables connected to HCAs, nodes go up and down all the time\n SQL.execute('''\n SELECT \n cables.cid,\n cp1.cpid as cp1_cpid,\n cp2.cpid as cp2_cpid\n FROM \n cables\n\n INNER JOIN\n cable_ports as cp1\n ON\n cables.cid = ? 
and\n cables.cid = cp1.cid and\n cp1.hca != 0\n\n LEFT OUTER JOIN\n cable_ports as cp2\n ON\n cables.cid = cp2.cid and\n cp1.cpid != cp2.cpid and\n cp2.hca != 0\n \n LIMIT 1 \n ''', (\n cid,\n ))\n for row in SQL.fetchall():\n if row['cp1_cpid'] and row['cp2_cpid']:\n #cable went dark\n add_issue(\n 'missing',\n cid,\n 'Cable went missing',\n None,\n 'ibnetdiscover -p',\n timestamp\n ) \n else:\n vlog(3, 'Ignoring missing single port Cable %s' % (cid))\n\n fabric_disabled = set()\n ticket_issues = []\n for issue in issues:\n cid = None\n\n #set cable from port which was just resolved\n for iport in issue['ports']:\n if iport and 'cable_id' in iport:\n cid = iport['cable_id']\n\n if cid and issue['type'] == 'disabled':\n fabric_disabled.add(cid)\n \n if issue['type'] == 'missing' and cid in hca_cables:\n vlog(3, 'ignoring missing cable c%s with an hca' % (cid))\n continue\n\n if issue['type'] in ['missing','disabled'] and cid in removed_cables:\n vlog(3, 'ignoring missing removed cable c%s' % (cid))\n continue\n\n if issue['type'] in ['missing','disabled'] and cid in disabled_cables:\n vlog(3, 'ignoring missing disabled or removed cable c%s' % (cid))\n continue\n \n vlog(5, 'issue detected: %s' % ([\n issue['type'],\n 'c%s' % cid if cid else None,\n issue['issue'],\n issue['raw'],\n issue['source'],\n timestamp\n ]))\n\n if cid:\n #hand over cleaned up info for issues\n add_issue(\n issue['type'],\n cid,\n issue['issue'],\n issue['raw'],\n dump_dir,\n timestamp\n )\n else:\n #issues without a known cable will be aggregated into a single ticket\n ticket_issues.append(issue['raw'])\n\n #detect cables that should be disabled but are not any more\n #print 'fabric disabled: %s' % (fabric_disabled)\n #print 'expected disabled: %s' % (disabled_cables)\n for cid in disabled_cables:\n if not cid in fabric_disabled:\n vlog(3, 'Cable that should be disabled found to be enabled c%s' % (cid))\n add_issue(\n 'enabled',\n cid,\n 'Atleast one port in cable detected as enabled',\n 'csv state of cable',\n dump_dir,\n timestamp\n ) \n #enable_cable(cid, 'Cable detected as enabled')\n\n SQL.execute('VACUUM;')\n\n #create ticket if are non cable issues\n if ticket_issues and not DISABLE_TICKETS:\n tid = EV.create( \n 'ssgev',\n 'ssg',\n None,\n '%s: Infiniband Issues' % (cluster_info.get_cluster_name_formal()), \n '''\n %s issues have been detected against the Infinband fabric for %s.\n\n Raw Data: %s\n ''' % (\n len(ticket_issues),\n cluster_info.get_cluster_name_formal(),\n dump_dir\n ), { \n 'HELP_LOCATION': EV.get_field_value_to_field_key('HELP_LOCATION', 'NWSC'),\n 'HELP_HOSTNAME': EV.get_field_value_to_field_key(\n 'HELP_HOSTNAME', \n cluster_info.get_cluster_name_formal()\n ),\n 'HELP_HOSTNAME_OTHER': 'Infiniband Fabric'\n }) \n\n vlog(3, 'Created Ticket %s against fabric issues' % (tid))\n\n #combine the issues and try to not crash EV\n i = 0\n buf = ''\n for msg in ticket_issues:\n buf += msg + \"\\n\"\n i += 1\n\n if i == 200: #magic guessed number that EV can take\n EV.add_resolver_comment(tid, buf)\n buf = ''\n i = 0\n if buf != '':\n EV.add_resolver_comment(tid, buf)",
"def process(opcode):\n opcode.process()",
"def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks",
"def test_vasp_immigrant(immigrant_with_builder):\n immigrant, inputs = immigrant_with_builder\n\n # We need to set the parser explicitly\n inputs.metadata['options']['parser_name'] = 'vasp.vasp'\n result, node = run.get_node(immigrant, **inputs)\n assert node.exit_status == 0\n\n expected_output_nodes = {'misc', 'remote_folder', 'retrieved'}\n assert expected_output_nodes.issubset(set(result))",
"def analyze_dynamic(self, test_input):\n # Execute binary with qemu user mode while taking care of libraries\n # collect dynamic translation block execution information\n\n # 1. create a named pipe for qemu to write to\n self.mkfifo()\n\n # 2. build command and launch QEMU\n # cmd = self.build_qemu_cmd(test_input)\n cmdfile = open('command_file', 'r')\n run_cmd = cmdfile.readline()\n\n if '|' in run_cmd:\n cmd = run_cmd.format(test_input, self._fifo_name)\n else:\n cmd = run_cmd.format(self._fifo_name, test_input)\n\n print cmd\n process = Process(target=run_command_noret, args=[cmd, 120])\n process.start()\n\n # 3. read from fifo after QEMU finished executing\n try:\n self.parse_trace()\n except Exception as e:\n traceback.print_exc()\n print 'error when parsing qemu trace'\n print getattr(e, 'message', repr(e))\n raise e\n finally:\n os.remove(self._fifo_name)\n os.rmdir(self._tmpdir)",
"def scan_binary(self, filepath):\n with open(filepath, \"rb\") as input_file:\n return self.__make_api_call('scan/binary', files={'input_file': input_file}, method='POST')",
"def issue_binary_command (self, command_id, ch=None, BCAST=0, ALLCH=0, ADDM=0, RW=0, ACT=0, DEXT=0, value_int=0, addr_id_num=0x0000, n_lines_requested=2**31, target_errors=None, output_regex='(.*)', special_timeout = None):\r\n\t\t\r\n\t\t\r\n\t\tdef get_val(i):\r\n\t\t\t\"\"\"Function to convert uint16 to bytearray([uint8,uint8])\"\"\"\r\n\t\t\treturn bytearray([int(i/256),int(i)-int(i/256)*256])\r\n\t\t\r\n\t\tdef parity_odd(x):\r\n\t\t\t\"\"\"Function to compute whether a byte's parity is odd.\"\"\"\r\n\t\t\tx = x ^ (x >> 4)\r\n\t\t\tx = x ^ (x >> 2)\r\n\t\t\tx = x ^ (x >> 1)\r\n\t\t\treturn x & 1\r\n\t\t\r\n\t\t\r\n\t\t# Format header byte\r\n\t\theader_byte = 0x80\r\n\t\theader_byte += BCAST*0x40\r\n\t\theader_byte += ALLCH*0x20\r\n\t\theader_byte += ADDM*0x10\r\n\t\theader_byte += RW*0x08\r\n\t\theader_byte += ACT*0x04\r\n\t\theader_byte += DEXT*0x02\r\n\t\theader_byte += parity_odd(header_byte)\r\n\t\t\r\n\t\t\r\n\t\t# Format command byte\r\n\t\tif isinstance(command_id, str):\r\n\t\t\tcommand_byte = CMD_CODES[command_id.upper()]\r\n\t\telif isinstance(command_id, int):\r\n\t\t\tcommand_byte = command_id\r\n\t\t\r\n\t\t\r\n\t\t# Format channel address\r\n\t\taddress_bytes = bytearray()\r\n\t\tif ch is None:\r\n\t\t\tch = 0\r\n\t\tif ADDM == 1:\r\n\t\t\taddress_bytes.extend(get_val(addr_id_num))\r\n\t\t\taddress_bytes.append(ch)\r\n\t\telif ADDM == 0:\r\n\t\t\taddress_bytes.append(0)\r\n\t\t\taddress_bytes.extend(get_val(ch))\r\n\t\t\r\n\t\t\r\n\t\t# Format value bytes\r\n\t\t# value_int can be either an int or a list of ints (for vectorised input, DEXT = 1)\r\n\t\tdata_bytes = bytearray()\r\n\t\t\r\n\t\tif DEXT == 1:\r\n\t\t\t# Handle data extension length\r\n\t\t\tif isinstance(value_int, list):\r\n\t\t\t\tn_dext_words = len(value_int)\r\n\t\t\telse:\r\n\t\t\t\tn_dext_words = 1\r\n\t\t\tif n_dext_words > 0xFFFF:\r\n\t\t\t\tn_dext_words = 0xFFFF\r\n\t\t\tdata_bytes.extend(get_val(n_dext_words))\r\n\t\t\r\n\t\tif isinstance(value_int, int):\r\n\t\t\tdata_bytes.extend(get_val(value_int))\r\n\t\t\r\n\t\telif isinstance(value_int, list) and all([isinstance(e ,int) for e in value_int]):\r\n\t\t\tfor i,e in enumerate(value_int):\r\n\t\t\t\tdata_bytes.extend(get_val(e))\r\n\t\t\t\tif i == n_dext_words:\r\n\t\t\t\t\tbreak\r\n\t\t\r\n\t\telse:\r\n\t\t\traise AttributeError(\"value_int must be of type int, or of type list with all elements of type int (received type {:})\".format(type(value_int) ) )\r\n\t\t\r\n\t\t\r\n\t\t# Compose command byte string\r\n\t\ttx_str = bytearray()\r\n\t\ttx_str.append(header_byte)\t\t\t\t# Header byte\r\n\t\ttx_str.append(command_byte)\t\t\t\t# Command byte\r\n\t\ttx_str.extend(address_bytes)\t\t\t# Three bytes of channel address\r\n\t\ttx_str.extend(data_bytes)\t\t\t\t# 2 (DEXT=0) or 2*N+1 (DEXT=1) bytes of data\r\n\t\t\r\n\t\t# Transmit it\r\n\t\tself.transmit(tx_str, binary_mode = True)\r\n\t\t\r\n\t\t\r\n\t\t# Function to retry this command (in case of comms error)\r\n\t\tdef retry_function():\r\n\t\t\treturn self.issue_binary_command (command_id, ch, BCAST, ALLCH, ADDM, RW, ACT, DEXT, value_int, addr_id_num, n_lines_requested, target_errors, output_regex, special_timeout)\r\n\t\t\r\n\t\t# Wait for response\r\n\t\tif RW==1 or ((RW==0 or ACT) and self.wait_for_responses):\r\n\t\t\ttry:\r\n\t\t\t\tresult = self._issue_command_receive_response (retry_function, n_lines_requested, target_errors, output_regex, special_timeout)\r\n\t\t\t\treturn result\r\n\t\t\texcept RuntimeError as e:\r\n\t\t\t\tif RW == 1:\r\n\t\t\t\t\t# If we want a return value, raise an 
error\r\n\t\t\t\t\traise RuntimeError (\"Failed to read with command '{0}'. {1}\".format(tx_str, e))\r\n\t\t\t\telse:\r\n\t\t\t\t\t# If we are setting something, just warn the user\r\n\t\t\t\t\tprint(\"Qontroller.issue_command: Warning: Failed to write with command '{0}'. {1}\".format(tx_str, e))\r\n\t\t\t\t\treturn None",
"def match(\n input_handles, barcodes_handle, mismatch, use_edit, path='.',\n filter_multiple=False, directional=False):\n filenames = list(map(lambda x: _name(x), input_handles))\n queue = Queue()\n default_handles = _open_files(path, filenames, 'UNKNOWN', queue)\n multiple_handles = _open_files(path, filenames, 'MULTIPLE', queue)\n\n indel_score = 1\n if not use_edit:\n indel_score = 1000\n\n barcodes = []\n for line in map(lambda x: x.strip().split(), barcodes_handle.readlines()):\n try:\n name = line.pop(0)\n except (IndexError, ValueError):\n raise ValueError('invalid barcodes file format')\n barcodes.append((_open_files(path, filenames, name, queue), line))\n\n file_format = guess_file_format(input_handles[0])\n readers = list(map(\n lambda x: SeqIO.parse(x, file_format), input_handles))\n\n while True:\n records = list(map(lambda x: next(x), readers))\n if not records:\n break\n\n reference = str(records[0].seq)\n if directional:\n reference_rc = reverse_complement(reference)\n\n found_handles = []\n for handles, barcode in barcodes:\n if multi_align(reference, barcode, mismatch, indel_score):\n found_handles.append(handles)\n elif directional and multi_align(\n reference_rc, barcode, mismatch, indel_score):\n found_handles.append(handles)\n\n if found_handles:\n if not filter_multiple or len(found_handles) == 1:\n for handles in found_handles:\n _write(handles, records, file_format)\n else:\n _write(multiple_handles, records, file_format)\n else:\n _write(default_handles, records, file_format)\n\n queue.flush()",
"def vi(target: QubitSetInput) -> Iterable[Instruction]:\n return [Instruction(Vi(), target=qubit) for qubit in QubitSet(target)]",
"def is_match(self, command_bytes):",
"def v(target: QubitSetInput) -> Iterable[Instruction]:\n return [Instruction(V(), target=qubit) for qubit in QubitSet(target)]",
"def Control(inst, itype, ctrl):\n\n #\n # opcode, funct3 and func7 are computed once here so that duplicate verilog\n # code isn't produced for every loop iteration below.\n #\n\n opcode = Opcode(inst)\n funct3 = Funct3(inst)\n funct7 = Funct7(inst)\n\n #\n # Atlas requires all wire signals to be fully initialized or it fails in the\n # emitter. An easy solution is to do a first assignment to a default value,\n # and later assignments will take precedence.\n #\n\n itype <<= 0\n\n ctrl.ex.alu_src <<= 0\n ctrl.ex.alu_op <<= 0\n ctrl.ex.lui <<= False\n ctrl.ex.auipc <<= False\n ctrl.ex.jalr <<= False\n ctrl.ex.funct3 <<= funct3\n ctrl.ex.funct7 <<= funct7\n\n ctrl.mem.branch <<= False\n ctrl.mem.branch_type <<= 0\n ctrl.mem.jal <<= False\n ctrl.mem.mem_write <<= False\n ctrl.mem.mem_read <<= False\n\n ctrl.wb.mem_to_reg <<= False\n ctrl.wb.write_reg <<= False\n\n #\n # This part here really shows the power / advantage of Atlas's meta-\n # programming abilities. Each instruction declared above is considered, and\n # a \"match\" signal is produced. Upon a match, the control signals for that\n # function are assigned to the output of the idecode stage.\n #\n\n for name in instructions:\n inst_spec = instructions[name]\n\n #\n # For some instructions, funct3 and funct7 don't need to be matched\n # (and so are marked as Python \"None\"). For these cases, funct3_match\n # and/or funct7_match are just set to 1 (always true).\n #\n\n opcode_match = opcode == inst_spec.pattern.opcode\n\n if inst_spec.pattern.funct3 is None:\n funct3_match = 1\n else:\n funct3_match = funct3 == inst_spec.pattern.funct3\n\n if inst_spec.pattern.funct7 is None:\n funct7_match = 1\n else:\n funct7_match = funct7 == inst_spec.pattern.funct7\n\n with opcode_match & funct3_match & funct7_match:\n SetControlSignals(inst_spec, itype, ctrl)",
"def parse_for_indicators(line):\n # Initialize Variables\n indicators_present = False\n\n # Checking each indicator type\n md5_list = re.findall(r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])', line)\n if md5_list:\n indicators_present = True\n for md5 in md5_list:\n log.info(\" (MD5 found!), Inspecting {} via OSINT\".format(md5))\n if inspect_indicator(md5, 'HASH'):\n log.info(\" RESULTS FOUND: {} [Adding to results]\".format(md5))\n else:\n log.info(\" Did not inspect {} or it has been inspected previously\".format(md5))\n else:\n log.info(\" No MD5 found in line: {} \".format(line.rstrip()))\n\n sha1_list = re.findall(r'(?i)(?<![a-z0-9])[a-f0-9]{40}(?![a-z0-9])', line)\n if sha1_list:\n indicators_present = True\n for sha1 in sha1_list:\n log.info(\" (SHA1 found!), Inspecting {} via OSINT\".format(sha1))\n if inspect_indicator(sha1, 'HASH'):\n log.info(\" RESULTS FOUND: {} [Adding to results]\".format(md5))\n else:\n log.info(\" Did not find results {} or it has been inspected previously\".format(sha1))\n else:\n log.info(\" No SHA1 found in line: {}\".format(line.rstrip()))\n\n ip_list = re.findall(r'\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b', line)\n if ip_list:\n indicators_present = True\n for ip in ip_list:\n log.info(\" (IP found!), Inspecting {} via OSINT\".format(ip))\n if inspect_indicator(ip, 'IP'):\n log.info(\" RESULTS FOUND: {} [Adding to results]\".format(ip))\n else:\n log.info(\" Did not inspect {} or it has been inspected previously\".format(ip))\n else:\n log.info(\" No IP found in line: {}\".format(line.rstrip()))\n\n url_list = re.findall(r'(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', line)\n if url_list:\n indicators_present = True\n log.info(\" (URLs found!): {}\".format(url_list))\n for url in url_list:\n log.info(\" (URL found!), Inspecting {} via OSINT\".format(url))\n if inspect_indicator(url, 'URL'):\n log.info(\" RESULTS FOUND: {} [Adding to results]\".format(url))\n else:\n log.info(\" Did not inspect {} or it has been inspected previously\".format(url))\n if HASHLEE_VERBOSE:\n if url in CHECKED_INDICATORS:\n print(\"[INDICATOR PREVIOUSLY CHECKED] {}\".format(url.replace(\".\", \"[.]\")))\n else:\n print(\"[NO THREAT REFENCE FOUND] {}\".format(url.replace(\".\", \"[.]\")))\n else:\n log.info(\" No URL found in line: {}\".format(line.rstrip()))\n\n # Return whether we were able to pull indicators or not\n if indicators_present == True:\n return True\n else:\n return False",
"def open_interfaces(ipadd):\n global the_vx_ifc #pylint: disable=W0603,C0103\n print('opening VXI-11 at %s ...'%ipadd, end=' ')\n the_vx_ifc = vxi11.Instrument(ipadd)\n print('done')",
"def test_with_run_command(self):\n self.build()\n self.runCmd(\"file \" + self.getBuildArtifact(\"a.out\"), CURRENT_EXECUTABLE_SET)\n\n lldbutil.run_break_set_by_file_and_line(\n self, \"main.cpp\", self.line, num_expected_locations=1, loc_exact=True)\n\n self.runCmd(\"run\", RUN_SUCCEEDED)\n\n # The stop reason of the thread should be breakpoint.\n self.expect(\"thread list\", STOPPED_DUE_TO_BREAKPOINT,\n substrs=['stopped',\n 'stop reason = breakpoint'])\n\n # This is the function to remove the custom formats in order to have a\n # clean slate for the next test case.\n def cleanup():\n pass\n\n # Execute the cleanup function during test case tear down.\n self.addTearDownHook(cleanup)\n\n pass # my code never fails\n\n v = self.frame().FindVariable(\"v\")\n v.SetPreferSyntheticValue(True)\n v.SetFormat(lldb.eFormatVectorOfFloat32)\n\n if self.TraceOn():\n print(v)\n\n self.assertTrue(\n v.GetNumChildren() == 4,\n \"v as float32[] has 4 children\")\n self.assertTrue(v.GetChildAtIndex(0).GetData().float[\n 0] == 1.25, \"child 0 == 1.25\")\n self.assertTrue(v.GetChildAtIndex(1).GetData().float[\n 0] == 1.25, \"child 1 == 1.25\")\n self.assertTrue(v.GetChildAtIndex(2).GetData().float[\n 0] == 2.50, \"child 2 == 2.50\")\n self.assertTrue(v.GetChildAtIndex(3).GetData().float[\n 0] == 2.50, \"child 3 == 2.50\")\n\n self.expect(\"expr -f int16_t[] -- v\",\n substrs=['(0, 16288, 0, 16288, 0, 16416, 0, 16416)'])\n self.expect(\"expr -f uint128_t[] -- v\",\n substrs=['(85236745249553456609335044694184296448)'])\n self.expect(\n \"expr -f float32[] -- v\",\n substrs=['(1.25, 1.25, 2.5, 2.5)'])\n\n oldValue = v.GetChildAtIndex(0).GetValue()\n v.SetFormat(lldb.eFormatHex)\n newValue = v.GetChildAtIndex(0).GetValue()\n self.assertFalse(oldValue == newValue,\n \"values did not change along with format\")\n\n v.SetFormat(lldb.eFormatVectorOfFloat32)\n oldValueAgain = v.GetChildAtIndex(0).GetValue()\n self.assertTrue(\n oldValue == oldValueAgain,\n \"same format but different values\")"
] | [
"0.6922588",
"0.6637018",
"0.6202399",
"0.5444081",
"0.51181835",
"0.4728371",
"0.453623",
"0.45278928",
"0.4521745",
"0.45122913",
"0.45079392",
"0.44740847",
"0.446948",
"0.44226426",
"0.44106132",
"0.43992066",
"0.43902928",
"0.43865323",
"0.436398",
"0.43589398",
"0.4312458",
"0.4299072",
"0.4288024",
"0.42858815",
"0.4285869",
"0.4251327",
"0.4232774",
"0.42245454",
"0.41992536",
"0.4191852"
] | 0.7095307 | 0 |
Starts a VAST matcher. Enqueues all matches from VAST to the sightings_queue. vast_binary: The VAST binary command to use with PyVAST. vast_endpoint: The endpoint of a running VAST node. sightings_queue: The queue to put new sightings into. retro_match: Boolean flag to use retro-matching over live-matching | async def live_match_vast(
vast_binary: str, vast_endpoint: str, sightings_queue: asyncio.Queue
):
global logger, matcher_name
vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)
matcher_name = "threatbus-" + "".join(random.choice(letters) for i in range(10))
proc = await vast.matcher().start(name=matcher_name).exec()
# returncode is None as long as the process did not terminate yet
while proc.returncode is None:
data = await proc.stdout.readline()
if not data:
if not await vast.test_connection():
logger.error("Lost connection to VAST, cannot live-match")
# TODO reconnect
continue
vast_sighting = data.decode("utf-8").rstrip()
sighting = matcher_result_to_sighting(vast_sighting)
if not sighting:
logger.error(f"Cannot parse sighting-output from VAST: {vast_sighting}")
continue
g_live_matcher_sightings.inc()
await sightings_queue.put(sighting)
stderr = await proc.stderr.read()
if stderr:
logger.error(
"VAST matcher process exited with message: {}".format(stderr.decode())
)
logger.critical("Unexpected exit of VAST matcher process.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def retro_match_vast(\n vast_binary: str,\n vast_endpoint: str,\n retro_match_max_events: int,\n retro_match_timeout: float,\n indicator: Indicator,\n sightings_queue: asyncio.Queue,\n):\n start = time.time()\n query = indicator_to_vast_query(indicator)\n if not query:\n g_retro_match_backlog.dec()\n return\n global logger, max_open_tasks\n async with max_open_tasks:\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n kwargs = {}\n if retro_match_max_events > 0:\n kwargs[\"max_events\"] = retro_match_max_events\n proc = await vast.export(**kwargs).json(query).exec()\n retro_result = None\n try:\n retro_result = await asyncio.wait_for(\n proc.communicate(),\n timeout=retro_match_timeout if retro_match_timeout > 0 else None,\n )\n except asyncio.TimeoutError:\n proc.terminate()\n logger.error(\n f\"Timeout after {retro_match_timeout}s in retro-query for indicator {indicator}\"\n )\n if not retro_result or len(retro_result) != 2:\n g_retro_match_backlog.dec()\n return\n reported = 0\n stdout = retro_result[0]\n for line in stdout.decode().split(\"\\n\"):\n line = line.rstrip()\n if line:\n sighting = query_result_to_sighting(line, indicator)\n if not sighting:\n logger.error(f\"Could not parse VAST query result: {line}\")\n continue\n reported += 1\n await sightings_queue.put(sighting)\n logger.debug(f\"Retro-matched {reported} sighting(s) for indicator: {indicator}\")\n s_retro_matches_per_ioc.observe(reported)\n s_retro_query_time_s_per_ioc.observe(time.time() - start)\n g_retro_match_backlog.dec()",
"async def match_intel(\n vast_binary: str,\n vast_endpoint: str,\n indicator_queue: asyncio.Queue,\n sightings_queue: asyncio.Queue,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n):\n global logger, open_tasks\n while True:\n msg = await indicator_queue.get()\n try:\n indicator = parse(msg, allow_custom=True)\n except Exception as e:\n logger.warning(f\"Failed to decode STIX-2 Indicator item {msg}: {e}\")\n continue\n if type(indicator) is not Indicator:\n logger.warning(\n f\"Ignoring unknown message type, expected STIX-2 Indicator: {type(indicator)}\"\n )\n continue\n if (\n ThreatBusSTIX2Constants.X_THREATBUS_UPDATE.value\n in indicator.object_properties()\n and indicator.x_threatbus_update == Operation.REMOVE.value\n ):\n g_iocs_removed.inc()\n if live_match:\n asyncio.create_task(\n remove_vast_ioc(vast_binary, vast_endpoint, indicator)\n )\n else:\n # add new Indicator to matcher / query Indicator retrospectively\n g_iocs_added.inc()\n if retro_match:\n g_retro_match_backlog.inc()\n asyncio.create_task(\n retro_match_vast(\n vast_binary,\n vast_endpoint,\n retro_match_max_events,\n retro_match_timeout,\n indicator,\n sightings_queue,\n )\n )\n if live_match:\n asyncio.create_task(\n ingest_vast_ioc(vast_binary, vast_endpoint, indicator)\n )\n indicator_queue.task_done()",
"async def start(\n vast_binary: str,\n vast_endpoint: str,\n zmq_endpoint: str,\n snapshot: int,\n live_match: bool,\n retro_match: bool,\n retro_match_max_events: int,\n retro_match_timeout: float,\n max_open_files: int,\n metrics_interval: int,\n metrics_filename: str,\n transform_cmd: str = None,\n sink: str = None,\n):\n global logger, async_tasks, p2p_topic, max_open_tasks, metrics\n # needs to be created inside the same eventloop where it is used\n max_open_tasks = asyncio.Semaphore(max_open_files)\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n assert await vast.test_connection() is True, \"Cannot connect to VAST\"\n\n logger.debug(f\"Calling Threat Bus management endpoint {zmq_endpoint}\")\n reply = subscribe(zmq_endpoint, \"stix2/indicator\", snapshot)\n if not reply_is_success(reply):\n logger.error(\"Subscription failed\")\n return\n pub_endpoint = reply.get(\"pub_endpoint\", None)\n sub_endpoint = reply.get(\"sub_endpoint\", None)\n topic = reply.get(\"topic\", None)\n if not pub_endpoint or not sub_endpoint or not topic:\n logger.error(\"Subscription failed\")\n return\n logger.info(f\"Subscription successful. New p2p_topic: {topic}\")\n if p2p_topic:\n # The 'start' function is called as result of a restart\n # Unsubscribe the old topic as soon as we get a working connection\n logger.info(\"Cleaning up old p2p_topic subscription ...\")\n unsubscribe(zmq_endpoint, p2p_topic)\n atexit.unregister(unsubscribe)\n p2p_topic = topic\n atexit.register(unsubscribe, zmq_endpoint, topic)\n\n async_tasks.append(\n asyncio.create_task(heartbeat(zmq_endpoint, p2p_topic, interval=5))\n )\n\n indicator_queue = asyncio.Queue()\n sightings_queue = asyncio.Queue()\n async_tasks.append(\n asyncio.create_task(\n report_sightings(sub_endpoint, sightings_queue, transform_cmd, sink)\n )\n )\n\n async_tasks.append(\n asyncio.create_task(receive(pub_endpoint, p2p_topic, indicator_queue))\n )\n\n async_tasks.append(\n asyncio.create_task(\n match_intel(\n vast_binary,\n vast_endpoint,\n indicator_queue,\n sightings_queue,\n live_match,\n retro_match,\n retro_match_max_events,\n retro_match_timeout,\n )\n )\n )\n\n if retro_match:\n # add metrics for retro-matching to the metric output\n metrics += [\n s_retro_matches_per_ioc,\n s_retro_query_time_s_per_ioc,\n g_retro_match_backlog,\n ]\n if live_match:\n # add metrics for live-matching to the metric output\n metrics.append(g_live_matcher_sightings)\n async_tasks.append(\n asyncio.create_task(\n live_match_vast(vast_binary, vast_endpoint, sightings_queue)\n )\n )\n\n if metrics_interval:\n async_tasks.append(\n asyncio.create_task(write_metrics(metrics_interval, metrics_filename))\n )\n\n loop = asyncio.get_event_loop()\n for s in [signal.SIGHUP, signal.SIGTERM, signal.SIGINT]:\n loop.add_signal_handler(s, lambda: asyncio.create_task(stop_signal()))\n return await asyncio.gather(*async_tasks)",
"def route(self, regex, callback):\n self.__matcher.register(regex, callback)",
"def test_pattern_search(self, mock_get, circuits_app, umbinv_regex, umbinv_start_epoch, umbinv_start_relative, umbinv_limit, umbinv_include_category):\n\n keys_outer = [\"matches\", \"limit\", \"totalResults\", \"expression\", \"moreDataAvailable\"]\n keys_match = [\"name\", \"securityCategories\", \"firstSeen\", \"name\", \"firstSeenISO\"]\n function_params = {\n \"umbinv_regex\": umbinv_regex,\n \"umbinv_start_epoch\": umbinv_start_epoch,\n \"umbinv_start_relative\": umbinv_start_relative,\n \"umbinv_limit\": umbinv_limit,\n \"umbinv_include_category\": umbinv_include_category\n }\n results = call_umbrella_pattern_search_function(circuits_app, function_params)\n search_matches = results[\"search_matches\"]\n assert_keys_in(search_matches, *keys_outer)\n match = search_matches[\"matches\"].pop(0)\n assert_keys_in(match, *keys_match)",
"def run_fasttree_raxml_survived_best_garli(working_dir, seqdb, run_id, fasta_file, number_of_sequences, base_seq_name, raxml_kill_rate, raxml_bfgs, raxml_model_optimization_precision, raxml_num_runs, garli_num_runs, garli_attachmentspertaxon, garli_stoptime, email, machines):\n runner = FasttreeRaxmlSurvivedBestGarli(\n working_dir=working_dir, wait_timeout=600, seqdb=seqdb, run_id=run_id, fasta_file=fasta_file, number_of_sequences=number_of_sequences, base_seq_name=base_seq_name,\n raxml_settings={\"kill_rate\": raxml_kill_rate, \"bfgs\": raxml_bfgs, \"model_optimization_precision\": raxml_model_optimization_precision, \"num_runs\": raxml_num_runs},\n garli_settings={\"num_runs\": garli_num_runs, \"attachmentspertaxon\": garli_attachmentspertaxon, \"stoptime\": garli_stoptime},\n email=email, machines=machines)\n while not runner.finished():\n runner.iteration()\n return runner.results()\n\n # run_id = run_id.replace(' ', '-').replace('/', '-') # raxml cannot handle spaces and slashes in run-id\n # save_settings(working_dir=working_dir, run_id=run_id, mode=\"fasttree-survived-best\", fasta_file=Path(fasta_file).resolve(), number_of_sequences=number_of_sequences, base_seq_name=base_seq_name, raxml_bfgs=raxml_bfgs, raxml_model_optimization_precision=raxml_model_optimization_precision, raxml_num_runs=raxml_num_runs, garli_num_runs=garli_num_runs, garli_attachmentspertaxon=garli_attachmentspertaxon, garli_stoptime=garli_stoptime, email=email, machines=machines)\n # r_fasttree = run_fasttree(working_dir=working_dir, run_id=run_id, fasta_file=fasta_file, email=email, machines=machines)\n # r_raxml = run_raxml_survived(working_dir=working_dir, run_id=run_id, fasta_file=fasta_file, source_tree=r_fasttree.best_tree(), base_seq_name=base_seq_name, raxml_kill_rate=raxml_kill_rate, raxml_bfgs=raxml_bfgs, raxml_model_optimization_precision=raxml_model_optimization_precision, raxml_num_runs=raxml_num_runs, email=email, machines=machines)\n # r_garli = run_garli(working_dir=working_dir, run_id=run_id, fasta_file=fasta_file, tree=r_raxml.best_tree(), garli_num_runs=garli_num_runs, garli_attachmentspertaxon=garli_attachmentspertaxon, garli_stoptime=garli_stoptime, email=email, machines=machines)\n # return make_results(working_dir=working_dir, r_raxml=r_raxml, r_garli=r_garli, seqdb=seqdb)",
"async def ingest_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator):\n global logger\n vast_ioc = indicator_to_vast_matcher_ioc(indicator)\n if not vast_ioc:\n logger.error(\n f\"Unable to convert STIX-2 Indicator to VAST compatible IoC. Is it a point IoC? {indicator}\"\n )\n return\n vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger)\n proc = await vast.import_(type=\"intel.indicator\").json().exec(stdin=vast_ioc)\n await proc.wait()\n logger.debug(f\"Ingested indicator for VAST live matching: {indicator}\")",
"def test_vasp_immigrant(immigrant_with_builder):\n immigrant, inputs = immigrant_with_builder\n\n # We need to set the parser explicitly\n inputs.metadata['options']['parser_name'] = 'vasp.vasp'\n result, node = run.get_node(immigrant, **inputs)\n assert node.exit_status == 0\n\n expected_output_nodes = {'misc', 'remote_folder', 'retrieved'}\n assert expected_output_nodes.issubset(set(result))",
"def run_matching(self):\n paradic = self.cfg['param']['paradic']\n print 'in run_matching() n_bins = ' +str(paradic['n_bins'])\n\n f = open(self.work_dir+'matches.txt','w')\n matching = self.run_proc(['match_cli', 'keys_im0.txt',\n 'keys_im1.txt',\n str(paradic['flag_match']),\n str(paradic['C_match']),\n str(paradic['n_hist']),\n str(paradic['n_ori']),\n str(paradic['n_bins'])],\n stdout=f)\n self.wait_proc(matching, timeout=self.timeout)\n return 1",
"def parse_vasprun(self):\n self.vasprun_filename = match_filename(\"vasprun.xml\")\n if not self.vasprun_filename:\n raise FileNotFoundError(\"Could not find vasprun.xml or vasprun.xml.gz file\")\n try:\n self.vasprun = Vasprun(\n self.vasprun_filename, parse_potcar_file=False, parse_dos=False\n )\n except ET.ParseError:\n self.vasprun = None\n except:\n raise",
"def launch_vrouter_instance(self):\n # Add code to start vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"VTEST_ONLY_RETURN \" +\n str(self.vr_args['vtest_only']))\n return\n cpid = os.fork()\n if cpid == 0:\n vrouter_cmd_args = [\"taskset\", self.vr_args['taskset'],\n self.vr_args['vrouter_path'], \"--no-daemon\",\n \"--no-huge\", \"--vr_packet_sz\", \"2048\"]\n if self.vr_args['dpdk_args']:\n for dpdk_arg in self.vr_args['dpdk_args'].split(' '):\n vrouter_cmd_args.append(dpdk_arg)\n vrouter_cmd_args.extend([\"--vr_socket_dir\",\n self.vr_args['socket_dir']])\n os.execvp(\"taskset\", vrouter_cmd_args)\n else:\n self.logger.info(\n \"Running cmd - taskset %s %s --no-daemon --no-huge \"\n \"--vr_packet_sz 2048 --vr_socket_dir %s\" %\n (self.vr_args['taskset'],\n self.vr_args['vrouter_path'],\n self.vr_args['socket_dir']))\n self.logger.info(\"pid = \" + str(cpid))\n self.pid = cpid\n count = 0\n ret = 0\n while (count < 10):\n cmd = \"lsof \" + self.vr_args['socket_dir'] +\\\n \"/dpdk_netlink | wc -l\"\n self.logger.info(\"Running cmd - {}\".format(cmd))\n try:\n ret = subprocess.check_output(cmd, shell=True)\n # check if the netlink is up using the ret value\n if (ret == \"2\\n\"):\n break\n else:\n time.sleep(1)\n count += 1\n except Exception as e:\n self.logger.error(e)\n time.sleep(1)\n count += 1\n if (ret != \"2\\n\"):\n self.logger.error(\"Failed to bringup vrouter\")\n return -1\n else:\n return 0",
"def test_match_regexp_including_start():\r\n runmatch(lcode)",
"def start(self):\n super(BrcmVlanFilterTask, self).start()\n self._local_deferred = reactor.callLater(0, self.perform_vlan_tagging)",
"def run_matchengine():\n with matchengine.internals.engine.MatchEngine(\n match_on_deceased=False,\n match_on_closed=True,\n db_name=\"matchminer\") as me_prod:\n me_prod.get_matches_for_all_trials()\n me_prod.update_all_matches()\n\n reset_elasticsearch()\n resp = Response(response=json.dumps({\"success\": True}),\n status=200,\n mimetype=\"application/json\")\n return resp",
"def run_raxml_survived_best_garli(working_dir, seqdb, run_id, fasta_file, number_of_sequences, base_seq_name, raxml_kill_rate, raxml_bfgs, raxml_model_optimization_precision, raxml_num_runs, garli_num_runs, garli_attachmentspertaxon, garli_stoptime, email, machines):\n runner = RaxmlSurvivedBestGarli(\n working_dir=working_dir, wait_timeout=600, seqdb=seqdb, run_id=run_id, fasta_file=fasta_file, number_of_sequences=number_of_sequences, base_seq_name=base_seq_name,\n raxml_settings={\"kill_rate\": raxml_kill_rate, \"bfgs\": raxml_bfgs, \"model_optimization_precision\": raxml_model_optimization_precision, \"num_runs\": raxml_num_runs},\n garli_settings={\"num_runs\": garli_num_runs, \"attachmentspertaxon\": garli_attachmentspertaxon, \"stoptime\": garli_stoptime},\n email=email, machines=machines)\n while not runner.finished():\n runner.iteration()\n return runner.results()\n\n # run_id = run_id.replace(' ', '-').replace('/', '-') # raxml cannot handle spaces and slashes in run-id\n # save_settings(working_dir=working_dir, run_id=run_id, mode=\"survived-best\", fasta_file=Path(fasta_file).resolve(), number_of_sequences=number_of_sequences, base_seq_name=base_seq_name, raxml_bfgs=raxml_bfgs, raxml_model_optimization_precision=raxml_model_optimization_precision, raxml_num_runs=raxml_num_runs, garli_num_runs=garli_num_runs, garli_attachmentspertaxon=garli_attachmentspertaxon, garli_stoptime=garli_stoptime, email=email, machines=machines)\n # r_raxml = run_raxml_survived(working_dir=working_dir, run_id=run_id, fasta_file=fasta_file, source_tree=None, base_seq_name=base_seq_name, raxml_kill_rate=raxml_kill_rate, raxml_bfgs=raxml_bfgs, raxml_model_optimization_precision=raxml_model_optimization_precision, raxml_num_runs=raxml_num_runs, email=email, machines=machines)\n # r_garli = run_garli(working_dir=working_dir, run_id=run_id, fasta_file=fasta_file, tree=r_raxml.best_tree(), garli_num_runs=garli_num_runs, garli_attachmentspertaxon=garli_attachmentspertaxon, garli_stoptime=garli_stoptime, email=email, machines=machines)\n # return make_results(working_dir=working_dir, r_raxml=r_raxml, r_garli=r_garli, seqdb=seqdb)",
"def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)",
"def test_with_run_command(self):\n self.build()\n self.runCmd(\"file \" + self.getBuildArtifact(\"a.out\"), CURRENT_EXECUTABLE_SET)\n\n lldbutil.run_break_set_by_file_and_line(\n self, \"main.cpp\", self.line, num_expected_locations=1, loc_exact=True)\n\n self.runCmd(\"run\", RUN_SUCCEEDED)\n\n # The stop reason of the thread should be breakpoint.\n self.expect(\"thread list\", STOPPED_DUE_TO_BREAKPOINT,\n substrs=['stopped',\n 'stop reason = breakpoint'])\n\n # This is the function to remove the custom formats in order to have a\n # clean slate for the next test case.\n def cleanup():\n pass\n\n # Execute the cleanup function during test case tear down.\n self.addTearDownHook(cleanup)\n\n pass # my code never fails\n\n v = self.frame().FindVariable(\"v\")\n v.SetPreferSyntheticValue(True)\n v.SetFormat(lldb.eFormatVectorOfFloat32)\n\n if self.TraceOn():\n print(v)\n\n self.assertTrue(\n v.GetNumChildren() == 4,\n \"v as float32[] has 4 children\")\n self.assertTrue(v.GetChildAtIndex(0).GetData().float[\n 0] == 1.25, \"child 0 == 1.25\")\n self.assertTrue(v.GetChildAtIndex(1).GetData().float[\n 0] == 1.25, \"child 1 == 1.25\")\n self.assertTrue(v.GetChildAtIndex(2).GetData().float[\n 0] == 2.50, \"child 2 == 2.50\")\n self.assertTrue(v.GetChildAtIndex(3).GetData().float[\n 0] == 2.50, \"child 3 == 2.50\")\n\n self.expect(\"expr -f int16_t[] -- v\",\n substrs=['(0, 16288, 0, 16288, 0, 16416, 0, 16416)'])\n self.expect(\"expr -f uint128_t[] -- v\",\n substrs=['(85236745249553456609335044694184296448)'])\n self.expect(\n \"expr -f float32[] -- v\",\n substrs=['(1.25, 1.25, 2.5, 2.5)'])\n\n oldValue = v.GetChildAtIndex(0).GetValue()\n v.SetFormat(lldb.eFormatHex)\n newValue = v.GetChildAtIndex(0).GetValue()\n self.assertFalse(oldValue == newValue,\n \"values did not change along with format\")\n\n v.SetFormat(lldb.eFormatVectorOfFloat32)\n oldValueAgain = v.GetChildAtIndex(0).GetValue()\n self.assertTrue(\n oldValue == oldValueAgain,\n \"same format but different values\")",
"def main():\n\n try:\n args = parse()\n\n is_match = match(args.regex, args.text)\n print(is_match)\n except Exception as err:\n # Print catch-all error message\n print(f\"[{type(err).__name__}]: {err}.\", file=sys.stderr)",
"def scan(self):\n try:\n assert self.text_to_match\n except AssertionError:\n print('Please introduce an operation to calculate.')\n exit(1)\n\n for line in self.text_to_match: # taking each line from the code to match\n i = 0\n while i < len(line): # looping till the end of the string is reached\n string_segment = self.try_match(i, line)\n\n try:\n assert string_segment.token\n i += string_segment.end_string # set the new index to take the unmatched string\n self.list_tokens.append(string_segment) # append the found goods\n except AssertionError:\n print(f'SyntaxError: Unmatched Syntax -{line[i]}- at line: '\n f'\\n{line}')\n exit(1)",
"def main():\n # Handel the file opening of the FASTA file to be searched\n file = open(r\"C:\\Users\\doodw\\PycharmProjects\\Zalgorithm\\venv\\input\\testfasta.fasta\", 'r')\n # Create an empty string to hold text sequence\n sequence = \"\"\n # Flag to ensure only one sequence is read from the Fasta\n single_sequence_flag = False\n # Read in and concatenate sequence lines from Fasta file\n for line in file:\n if line.find('>') != -1: # Header ignored\n # Ignores multiple sequences in Fasta file\n if single_sequence_flag:\n break\n continue\n else:\n line = line.strip().lower()\n sequence += line\n\n # Prompt the user for query\n query = input(\"Please enter a DNA sequence to search for: \").strip().lower()\n\n # Call the z-algorithm\n output = z_algorithm(sequence, query)\n\n # Print the output form the z-algorithm\n for index in output:\n print(\"A perfect match found at: \" + str(index))",
"def main():\n args = parse_args()\n\n make_session = bootstrap(\n 'sqlite:///{}'.format(DB_NAME),\n )\n session = make_session()\n\n for vase in session.query(Vase).order_by(Vase.produced_start):\n for query_str in make_searches(vase):\n pass",
"def call(targetlist, querylist, match=1, mismatch=2, gapopen=5, gapextend=0,\n ksize=31):\n for query in sorted(querylist, reverse=True, key=len):\n bestcigar = None\n bestscore = None\n besttarget = None\n bestorientation = None\n for target in sorted(targetlist, key=lambda record: record.name):\n cigar, score, strand = align_both_strands(\n target.sequence, query.sequence, match, mismatch, gapopen,\n gapextend\n )\n if bestscore is None or score > bestscore:\n bestscore = score\n bestcigar = cigar\n besttarget = target\n bestorientation = strand\n\n if bestorientation == -1:\n query.sequence = kevlar.revcom(query.sequence)\n for varcall in make_call(besttarget, query, bestcigar, ksize):\n yield varcall",
"async def infernal_search(sequence, job_id):\n sequence = sequence.replace('T', 'U').upper()\n\n params = {\n 'query': os.path.join(INFERNAL_QUERY_DIR, '%s' % job_id),\n 'output': os.path.join(INFERNAL_RESULTS_DIR, '%s' % job_id),\n 'tblout': os.path.join(INFERNAL_RESULTS_DIR, '%s.tblout' % job_id),\n 'rfam_cm': settings.RFAM_CM,\n 'cmscan': settings.CMSCAN_EXECUTABLE,\n 'cpu': 4,\n }\n\n # write out query in fasta format\n with open(params['query'], 'w') as f:\n f.write('>query\\n')\n f.write(sequence)\n f.write('\\n')\n\n command = ('{cmscan} '\n '--notextw ' # unlimit ASCII text output line width\n '--cut_ga ' # use CM's GA gathering cutoffs as reporting thresholds\n '--rfam ' # set heuristic filters at Rfam-level (fast)\n '--nohmmonly ' # never run HMM-only mode, not even for models with 0 basepairs\n '-o {output} ' # direct output to file\n '--tblout {tblout} ' # save parseable table of hits to file\n '--acc ' # prefer accessions over names in output\n '--cpu {cpu} ' # number of CPUs to use\n '{rfam_cm} ' # Rfam.cm file\n '{query} ' # query file\n ).format(**params)\n\n process = await asyncio.subprocess.create_subprocess_exec(\n *shlex.split(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE\n )\n\n return process, params['output']",
"def __enter__(self):\n try:\n run(['logger', 'BVT', 'starting', self.full_description()], \n host=self.dut, timeout=10)\n except SubprocessError:\n print 'INFO: unable to mark test log'\n if not self.record:\n return self\n if self.result_id is None:\n self.mdb = get_autotest()\n terms = {'test_case':self.description or 'to be determined',\n 'automation_user': getpwuid(getuid()).pw_gecos.split(',')[0],\n 'control_pid' : getpid(), 'start_time' : time(),\n 'development_mode' : 0,\n 'command_line':abbreviate(' '.join(sys.argv))}\n if self.dut:\n dutdoc = self.mdb.duts.find_one({'name':self.dut})\n self.dut_id = terms['dut'] = dutdoc['_id']\n terms['dut_name'] = dutdoc['name']\n if 'development_mode' in dutdoc:\n terms['development_mode'] = dutdoc['development_mode']\n self.result_id = self.mdb.results.save(terms)\n if self.job_id is not None:\n self.mdb.jobs.update({'_id':objectid.ObjectId(self.job_id)}, {'$set':{'results_id':self.result_id}})\n if self.build is None and self.dut:\n self.build = get_build(self.dut, timeout=10)\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':self.build}})\n if self.dut:\n self.mdb.duts.update({'_id':terms['dut']}, {'$set': {\n 'build':self.build,\n 'control_command_line': abbreviate(' '.join(sys.argv)),\n 'result_id' : self.result_id}})\n if self.stdout_filter:\n self.record_queue = Queue()\n self.stream_process = Process(\n target=service_queue, \n args=[self.record_queue, self.result_id, \n self.dut, self.dut_id])\n self.stream_process.start()\n self.stdout_filter.add_callback(self, \n lambda *x: self.record_queue.put(x))\n\n if self.description:\n print 'HEADLINE: starting', self.full_description()\n get_track().updates.save({'result_id':self.result_id,\n 'action':'new result record'})\n return self",
"def test__parser__grammar_sequence(seg_list, caplog):\n bs = StringParser(\"bar\", KeywordSegment)\n fs = StringParser(\"foo\", KeywordSegment)\n g = Sequence(bs, fs)\n # If running in the test environment, assert that Sequence recognises this\n if getenv(\"SQLFLUFF_TESTENV\", \"\"):\n assert g.test_env\n gc = Sequence(bs, fs, allow_gaps=False)\n ctx = ParseContext(dialect=None)\n with caplog.at_level(logging.DEBUG, logger=\"sqlfluff.parser\"):\n # Should be able to match the list using the normal matcher\n logging.info(\"#### TEST 1\")\n m = g.match(seg_list, parse_context=ctx)\n assert m\n assert len(m) == 3\n assert m.matched_segments == (\n KeywordSegment(\"bar\", seg_list[0].pos_marker),\n seg_list[1], # This will be the whitespace segment\n KeywordSegment(\"foo\", seg_list[2].pos_marker),\n )\n # Shouldn't with the allow_gaps matcher\n logging.info(\"#### TEST 2\")\n assert not gc.match(seg_list, parse_context=ctx)\n # Shouldn't match even on the normal one if we don't start at the beginning\n logging.info(\"#### TEST 2\")\n assert not g.match(seg_list[1:], parse_context=ctx)",
"def run(version=1):\n\n # scan header to define our graph parameters\n try:\n header = input(\"Enter graph header:\")\n edges_count, start_edge, finish_edge = header.split(\" \")\n edges_count = int(edges_count)\n logger.debug(\"Scanned edges count: {}; Start:{}, End:{}\".format(\n edges_count, start_edge, finish_edge))\n except ValueError:\n raise ValueError(\"Input data parsing error, \"\n \"the format should be like \\\"3 a b\\\"\")\n\n # scan edges\n edges = scan_edges(edges_count)\n logger.debug(\"Scanned edges: {}\".format(edges))\n\n optimize(edges, start_edge, finish_edge)\n\n print_output(edges)",
"def make_video(queue, video_file, destination, face_locations, face_encodings, match_encodings, settings):\n trackers = [] # list of tracker objects, one for each matched face\n # get video\n video = cv2.VideoCapture(video_file) # input VideoCapture object\n frame_rate = video.get(cv2.CAP_PROP_FPS) # frames per second in input video\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) # width of input video frame\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) # height of input video frame\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) # number of frames in input video\n # get settings\n track_period = settings[\"track_period\"] # track period from settings\n tolerance = settings[\"tolerance\"] # face matching tolerance from settings\n blur_method = settings[\"blur_method\"] # type of blurring from settings\n blur_intensity = settings[\"blur_intensity\"] # blurring filter size from settings\n display_output = settings[\"display_output\"] # flag indicating whether to display output video from settings\n # initialize writer\n out = video_utils.initialize_writer(destination, (width, height), frame_rate) # VideoWriter object\n for i in range(frame_count):\n ret, img = video.read() # ret indicates if frame was read correctly, img is last read frame\n if i % track_period == 0: # frame for detection\n current_frame_encodings = np.array(face_encodings[i // track_period]) # array of encodings for faces in current frame\n matched_indices, matched_encodings = recognition.match_faces(current_frame_encodings, np.array(match_encodings), tolerance) # indices of matched faces from current frame and their encodings\n matched_locations = [face_locations[i // track_period][k] for k in matched_indices] # locations of matched faces from current frame\n trackers = tracking.start_trackers(img, matched_locations) # list of tracker objects, one for each matched face\n else: # frame for tracking\n matched_locations = tracking.update_locations(trackers, img) # updated locations of matched faces from current frame\n # generate blurred image\n blurred = None # object holding image with blurred faces\n if blur_method == \"pixelate\":\n blurred = blur_methods.pixelated(img, matched_locations, blur_intensity)\n elif blur_method == \"blur\":\n blurred = blur_methods.blurred(img, matched_locations, blur_intensity)\n elif blur_method == \"blacken\":\n blurred = blur_methods.blackened(img, matched_locations)\n out.write(blurred)\n\n out.release()\n queue.put(0)\n if display_output:\n video_utils.display_video(destination)",
"def generate_asts_beam_search(self, psi, beam_width):\n\n # each candidate is (list of production paths in the AST, likelihood of AST so far)\n candidates = [([[('DSubTree', CHILD_EDGE)]], 1.)]\n complete_candidates = []\n complete_pr_keys = []\n cache = dict()\n\n # hack: use 16-digit probability as candidate ID to check for duplicates that might block out the beam\n def get_key(prob):\n return float('{:16f}'.format(prob))\n\n partial_candidate = True\n while partial_candidate:\n partial_candidate = False\n new_candidates = []\n pr_keys = []\n for (candidate, pr) in candidates:\n\n # gather candidate's complete and incomplete paths\n complete_paths, incomplete_paths = [], []\n try:\n for path in candidate:\n if self.is_complete_path(path):\n complete_paths.append(path)\n else:\n incomplete_paths.append(path)\n except (InvalidSketchError, TooLongPathError) as e:\n continue # throw out the candidate\n\n # if candidate is a fully formed AST, add it to new candidates and continue\n if len(incomplete_paths) == 0:\n pr_key = get_key(pr)\n if pr_key not in complete_pr_keys:\n complete_pr_keys.append(pr_key)\n complete_candidates.append((candidate, pr))\n if pr_key not in pr_keys:\n pr_keys.append(pr_key)\n new_candidates.append((candidate, pr))\n continue\n partial_candidate = True\n\n # for every incomplete path, create k new candidates from the top k in the next step's dist\n for i, inc_path in enumerate(incomplete_paths):\n nodes, edges = zip(*inc_path)\n dist = list(enumerate(self.model.infer_ast(self.sess, psi, nodes, edges, cache=cache)))\n dist.sort(key=lambda x: x[1], reverse=True)\n topk = dist[:beam_width]\n\n for (idx, p) in topk:\n new_candidate = [path for path in complete_paths] + \\\n [path for (j, path) in enumerate(incomplete_paths) if i != j]\n prediction = self.model.config.decoder.chars[idx]\n\n inc_path_step_SIBLING = inc_path + [(prediction, SIBLING_EDGE)]\n if prediction in ['DBranch', 'DExcept', 'DLoop']:\n inc_path_step_CHILD = inc_path + [(prediction, CHILD_EDGE)]\n new_pr = pr * p * p\n pr_key = get_key(new_pr)\n if pr_key not in pr_keys:\n pr_keys.append(pr_key)\n new_candidates.append((new_candidate + [inc_path_step_CHILD, inc_path_step_SIBLING],\n new_pr))\n else:\n new_pr = pr * p\n pr_key = get_key(new_pr)\n if pr_key not in pr_keys:\n pr_keys.append(pr_key)\n new_candidates.append((new_candidate + [inc_path_step_SIBLING], new_pr))\n\n # bound candidates with the beam width\n new_candidates += [c for c in complete_candidates if c not in new_candidates]\n new_candidates.sort(key=lambda x: x[1], reverse=True)\n candidates = new_candidates[:beam_width]\n\n # convert each set of paths into an AST\n asts = []\n for (candidate, pr) in candidates:\n ast = {'ast': self.paths_to_ast(candidate),\n 'probability': '{:e}'.format(pr)}\n if ast not in asts:\n asts.append(ast)\n return asts",
"def start_search_vms(self, hStringsList = consts.PRL_INVALID_HANDLE):\n\t\treturn Job(SDK.PrlSrv_StartSearchVms(self.handle, conv_handle_arg(hStringsList))[0])",
"def main(reference, vcf, max_nodes):\n\n # setting command line for vg construct\n cli = [\"vg\",\n \"construct\"]\n \n # reference and vcf files (1 or more)\n for reference_file in reference:\n cli += [\"-r\", reference_file]\n # vcf file is optional\n for vcf_file in vcf:\n if check(vcf_file):\n cli += [\"-v\", vcf_file]\n\n # nodes \n cli += [\"-m\", max_nodes]\n \n print(\"Running fastqc subprocess with command: {}\".format(cli))\n\n p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE, shell=False)\n stdout, stderr = p.communicate()\n\n # Attempt to decode STDERR output from bytes. If unsuccessful, coerce to\n # string\n try:\n stderr = stderr.decode(\"utf8\")\n except (UnicodeDecodeError, AttributeError):\n stderr = str(stderr)\n\n print(\"Finished vg construct subprocess with STDOUT:\\\\n\"\n \"======================================\\\\n{}\".format(stdout))\n print(\"Fished vg construct subprocesswith STDERR:\\\\n\"\n \"======================================\\\\n{}\".format(stderr))\n print(\"Finished vg construct with return code: {}\".format(p.returncode))\n\n # save vg file\n with open(\"reference.vg\", \"wb\") as vg_fh:\n vg_fh.write(stdout)"
] | [
"0.72215056",
"0.63250434",
"0.60648054",
"0.4627217",
"0.4626055",
"0.4610181",
"0.45514858",
"0.45476955",
"0.45376977",
"0.45119616",
"0.4479351",
"0.44458568",
"0.4383127",
"0.4380449",
"0.4356244",
"0.43545336",
"0.43387768",
"0.43217507",
"0.42751554",
"0.42354617",
"0.4228719",
"0.42195252",
"0.4218843",
"0.41947514",
"0.41888216",
"0.41799828",
"0.416966",
"0.4167739",
"0.41667286",
"0.41645208"
] | 0.8319855 | 0 |
Invoke a command as subprocess for the given context. The command string is treated as template string and occurrences of "%ioc" are replaced with the actually matched IoC. Returns stdout from the invoked command. cmd The command, including flags, to invoke as subprocess. cmd is treated as template string and occurrences of '%ioc' are replaced with the actually matched IoC. context The context to forward as JSON ioc The value to replace '%ioc' with in the `cmd` string | async def invoke_cmd_for_context(cmd: str, context: dict, ioc: str = "%ioc"):
if not ioc:
ioc = "%ioc"
cmd = cmd.replace("%ioc", ioc)
proc = await asyncio.create_subprocess_exec(
*lexical_split(cmd),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE,
)
proc.stdin.write(json.dumps(context).encode())
await proc.stdin.drain()
proc.stdin.close()
stdout, stderr = await proc.communicate()
if stderr:
logger.error(f"Error while transforming sighting context: {stderr}")
return stdout | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def call(cmd):\n\tif \"|\" in cmd:\n\t\tcmd_parts = cmd.split('|')\n\telse:\n\t\tcmd_parts = []\n\t\tcmd_parts.append(cmd)\n\ti = 0\n\tp = {}\n\tfor cmd_part in cmd_parts:\n\t\t#print(cmd_part)\n\t\tcmd_part = cmd_part.strip()\n\t\tif i == 0:\n\t\t p[i]=Popen(shlex.split(cmd_part),stdin=None, stdout=PIPE, stderr=PIPE)\n\t\telse:\n\t\t p[i]=Popen(shlex.split(cmd_part),stdin=p[i-1].stdout, stdout=PIPE, stderr=PIPE)\n\t\ti = i +1\n\t(output, err) = p[i-1].communicate()\n\texit_code = p[0].wait()\n\n\treturn str(output).strip(), str(err), exit_code",
"def run(cmd, cmd_input=None, cwd=None):\n\n with Popen(\n \" \".join(cmd) if cwd else cmd,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE,\n cwd=cwd,\n shell=True,\n env={\"PATH\": cwd} if cwd else None,\n ) as proc:\n out, err = proc.communicate(\n input=cmd_input.encode(\"utf-8\") if cmd_input else None\n )\n rcode = proc.returncode\n\n return out.decode(\"utf-8\"), err.decode(\"utf-8\"), rcode",
"def _cmd(argv):\n dirpath_lwc_root = argv[2]\n dirpath_cwd = argv[3]\n pid = argv[4]\n cmd_b64 = argv[5]\n cmd_argv = argv[6:]\n outer_cmd = base64.b64decode(cmd_b64).decode()\n\n import click\n import da.cli\n da.cli.main(\n args = cmd_argv,\n standalone_mode = True,\n obj = da.cli.CustomContextObject({\n 'dirpath_lwc_root': dirpath_lwc_root,\n 'dirpath_cwd': dirpath_cwd,\n 'outer_cmd': outer_cmd,\n 'pid': pid,\n 'args': cmd_argv }))",
"def run(cmd):\n cmd = str(cmd)\n\n if env['verbose']:\n sys.stdout.write('--> %s\\n' % cmd)\n\n cmd_list = shlex.split(cmd)\n\n p = subprocess.Popen(\n cmd_list,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n return p.communicate()",
"def getCommand(self, context, cmd=None, device=None):\n # Perform a TALES eval on the expression using self\n if cmd is None:\n cmd = self.commandTemplate\n if not cmd.startswith('string:') and not cmd.startswith('python:'):\n cmd = 'string:%s' % cmd\n compiled = talesCompile(cmd)\n d = device if device is not None else context.device()\n environ = {'dev' : d,\n 'device': d,\n 'devname': d.id,\n 'ds': self,\n 'datasource': self,\n 'here' : context,\n 'context': context,\n 'zCommandPath' : context.zCommandPath,\n 'nothing' : None,\n 'now' : DateTime() }\n res = compiled(getEngine().getContext(environ))\n if isinstance(res, Exception):\n raise res\n res = self.checkCommandPrefix(context, res)\n return res",
"def run_cmd(self, cmd, cwd=None):\n logging.debug('Running %s', cmd)\n proc = subprocess.Popen(\n cmd,\n cwd=cwd or self._app_dir,\n stdout=subprocess.PIPE)\n output, _ = proc.communicate()\n if proc.returncode:\n sys.stderr.write('\\n' + output + '\\n')\n raise subprocess.CalledProcessError(proc.returncode, cmd, output)\n return output",
"def run_cmd(self, cmd, stdout=True, stderr=True, stdin=False, tty=False, privileged=False, user='', detach=False,\n stream=False, socket=False, environment=None, exit_status_ok=0):\n cmd_str = ' '.join(cmd) if isinstance(cmd, (list, tuple)) else cmd\n logger.debug(self.fmt_ctn_log('Running \"%s\"'), cmd_str)\n out, exit_status = self.exec_run2(cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,\n privileged=privileged, user=user, detach=detach, stream=stream,\n socket=socket, environment=environment)\n logger.debug(self.fmt_ctn_log('Result of running \"%s\" (exit status: %s):\\n%s'), cmd_str, exit_status, out)\n if exit_status_ok is not None and exit_status != exit_status_ok:\n raise docker.errors.ContainerError(self,\n exit_status,\n ' '.join(cmd) if isinstance(cmd, (list, tuple)) else cmd,\n self.image,\n out)\n else:\n return out, exit_status",
"def run_cmd(context, exec_cmd, pty=True, hide=False, error_message=\"An unknown error has occurred!\"):\n print(f\"LOCAL - Running command {exec_cmd}\")\n result = context.run(exec_cmd, pty=pty, hide=hide, warn=True)\n if not result:\n print(f\"ERROR - {error_message}\\n{result.stdout if pty else result.stderr}\")\n raise invoke.exceptions.UnexpectedExit(result)\n\n return result",
"def process(cmd_string, stdin=None):\n return process_results(process_run(cmd_string, stdin=stdin))",
"def run_cmd(\n cmd: Union[List[str], str],\n cwd: Optional[Union[str, Path]] = None,\n env: Optional[Dict[str, str]] = None,\n timeout: Optional[int] = None,\n redirect: Optional[Union[str, Path, TextIOWrapper]] = None,\n) -> str:\n if isinstance(cmd, str):\n args = shlex.split(cmd)\n else:\n args = cmd\n cwd = cwd or Path.cwd()\n env = env or os.environ.copy()\n log.info(\"Calling: %s\", \" \".join(args))\n try:\n output = subprocess.run(\n args,\n shell=is_windows(),\n cwd=cwd,\n env=env,\n timeout=timeout,\n universal_newlines=True,\n check=True,\n stdout=PIPE,\n stderr=STDOUT, # combine stdout,stderr streams\n ).stdout\n except subprocess.CalledProcessError as err:\n handle_output(err.stdout, redirect)\n raise\n return handle_output(output, redirect)",
"def do_shell(command, context=None, **kwargs):\n logging.info(\"%s: executing %s\" % (context, command))\n\n child_env = {'CRANKD_CONTEXT': context}\n\n # We'll pull a subset of the available information in for shell scripts.\n # Anyone who needs more will probably want to write a Python handler\n # instead so they can reuse things like our logger & config info and avoid\n # ordeals like associative arrays in Bash\n for k in [ 'info', 'key' ]:\n if k in kwargs and kwargs[k]:\n child_env['CRANKD_%s' % k.upper()] = str(kwargs[k])\n\n if 'user_info' in kwargs:\n for k, v in kwargs['user_info'].items():\n child_env[create_env_name(k)] = str(v)\n\n try:\n rc = call(command, shell=True, env=child_env)\n if rc == 0:\n logging.debug(\"`%s` returned %d\" % (command, rc))\n elif rc < 0:\n logging.error(\"`%s` was terminated by signal %d\" % (command, -rc))\n else:\n logging.error(\"`%s` returned %d\" % (command, rc))\n except OSError, exc:\n logging.error(\"Got an exception when executing %s:\" % (command, exc))",
"def __call__(self, *args, **kwargs):\n env = kwargs.pop('_env', os.environ)\n debug = kwargs.pop('_debug', False)\n _in = kwargs.pop('_in', None)\n _out = kwargs.pop('_out', None)\n _err = kwargs.pop('_err', None)\n\n if self._is_string(_out):\n _out = open(_out, 'wb')\n\n if self._is_string(_err):\n _err = open(_err, 'wb')\n\n if args and isinstance(args[0], DeferredProcess):\n # This is a piped call.\n d = args[0]\n d.addCallback(lambda exc: exc.stdout)\n d.addCallback(lambda stdout: self._make_protocol(stdout, debug))\n d.addCallback(\n lambda protocol: self._spawn(protocol, [self.cmd], env))\n d.addCallback(lambda process: process.proto._process_deferred)\n return d\n\n txsh_protocol = self._make_protocol(\n stdin=_in, stdout=_out, stderr=_err, debug=debug)\n\n # Twisted requires the first arg to be the command itself\n args = self.build_arguments(*args, **kwargs)\n args.insert(0, self.cmd)\n if self.subcommand:\n args.insert(1, self.subcommand)\n args.extend(self._args)\n process = self._spawn(txsh_protocol, args, env)\n return process.proto._process_deferred",
"def subproces_func(cmd):\n pipe = subprocess.run(cmd, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n check=True)\n return pipe",
"def call(cmd):\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n return check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)",
"def execCMD(self, cmd, arg):\n result = subprocess.check_output([cmd, arg])\n return result",
"def Run(cmd, include_stderr=False, return_pipe=False):\n cmd = to_unicode(str(cmd))\n if include_stderr:\n err = STDOUT\n fds = True\n else:\n err = None\n fds = False\n tmpenv = os.environ.copy()\n tmpenv[\"LC_ALL\"] = \"C\"\n tmpenv[\"LANG\"] = \"C\"\n f = Popen(cmd, shell=True, stdout=PIPE, stderr=err, close_fds=fds,\n env=tmpenv)\n if return_pipe:\n return f.stdout\n else:\n return f.communicate()[0]",
"def exec_to_string_with_input ( cmd, input):\n vlog(4, 'Running command: %s' % cmd) \n try:\n with subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd='/tmp/') as p:\n stdout, stderr = p.communicate(input=input)\n return [p.returncode, stdout, stderr ]\n except:\n vlog(1, 'Command %s failed' % cmd)\n return [-1, '', 'Failed to run']",
"def run_cmd(call, cmd, *, echo=True, **kwargs):\n if echo:\n print('$> ' + ' '.join(map(pipes.quote, cmd)))\n return call(cmd, **kwargs)",
"def command(cmd: list, stdin: str):\n proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, stdin=PIPE)\n out, err = proc.communicate(stdin.encode(\"utf-8\"))\n exit = proc.wait()\n return out.decode(\"utf-8\")",
"def shell_command(context, cmd, err_msg=\"Shell command error\"):\n try:\n\n context.last_cmd = cmd\n output = check_output(cmd, shell=True, cwd=os.getcwd())\n context.output = output\n\n except:\n raise Exception(err_msg)",
"def call2(*args, **kwargs):\n env = kwargs.pop('env', {})\n assert not kwargs, \"call kwargs not understood in %s\" % kwargs\n fullcmd = []\n if len(args) == 1:\n cmd = args[0]\n else:\n cmd = args\n cmd = _shlex_split(cmd)\n # flatten\n fullcmd = []\n for el in cmd:\n if isinstance(el, basestring):\n fullcmd.append(el)\n elif isinstance(el, collections.Iterable):\n fullcmd.extend(el)\n else:\n fullcmd.append(str(el))\n assert len(fullcmd) >= 1, \"Need to pass at least a command\"\n return _call(fullcmd, env=env)",
"def execute_cli(self, cmd, **kwargs):\n cli = CLI()\n cli.set_connection(self.connection)\n response = cli.execute(cmd, **kwargs)\n return response.http_response.json()",
"def get_cmd(cmd, ignore_error=False):\n try:\n logger.debug(\"get_cmd: %s\", cmd)\n # check_output in 2.7 only, apic may be on 2.6 if executing on-apic\n if hasattr(subprocess, \"check_output\"):\n # execute command\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n else:\n # apic may not support check_output, use communicate\n cmd = re.sub(\"2> /dev/null\", \"\", cmd)\n p = subprocess.Popen(cmd.split(\" \"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n data, err = p.communicate()\n return data\n except subprocess.CalledProcessError as e:\n if not ignore_error:\n logger.warn(\"error executing command: %s\", e)\n return None",
"def runCommand(self, cmd, stdin=None, env=None):\n\n\t mycmd=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t output, error=mycmd.communicate()\n\t while not mycmd.wait():\n\t \t# do stuff\n\t \treturn 0\n\n\n\n\t #if not isList(cmd):\n\t #cmd = shlex.split(cmd)\n\t #opts = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\t #if env:\n\t # opts.update(env=env)\n\t #if stdin:\n\t # opts.update(stdin=subprocess.PIPE)\n\t # stdout, stderr=subprocess.Popen(cmd, **opts).communicate(stdin)\n\t #else :\n\t # stdout, stderr=subprocess.Popen(cmd, **opts).communicate()\n\t #return stdout, stderr",
"def process_cmd(config, cmd):\n # Separate command from arguments\n cmd_parts = cmd.split(' ', 1)\n head = cmd_parts[0]\n args = ''\n if len(cmd_parts) == 2:\n args = cmd_parts[1]\n\n # Call the command\n if not common.call_cmd(head, config, args):\n print(\"RabbitHole: Unknown command '{}'\".format(head))",
"def run_command(cmd):\n if env.PY2 and isinstance(cmd, unicode):\n cmd = cmd.encode(sys.getfilesystemencoding())\n\n # In some strange cases (PyPy3 in a virtualenv!?) the stdout encoding of\n # the subprocess is set incorrectly to ascii. Use an environment variable\n # to force the encoding to be the same as ours.\n sub_env = dict(os.environ)\n encoding = output_encoding()\n if encoding:\n sub_env['PYTHONIOENCODING'] = encoding\n\n proc = subprocess.Popen(\n cmd,\n shell=True,\n env=sub_env,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output, _ = proc.communicate()\n status = proc.returncode\n\n # Get the output, and canonicalize it to strings with newlines.\n if not isinstance(output, str):\n output = output.decode(output_encoding())\n output = output.replace('\\r', '')\n\n return status, output",
"def run_cmd(cmd, **kwargs):\n log.info(f\"Executing command: {cmd}\")\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n r = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n **kwargs\n )\n log.debug(f\"CMD output: {r.stdout.decode()}\")\n if r.stderr:\n log.error(f\"CMD error:: {r.stderr.decode()}\")\n if r.returncode:\n raise CommandFailed(\n f\"Error during execution of command: {cmd}.\"\n f\"\\nError is {r.stderr.decode()}\"\n )\n return r.stdout.decode()",
"def run(self, cmd, secrets=None, rcOk=(0,), dryRun=False):\n\n result = super().run(cmd, secrets=secrets, rcOk=rcOk, dryRun=dryRun)\n\n if not result:\n\n runCmd = Command._instantiateSecrets(cmd, secrets, hide=False)\n\n cProc = subprocess.run(runCmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True, check=False)\n out = cProc.stdout.decode().strip()\n err = cProc.stderr.decode().strip()\n rcode = cProc.returncode\n\n result = Command.buildResult(out, err, rcode, rcOk)\n\n return result",
"def subprocess(cls, cmd, **kwargs):\r\n def call(args):\r\n return subprocess.call(cmd + args, **kwargs)\r\n return cls(call)",
"def exec_cmd(cmd):\n print(' '.join(str(e) for e in cmd))\n try:\n res = subprocess.run(cmd, capture_output=True, check=True)\n print(res.stdout.decode(\"utf8\"))\n return res\n except subprocess.CalledProcessError as err:\n logging.error(err.stderr)\n raise err"
] | [
"0.58215666",
"0.5792031",
"0.574026",
"0.5625099",
"0.556116",
"0.5544668",
"0.5538419",
"0.55206203",
"0.55202276",
"0.5509302",
"0.5505767",
"0.5440929",
"0.5433163",
"0.5378438",
"0.537392",
"0.5325623",
"0.5315594",
"0.52947664",
"0.5293445",
"0.5287935",
"0.52774465",
"0.52663606",
"0.5220733",
"0.52171683",
"0.5216434",
"0.51812905",
"0.5178869",
"0.5174612",
"0.5169314",
"0.51658744"
] | 0.8529022 | 0 |
Transforms the context of a sighting using the command configured in `transform_context` sighting the sighting as it was reported by VAST transform_cmd The command to use to pipe sightings to. Treated | async def transform_context(sighting: Sighting, transform_cmd: str) -> Sighting:
context = (
sighting.x_threatbus_sighting_context
if ThreatBusSTIX2Constants.X_THREATBUS_SIGHTING_CONTEXT.value
in sighting.object_properties()
else None
)
if not context:
logger.error(
f"Cannot invoke `transform_context` command because no context data is found in the sighting {sighting}"
)
return
indicator = (
sighting.x_threatbus_indicator
if ThreatBusSTIX2Constants.X_THREATBUS_INDICATOR.value
in sighting.object_properties()
else None
)
if indicator:
_, ioc_value = split_object_path_and_value(indicator.pattern)
else:
# try to find the indicator value instead
ioc_value = (
sighting.x_threatbus_indicator_value
if ThreatBusSTIX2Constants.X_THREATBUS_INDICATOR_VALUE.value
in sighting.object_properties()
else None
)
if not ioc_value:
logger.error(
f"Cannot invoke `transform_context` command because no indicator value is found in the sighting {sighting}"
)
return
transformed_context_raw = await invoke_cmd_for_context(
transform_cmd, context, ioc_value
)
try:
transformed_context = json.loads(transformed_context_raw)
# recreate the sighting with the new transformed context
ser = json.loads(sighting.serialize())
ser[
ThreatBusSTIX2Constants.X_THREATBUS_SIGHTING_CONTEXT.value
] = transformed_context
return parse(json.dumps(ser), allow_custom=True)
except Exception as e:
logger.error(
f"Cannot parse transformed sighting context (expecting JSON): {transformed_context_raw}",
e,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform():\n pass",
"def transform():",
"def transform(config, data, transfo, *args, **kwargs):\n \n# stderr.write(str((config, data, transfo) + args) + \"\\n\")\n pipe = ktpipes.KtPipe.from_json(config[transfo])\n\n return pipe.fit_transform(get_raw(data))",
"async def invoke_cmd_for_context(cmd: str, context: dict, ioc: str = \"%ioc\"):\n if not ioc:\n ioc = \"%ioc\"\n cmd = cmd.replace(\"%ioc\", ioc)\n proc = await asyncio.create_subprocess_exec(\n *lexical_split(cmd),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n stdin=asyncio.subprocess.PIPE,\n )\n proc.stdin.write(json.dumps(context).encode())\n await proc.stdin.drain()\n proc.stdin.close()\n stdout, stderr = await proc.communicate()\n if stderr:\n logger.error(f\"Error while transforming sighting context: {stderr}\")\n return stdout",
"def transform(self, unit, transformation):\n command = 'transform {} {}'.format(unit, transformation)\n command = command.replace(r'[', '{{').replace(r']', '}}')\n command = command.replace('\\n', '').replace(' ', ' ')\n self.add_commands(command)",
"def _apply_transform(self):\n pass",
"def transform(self, X):\n return self.act_on(X)",
"def transform(self):",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, data, allow_timestamps=False):\n assert_is_type(data, H2OFrame)\n assert_is_type(allow_timestamps, bool)\n return H2OFrame._expr(ExprNode(\"mojo.pipeline.transform\", self.pipeline_id[0], data, allow_timestamps))",
"def transform(self, stdout):\n return stdout",
"def transform(self, node):\n return self.get_transform_func(node)(node)",
"def __transform(self):\n try:\n self.tokenized_document, self.stack = None, []\n\n InlineProcessor.initialize()\n LinkHelper.initialize()\n\n POGGER.debug(\"\\n\\n>>>>>>>parse_blocks_pass>>>>>>\")\n first_pass_results = self.__parse_blocks_pass()\n\n POGGER.debug(\"\\n\\n>>>>>>>coalesce_text_blocks>>>>>>\")\n coalesced_results = CoalesceProcessor.coalesce_text_blocks(\n first_pass_results\n )\n\n POGGER.debug(\"\\n\\n>>>>>>>parse_inline>>>>>>\")\n final_pass_results = InlineProcessor.parse_inline(coalesced_results)\n\n POGGER.debug(\"\\n\\n>>>>>>>final_pass_results>>>>>>\")\n return final_pass_results\n except Exception as this_exception:\n raise BadTokenizationError(\n \"An unhandled error occurred processing the document.\"\n ) from this_exception",
"def forward_transform(self):\n\n if self._pipeline:\n #return functools.reduce(lambda x, y: x | y, [step[1] for step in self._pipeline[: -1]])\n return functools.reduce(lambda x, y: x | y, [step.transform for step in self._pipeline[:-1]])\n else:\n return None",
"def transform(self, *fs):\n return transform(self, *fs)",
"def __call__(self):\n self.tree = etree.parse(self.src)\n\n agent = transformer_factory(self.tree, self.options)\n self.tree = agent.transform()\n\n # Write out the finished product\n file = self._targetFile()\n self.tree.write(file, pretty_print=False)\n print 'wrote transformed channel:', file.name",
"def transform(self, x):",
"def transform(self, x):\n return self._test_transform(x)",
"def _apply_transform(self, w2w_transform):\n raise NotImplementedError",
"def transform(self, X):\n return self.transformer.transform(X)",
"def _transform(self, document):\n pass",
"def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)",
"def ts_transform(self, transform):\n try:\n assert transform.lower().strip() in ['log10', 'box-cox']\n except AssertionError:\n self._uvts_cls_logger.error(\n \"transform should be in ['log10', 'box-cox'] or empty. Assuming no transform! \"\n \"Hence, if you get bad results, you would like maybe to choose e.g., log10 here.\")\n self._uvts_cls_logger.exception(\"Assertion exception occurred, transform\")\n self.transform = None\n else:\n self.transform = transform.lower()\n # transform\n if sum(self.ts_df['y'] > 0) < len(self.ts_df['y']):\n self._uvts_cls_logger.warning(\"Zero, negative, or both values present in your data. Transformation will not be used!\")\n return self\n if self.transform == 'log10':\n try:\n self.ts_df['y'] = self.ts_df['y'].apply(np.log10)\n except ValueError:\n self._uvts_cls_logger.exception(\"log10 transformation did not work! Possibly negative \"\n \"values present?\")\n elif self.transform == 'box-cox':\n if input(\"Do you want to provide lambda for box.cox? y/n?\").strip().lower() == 'y':\n self._boxcox_lmbda = float(input())\n else:\n self._boxcox_lmbda = None\n try:\n if self._boxcox_lmbda is None:\n bc, lmbda_1 = stats.boxcox(self.ts_df['y'], lmbda=self._boxcox_lmbda)\n self.ts_df['y'] = stats.boxcox(self.ts_df['y'], lmbda=lmbda_1)\n else:\n self.ts_df['y'] = stats.boxcox(self.ts_df['y'], lmbda=self._boxcox_lmbda)\n except ValueError:\n self._uvts_cls_logger.exception(\"box-cox transformation did not work! \"\n \"Possibly negative values present or bad lambda?\")\n return self",
"def process_context(command):\n global MOVEMENT\n global ACTION\n global FIGHT\n #global CURRENT_OPTIONS\n\n enemy = ENEMY_LIST[ZERO_BASE_PLYR_POS] # Check for enemies at the new location\n \n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"\n\n if enemy != 4:\n MOVEMENT = False\n ACTION = False\n FIGHT = True\n show_action(False, 10)\n show_movement(False, 10)\n show_special(False, 10)\n show_fight(True, 10)\n fight(enemy, command)\n\n elif MOVEMENT:\n MOVEMENT = False\n ACTION = True\n FIGHT = False\n show_fight(False, 10)\n show_movement(False, 10)\n show_special(False, 10)\n show_action(True, 10)\n\n else:\n MOVEMENT = True\n ACTION = False\n FIGHT = False\n show_fight(False, 10)\n show_action(False, 10)\n show_movement(True, 10)\n show_special(True, 10)\n\n #clear_messages(0)\n update_player_on_map()"
] | [
"0.62180775",
"0.6137687",
"0.5702457",
"0.5616719",
"0.5601462",
"0.5513075",
"0.5509026",
"0.5358405",
"0.53441906",
"0.53441906",
"0.53441906",
"0.53441906",
"0.53441906",
"0.53441906",
"0.53441906",
"0.53037167",
"0.5293181",
"0.5258532",
"0.5219467",
"0.516099",
"0.5137712",
"0.51317716",
"0.5125087",
"0.5118151",
"0.5113629",
"0.50990224",
"0.5091809",
"0.5082788",
"0.50622153",
"0.5055174"
] | 0.75897956 | 0 |
Predicate to check if `reply` is a dict and contains the key-value pair "status" = "success" reply A Python dict True if the dict contains "status" = "success" | def reply_is_success(reply: dict):
return (
reply
and type(reply) is dict
and reply.get("status", None)
and reply["status"] == "success"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_success_response(resp: Response) -> bool:\n return \\\n resp and \\\n is_dict(resp) and \\\n resp.get(\"success\", False) is True",
"def validate_reply(request, reply):\n assert isinstance(reply, dict) and 'id' in reply\n assert ('result' in reply) != ('error' in reply)\n assert reply['id'] == request['id'] or \\\n reply['id'] == '00' and 'error' in reply",
"def is_success(msg):\n return msg['status'] == 'success'",
"def is_item_in_the_response(key, value, jsonResponse):\n flag = False\n for item in jsonResponse:\n if type(jsonResponse[item]) == int:\n if item == key and jsonResponse[item] == int(value):\n flag = True\n\n if type(jsonResponse[item]) == str:\n if item == key and jsonResponse[item] == str(value):\n flag = True\n\n if type(jsonResponse[item]) == bool:\n if item == key and jsonResponse[item] == bool(value):\n flag = True\n else:\n #log and error\n pass\n return flag",
"def json_status_ok(data_json, status_attr):\n status = data_json[status_attr]\n return status == 'OK'",
"def check_for_dict(check):",
"def isResp(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '76270-8'",
"def validate_post_response(response, status, job, keys=None):\n assert (response[\"status\"]) == status\n json_response = json.loads(response[\"body\"])\n if not keys:\n keys = list(job.keys())\n assert sorted(keys) == sorted(list(json_response.keys()))\n compare(json_response, job, keys)",
"def is_response_ok(cls, data):\n if data is None:\n cls.notify_widget.show_message(\"一个不好的消息\", \"网络出现一点问题\")\n return False\n\n if not isinstance(data, dict):\n return True\n\n if data['code'] == 200:\n return True\n\n cls.notify_widget.show_message(\"一个不好的消息\", \"网络出现一点问题\")\n return False",
"def reply(f, *args, **kwargs):\n r = f(*args, **kwargs)\n\n if r:\n if isinstance(r, dict):\n r['needs_reply'] = True\n elif isinstance(r, basestring):\n r = dict(answer=r, needs_reply=True)\n\n return r",
"def fusion_api_validate_response(self, respDict, valDict):\n success = True\n returnDict = {}\n keys = []\n for key in valDict:\n if not valDict[key]:\n continue\n # logger._log_to_console_and_log_file('key: %s' % (key))\n keyDict = {'key': key, 'expected': valDict[\n key], 'actual': respDict[key], 'success': True}\n if key in respDict:\n pattern = re.compile(str(valDict[key]))\n # if not re.search(str(valDict[key]), str(respDict[key])):\n # t = re.compile('(?i)Warning|Unknown|Terminated|Killed|Error|Completed')\n\n if not re.search(pattern, str(respDict[key])):\n\n success = False\n keyDict['success'] = False\n else:\n success = False\n keyDict['success'] = False\n keys.append(keyDict)\n\n returnDict['success'] = success\n returnDict['keys'] = keys\n return returnDict",
"def _is_successful(response) -> bool:\n return response.status_code == 200",
"def _validate_response(self, response):\n # Check for unexpected response - all should be JSON dicts that have\n # already been deserialised\n if not isinstance(response, types.DictionaryType):\n self.message(\n \"\\t\\t[!] ERROR - Unexpected value returned from the API: '%s'\" %\n (response))\n return False\n\n # Check for valid errors\n if \"error\" in response and \"msg\" in response:\n self.message(\n \"\\t\\t[!] ERROR - %s (%s)\" %\n (response[\"msg\"], response[\"timestamp\"]))\n return False\n\n # Is this a valid response message\n if \"msg\" in response:\n return True\n\n # Catch all...dictionary returned but does not contain expected keys?\n # Who know's what's going on here?!\n else:\n self.message(\n \"\\t\\t[!] ERROR - Unexpected dictionary response returned from the API: '%s'\" %\n (response))\n return False",
"def is_data_response(resp: Response, data_field: str = None) -> bool:\n\n return \\\n resp and \\\n is_dict(resp) and \\\n is_success_response(resp) and \\\n resp.get(\"data\") is not None and \\\n (not data_field or data_field in resp.get(\"data\"))",
"def checkSelfReply(body):\n return 'WHAT IS MY PURPOSE' in body",
"def _check_reply(self):\n self._more_packets_available = False\n try:\n if self._reply is None:\n self._status = (3, '{} without reply'.format(\n REPLAY_INFO[unpack_dint(self._message[:2])]))\n return False\n # Get the type of command\n typ = unpack_uint(self._reply[:2])\n\n # Encapsulation status check\n if unpack_dint(self._reply[8:12]) != SUCCESS:\n self._status = (3, \"{0} reply status:{1}\".format(\n REPLAY_INFO[typ],\n SERVICE_STATUS[unpack_dint(self._reply[8:12])]))\n return False\n\n # Command Specific Status check\n if typ == unpack_uint(ENCAPSULATION_COMMAND[\"send_rr_data\"]):\n status = unpack_usint(self._reply[42:43])\n if status != SUCCESS:\n status_msg = \"send_rr_data reply:{0} - Extend status:{1}\"\n self._status = (3, status_msg.format(\n SERVICE_STATUS[status],\n get_extended_status(self._reply, 42)))\n return False\n else:\n return True\n return True\n except Exception as e:\n raise DataError(e)",
"def test_response_dict():\n\n obj = {\"key\": \"value\"}\n retval = apigateway.response(obj)\n\n assert retval[\"body\"] == json.dumps(obj)\n assert retval[\"statusCode\"] == 200",
"def checkResponseOK(response):\n assert response['result'] == 'OK'",
"def is_ok(r) -> bool:\n\tif r.status_code == 200:\n\t\treturn True",
"def match(self, result: dict):\n if self._matchStatus(result['Status']):\n if self._comparator['Length']:\n return self._matchLength(int(result['Length']))\n if self._comparator['Time']:\n return self._matchTime(result['Time Taken'])\n return True\n return False",
"def check_response(response):\n status = response.get('status')\n ret = status and status == 'OK'\n if not ret:\n logging.error('Received unexpected failure response from polyswarmd: %s', response)\n return ret",
"def action_success(self, resp):\n return resp[0] in SUCCESS_CODES",
"def action_success(self, resp):\n return resp[0] in SUCCESS_CODES",
"def check_response(rv):\n if rv != 'OK':\n print \"No message found\"\n return False\n return True",
"def validate_state(retval):\n success = True\n for data in retval.itervalues():\n for result in data.itervalues():\n if not result.get('result'):\n success = False\n break\n return success",
"def testReponse(question, reponse):\r\n if reponse == question[5]:\r\n return True\r\n else:\r\n return False",
"def assertJsonReplyEqual(self, ans, expected):\n self.assertTrue(ans.startswith(crud_handler.JSON_PREFIX))\n self.assertTrue(expected.startswith(crud_handler.JSON_PREFIX))\n\n ans = json.loads(ans[len(crud_handler.JSON_PREFIX):])\n expected = json.loads(expected[len(crud_handler.JSON_PREFIX):])\n self.assertSameStructure(ans, expected)",
"def status_check():\n return {\"status\": \"OK\"}",
"def check_reply(user):\n if not user.is_authenticated():\n return 'not_auth'\n\n return 'ok'",
"def check_status(response):\n if response.status_code == 200:\n return True"
] | [
"0.7250996",
"0.6504684",
"0.6264166",
"0.6229269",
"0.61349565",
"0.60097516",
"0.5948076",
"0.5893526",
"0.58669925",
"0.581646",
"0.5764716",
"0.56838953",
"0.5681368",
"0.566706",
"0.5639373",
"0.56248266",
"0.55777353",
"0.55689406",
"0.55523485",
"0.5548644",
"0.55417967",
"0.5527407",
"0.5527407",
"0.55140007",
"0.5505336",
"0.54702395",
"0.5463014",
"0.5450367",
"0.54477835",
"0.5446746"
] | 0.8270404 | 0 |
Unsubscribes this app from Threat Bus for the given topic. endpoint The ZMQ management endpoint of Threat Bus topic The topic to unsubscribe from timeout The period after which the connection attempt is aborted | def unsubscribe(endpoint: str, topic: str, timeout: int = 5):
global logger
logger.info(f"Unsubscribing from topic '{topic}' ...")
action = {"action": "unsubscribe", "topic": topic}
reply = send_manage_message(endpoint, action, timeout)
if not reply_is_success(reply):
logger.warning("Unsubscription failed")
return
logger.info("Unsubscription successful") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def unsubscribe(self, topic: str, subscription_id: int = None) -> None:\n ...",
"def unsubscribe(self, topic):\n request = protos.RequestUnsubscribe(topic=topic)\n return self.stub.unsubscribe(request)",
"def unsubscribe(self, user_token, topic):\n response = _request('DELETE',\n url=self.url_v1('/user/subscriptions/' + topic),\n user_agent=self.user_agent,\n user_token=user_token,\n )\n _raise_for_status(response)",
"def test_unsubscribe(self):\n dest = '/topic/dest'\n\n self.tm.subscribe(self.conn, dest)\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n self.assertEqual(len(self.conn.frames), 1)\n subscription = self.conn.frames[0].headers.pop(\"subscription\", None)\n self.assertEqual(subscription, 0)\n self.assertEqual(self.conn.frames[0], f)\n\n self.tm.unsubscribe(self.conn, dest)\n f = Frame(frames.MESSAGE, headers={'destination': dest}, body='Empty')\n self.tm.send(f)\n\n self.assertEqual(len(self.conn.frames), 1)",
"async def unsubscribe_topics(self) -> None:\n self._sub_state = await self._mqtt_client.unsubscribe(self._sub_state)",
"def unsubscribeTopic(self, topic:str|MQTTTopic) -> None:\n\t\tif isinstance(topic, MQTTTopic):\n\t\t\tif topic.topic not in self.subscribedTopics:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.WARNING, f'MQTT: unknown topic: {topic.topic}')\n\t\t\t\treturn\n\t\t\tif (r := self.mqttClient.unsubscribe(topic.topic))[0] == 0:\n\t\t\t\ttopic.mid = r[1]\n\t\t\telse:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.ERROR, f'MQTT: cannot unsubscribe: {r[0]}')\n\t\t\t\treturn\n\n\t\telse:\t# if topic is just the name we need to subscribe to\n\t\t\tif topic not in self.subscribedTopics:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.WARNING, f'MQTT: unknown topic: {topic}')\n\t\t\t\treturn\n\t\t\tt = self.subscribedTopics[topic]\n\t\t\tif t.isSubscribed:\n\t\t\t\tif (r := self.mqttClient.unsubscribe(t.topic))[0] == 0:\n\t\t\t\t\tt.mid = r[1]\n\t\t\t\telse:\n\t\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.ERROR, f'MQTT: cannot unsubscribe: {r[0]}')\n\t\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.WARNING, f'MQTT: topic not subscribed: {topic}')\n\n\t\t# topic is removed in _onUnsubscribe() callback",
"def _async_unsubscribe(self, topic: str) -> None:\n if self._is_active_subscription(topic):\n if self._max_qos[topic] == 0:\n return\n subs = self._matching_subscriptions(topic)\n self._max_qos[topic] = max(sub.qos for sub in subs)\n # Other subscriptions on topic remaining - don't unsubscribe.\n return\n if topic in self._max_qos:\n del self._max_qos[topic]\n if topic in self._pending_subscriptions:\n # Avoid any pending subscription to be executed\n del self._pending_subscriptions[topic]\n\n self._pending_unsubscribes.add(topic)\n self._unsubscribe_debouncer.async_schedule()",
"def onUnsubscribed(self, connection:MQTTConnection, topic:str) -> bool:\n\t\tconnection.subscribedCount -= 1\n\t\treturn True",
"def deregister_event_topic(DirectoryId=None, TopicName=None):\n pass",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(receiver):",
"def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)",
"def unsubscribe(self, destination, *args, **kwargs):",
"def on_unsubscribe(self, mqtt_client, userdata, mid ):\n logging.debug(\"DEBUG - unsubscribe ack received\")",
"def desubscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = DeSubscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()",
"def unsubscribe_topic(self, topic_id: str) -> bool:\n result = self.__twitch.delete_eventsub_subscription(topic_id)\n if result:\n self.__callbacks.pop(topic_id, None)\n return result",
"def _onUnsubscribe(self, client:mqtt.Client, userdata:Any, mid:int) -> None:\n\t\t# TODO doc, error check when not connected, not subscribed\n\t\tfor t in self.subscribedTopics.values():\n\t\t\tif t.mid == mid:\n\t\t\t\tdel self.subscribedTopics[t.topic]\n\t\t\t\tself.messageHandler and self.messageHandler.onUnsubscribed(self, t.topic)\n\t\t\t\tbreak",
"def unsubscribe(self):\n if self._subscribed and self._connected:\n try:\n msg = self._create_message(strings.UNSUB_MSG)\n self.write(msg)\n except (OSError, KeyError) as ex:\n _LOGGER.error(\n \"PyISY encountered a socket error while writing unsubscribe message to the socket: %s.\",\n ex,\n )\n self._subscribed = False\n self.disconnect()",
"def subscribe_off(self, callback: callable):\n topic = f\"{self._subscriber_topic}_off\"\n subscribe_topic(callback, topic)",
"def unsubscribeFromEvent(eventName,subscriber):",
"def unsubscribe(self, namespace, unsub_strings=None):\n req = JSONRPCRequest('unsubscribe', [namespace, unsub_strings])\n result = yield self._send(req)\n self._cache_jsonrpc_request(req)\n raise tornado.gen.Return(result)",
"def unsubscribe(self, subject):\n pass",
"def subscribe_off(self, callback: callable):\n subscribe_topic(callback, self._off_subscriber_topic)",
"def stop_subscription(event):\n _LOGGER.info(\"Shutting down subscriptions\")\n hass.data[vera.VERA_CONTROLLER].stop()",
"def unsubscribe(self, feed, **args):\n args.update(feed=feed)\n return self.fetch(\"/unsubscribe\", post_args=args)",
"def unsubscribe(self, subscription):\r\n params = {'ContentType' : 'JSON',\r\n 'SubscriptionArn' : subscription}\r\n response = self.make_request('Unsubscribe', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)",
"def unlisten(self, prefix: str) -> None:\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info(\"No longer polling for message type: %s\", prefix)"
] | [
"0.72822154",
"0.6926093",
"0.64411914",
"0.63961035",
"0.63747597",
"0.6357573",
"0.6245065",
"0.61568964",
"0.6072447",
"0.60447603",
"0.60447603",
"0.60447603",
"0.60447603",
"0.60447603",
"0.6013516",
"0.60046285",
"0.59764683",
"0.59599453",
"0.59538406",
"0.59074706",
"0.5841843",
"0.5797226",
"0.5788195",
"0.57852834",
"0.5783623",
"0.577339",
"0.57108",
"0.5705728",
"0.5674817",
"0.5672112"
] | 0.79936963 | 0 |
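A minimal usage sketch for the unsubscribe() document above. The module-level logger plus the send_manage_message and reply_is_success helpers are referenced but not defined in the record, so the stub bodies and the example endpoint/topic values below are assumptions for illustration only, not the app's actual implementation:

    import logging

    logger = logging.getLogger(__name__)

    def send_manage_message(endpoint: str, action: dict, timeout: int = 5):
        # Assumption: a real implementation sends `action` as JSON to the
        # Threat Bus management endpoint (e.g. over a ZMQ REQ socket) and
        # returns the decoded reply, or an error marker on timeout.
        raise NotImplementedError

    def reply_is_success(reply) -> bool:
        # Assumption: the reply carries an explicit success marker; treat
        # anything else as failure.
        return isinstance(reply, dict) and reply.get("success") is True

    # Example call (hypothetical endpoint and topic):
    # unsubscribe("tcp://localhost:13370", "stix2/indicator", timeout=5)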
Sends heartbeats to Threat Bus periodically to check if the given p2p_topic is still valid at the Threat Bus host. Cancels all async tasks of this app when the heartbeat fails and stops the heartbeat. endpoint: The ZMQ management endpoint of Threat Bus. p2p_topic: The topic string to include in the heartbeat. timeout: The period after which the connection attempt is aborted. | async def heartbeat(endpoint: str, p2p_topic: str, interval: int = 5):
global logger
action = {"action": "heartbeat", "topic": p2p_topic}
while True:
reply = send_manage_message(endpoint, action, interval)
if not reply_is_success(reply):
logger.error("Subscription with Threat Bus host became invalid")
return await cancel_async_tasks()
await asyncio.sleep(interval) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_hb_off(self):\n await self.async_setup()\n off = TopicItem(\n self._off_topic,\n {\n \"cmd1\": 0x13,\n \"cmd2\": 0x00,\n \"target\": Address(\"000004\"),\n \"user_data\": None,\n \"hops_left\": 3,\n },\n 0.05,\n )\n send_topics([off])\n await asyncio.sleep(0.1)\n assert not self._heartbeat\n assert not self._heartbeat_off\n assert self._heartbeat_on is None",
"def heartbeat():\n\n timestamp = format_time(get_time())\n\n if Config.use_broker:\n bmessage = json.dumps({'event_type': 'heartbeat', '@timestamp': timestamp, 'honeypotID': ID})\n BrokerEndpoint.BrokerEndpoint.sendLogs(bmessage)\n\n message = ('{} - [Heartbeat]'.format(timestamp))\n _log_status(message)",
"async def test_hb_on(self):\n await self.async_setup()\n on = TopicItem(\n self._on_topic,\n {\n \"cmd1\": 0x11,\n \"cmd2\": 0xFF,\n \"target\": Address(\"000004\"),\n \"user_data\": None,\n \"hops_left\": 3,\n },\n 0.05,\n )\n send_topics([on])\n await asyncio.sleep(0.1)\n assert not self._heartbeat\n assert self._heartbeat_on\n assert self._heartbeat_off is None",
"def test_uptimerobot_monitor_up(self) -> None:\n expected_topic = \"Mail Server\"\n expected_message = \"\"\"\nMail Server (server2.example.com) is back UP (Host Is Reachable).\nIt was down for 44 minutes and 37 seconds.\n\"\"\".strip()\n self.check_webhook(\"uptimerobot_monitor_up\", expected_topic, expected_message)",
"def broker_null(self, data):\n\n print(\"Heartbeat\")\n #TODO: Reset heartbeat timer or something like that",
"def heartbeat():\n try:\n from django_q.models import Success\n except AppRegistryNotReady: # pragma: no cover\n logger.info(\"Could not perform heartbeat task - App registry not ready\")\n return\n\n threshold = timezone.now() - timedelta(minutes=30)\n\n # Delete heartbeat results more than half an hour old,\n # otherwise they just create extra noise\n heartbeats = Success.objects.filter(\n func='InvenTree.tasks.heartbeat',\n started__lte=threshold\n )\n\n heartbeats.delete()",
"async def send_heartbeat_message(self, *args, **kwargs):\n msg = \"ping\"\n await self._ws.send(msg)",
"def _verify_api_heartbeat(retry=True):\n url = 'http://{0}/heartbeat'.format(env.host_string)\n try:\n resp = urllib2.urlopen(url)\n status_code = resp.getcode()\n except urllib2.HTTPError as error:\n print '[{0}] Error while testing API: {1}'.format(env.host_string,\n error)\n print '[{0}] \\t Received: {1}'.format(env.host_string, error.read())\n status_code = error.getcode()\n\n if status_code == 200:\n print '[{0}] API Test Succesful!'.format(env.host_string)\n return\n\n if not retry:\n fabric.utils.abort('Host: {0} API is not functioning properly'\n .format(env.host_string))\n else:\n print '[{0}] Retrying heartbeat in 2 seconds...' \\\n .format(env.host_string)\n time.sleep(2)\n _verify_api_heartbeat(retry=False)",
"def start_heartbeat(self):\n\n async def heartbeat_loop():\n \"\"\"Continuously send heart beat updates.\"\"\"\n self.debug(\"Started heartbeat loop\")\n while True:\n try:\n await self.heartbeat()\n await asyncio.sleep(HEARTBEAT_INTERVAL)\n except asyncio.CancelledError:\n self.debug(\"Stopped heartbeat loop\")\n raise\n except asyncio.TimeoutError:\n self.debug(\"Heartbeat failed due to timeout, disconnecting\")\n break\n except Exception as ex: # pylint: disable=broad-except\n self.exception(\"Heartbeat failed (%s), disconnecting\", ex)\n break\n\n transport = self.transport\n self.transport = None\n transport.close()\n\n self.heartbeater = self.loop.create_task(heartbeat_loop())",
"def do_heartbeat(self):\n try:\n self.api.heartbeat(\n uuid=self.agent.get_node_uuid(),\n advertise_address=self.agent.advertise_address\n )\n self.error_delay = self.initial_delay\n self.log.info('heartbeat successful')\n except Exception:\n self.log.exception('error sending heartbeat')\n self.error_delay = min(self.error_delay * self.backoff_factor,\n self.max_delay)",
"def pingEsp(self):\n\t\twhile True:\n\t\t\tprint (\"[{}] Keeping alive the ESP8266 connection\".format(\n\t\t\t\tint(time.time()),\n\t\t\t))\n\t\t\tmqttCli.publish(\"ping\", mqttJsonDump('void'))\n\t\t\ttime.sleep(30)",
"def test_uptimerobot_monitor_down(self) -> None:\n expected_topic = \"Web Server\"\n expected_message = \"Web Server (server1.example.com) is DOWN (Host Is Unreachable).\"\n self.check_webhook(\"uptimerobot_monitor_down\", expected_topic, expected_message)",
"def _send_heart_beat(self):\n if self._ws is not None:\n self._ws.write_message('PING')\n else:\n self._logger.debug('Attempting to send a PING over a closed websocket!')",
"async def _heart_beat(self):\n while self._state == const.STATE_CONNECTED:\n old_last_activity = self._last_activity\n last_activity = datetime.utcnow() - self._last_activity\n threshold = timedelta(seconds=self._heart_beat_interval)\n if last_activity > threshold:\n self._heartbeat_command()\n await asyncio.sleep(5)\n if self._last_activity <= old_last_activity:\n await self._handle_connection_error()\n await asyncio.sleep(self._heart_beat_interval / 2)",
"def check_heartbeat(self, hostname: str, current_time, max_interval: int) -> bool:\n with self.lock:\n try:\n host = Query()\n record = self.hosts.search(host.hostname == hostname)[0]\n\n if record:\n latest_update = datetime.strptime(record['latest_recv'], self.time_format)\n difference = current_time - latest_update\n\n return int(difference.total_seconds()) <= max_interval\n\n except Exception as err:\n raise HeartbeatError('Cannot check heartbeat of host with hostname={}'.format(hostname), err)",
"def heartbeat(p_average_time,\n p_message_queue,\n p_num_tasks,\n p_continue_consistency,\n interval = 0.5,\n timeout = 20, \n VERBOSE = False,\n worker_num = 0):\n\n # get average time\n with p_average_time.get_lock():\n avg_time = p_average_time.value\n #get num_tasks\n with p_num_tasks.get_lock():\n num_tasks = p_num_tasks.value\n \n # get initial values before entering loop\n last_wait = avg_time* (num_tasks+1)\n prev_time = time.time()\n \n while prev_time + 2*timeout > time.time():\n # get average time\n with p_average_time.get_lock():\n avg_time = p_average_time.value\n #get num_tasks\n with p_num_tasks.get_lock():\n num_tasks = p_num_tasks.value\n \n cur_wait = avg_time * (num_tasks+1)\n \n # send message\n message = (\"heartbeat\",(time.time(),cur_wait,worker_num))\n p_message_queue.put(message)\n \n if VERBOSE: print(\"w{}: Heartbeat thread added '{:.2f}s' to message queue.\".format(\n worker_num,cur_wait))\n \n # updates prev_time whenever wait time changes\n if cur_wait != last_wait:\n prev_time = time.time() \n last_wait = cur_wait\n \n # a bit imprecise since the other operations in the loop do take some time\n time.sleep(interval)\n \n # tell consistency thread to exit\n with p_continue_consistency.get_lock():\n p_continue_consistency.value = False\n \n # send message to monitor process indicating worker is shutting down gracefully\n message = (\"shutdown\",(worker_num, time.time()))\n \n print(\"w{}: Heartbeat thread exited.\".format(worker_num))",
"async def heartbeat_loop():\n self.debug(\"Started heartbeat loop\")\n while True:\n try:\n await self.heartbeat()\n await asyncio.sleep(HEARTBEAT_INTERVAL)\n except asyncio.CancelledError:\n self.debug(\"Stopped heartbeat loop\")\n raise\n except asyncio.TimeoutError:\n self.debug(\"Heartbeat failed due to timeout, disconnecting\")\n break\n except Exception as ex: # pylint: disable=broad-except\n self.exception(\"Heartbeat failed (%s), disconnecting\", ex)\n break\n\n transport = self.transport\n self.transport = None\n transport.close()",
"def check_heartbeat(self):\n return True",
"def _send_heartbeat(self):\n if time.time() > self.__heartbeat + self.__hb_interval:\n self.__pipe.send({\"command\": \"heartbeat\"})\n self.__heartbeat = time.time()",
"def test_heartbeat(self):\n pass",
"def check_ping(self):\n # If we're still connecting, deny the connection\n if self.state == self.STATE_CONNECTING:\n if self.duration() > self.main_factory.websocket_connect_timeout:\n self.serverReject()\n elif self.state == self.STATE_OPEN:\n if (time.time() - self.last_data) > self.main_factory.ping_interval:\n self._sendAutoPing()\n self.last_data = time.time()",
"async def test_no_hb(self):\n await self.async_setup()\n pyinsteon.managers.heartbeat_manager.HB_CHECK_BUFFER = 1\n self._hb_mgr = pyinsteon.managers.heartbeat_manager.HeartbeatManager(\n self._address, self._group, 0\n )\n await asyncio.sleep(1.1)\n assert self._heartbeat",
"def _check_endpoints_last_communication(self):\n while self._thread_running:\n endpoints = EndpointService.get_all_endpoints()\n for endpoint in endpoints:\n if datetime.now() - endpoint.lastCommunication > timedelta(minutes=TOKEN_EXPIRE_TIME):\n EventService.add_event(Event(\"0\", \"Lost Endpoint At \" +\n str(endpoint[EndpointKeys.LAST_COMMUNICATION_KEY]),\n \"Report\", \"IDLE\", endpoint[EndpointKeys.HOSTNAME_KEY],\n endpoint[EndpointKeys.IP_ADDRESS_KEY]))\n EndpointService.delete_endpoint(endpoint.id)\n sleep(SLEEP_TIME)",
"def online_check(self):\n self.online = False\n online_topic = '{t_topic}/INFO2'.format(**self)\n print('{BLUE}Watching for {}{NC}'.format(online_topic, **colors))\n try:\n self.mqtt.connect(self.mqtt_host)\n except Exception:\n print('MQTT broker not online')\n return False\n\n self.mqtt.message_callback_add(online_topic, lambda *args: \\\n setattr(self, 'online', True))\n self.mqtt.subscribe(online_topic)\n startTime = dt.datetime.now()\n while not self.online and not too_old(startTime, wait_time):\n self.mqtt.loop(timeout=loop_time)\n time_waited = (dt.datetime.now() - startTime).total_seconds()\n # If we did not see device publish INFO2, sometimes platformio causes\n # a delay by checking for updates and we miss seeing this message.\n # To check for that case, query the device for its build timestamp and\n # check if it was built in the last couple minutes.\n if not self.online:\n self.query_tas_status()\n if 'build_time' in self.reported:\n build_time = dt.datetime.strptime(self.reported['build_time'],\n '%Y-%m-%dT%H:%M:%S')\n if dt.datetime.now() - build_time < dt.timedelta(minutes=2):\n self.online = True\n\n if not self.online:\n print('{RED}{f_name} did not come online within {wait_time} '\n 'seconds{NC}'.format(f_name=self.f_name,\n wait_time=str(wait_time),\n **colors))\n elif self.online:\n print('{GREEN}{f_name} came online in {time_waited} '\n 'seconds{NC}'.format(f_name=self.f_name,\n time_waited=time_waited,\n **colors))\n self.mqtt.unsubscribe(online_topic)\n self.mqtt.message_callback_remove(online_topic)\n self.mqtt.disconnect()\n return self.online",
"def test_process_packet_heartbeat(self):\n\n pkt = {'type': 'heartbeat',\n 'endpoint': ''\n }\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called",
"def startHeartbeat(self):\n self.post.__sendHeartbeat()",
"def poll_device(self):\n #self.logger.info(\"poll_device: Checking online status\")\n for tasmota_topic in self.tasmota_devices:\n if self.tasmota_devices[tasmota_topic].get('online', None) is not None:\n if self.tasmota_devices[tasmota_topic]['online_timeout'] < datetime.now():\n self.tasmota_devices[tasmota_topic]['online'] = False\n self.set_item_value(tasmota_topic, 'item_online', False, 'poll_device')\n self.logger.info(f\"poll_device: {tasmota_topic} is not online any more - online_timeout={self.tasmota_devices[tasmota_topic]['online_timeout']}, now={datetime.now()}\")",
"def __heart(self):\n\n while not self.__stop_threads:\n start_time = time.time()\n self.__pulse_all()\n # print(f\"total pulse time = {time()-start_time} seconds\")\n sleep(BusController.PULSE_DELAY)\n print(\"stopped the heartbeats\")",
"def update_host_heartbeat(self, hostname: str) -> bool:\n with self.lock:\n try:\n host = Query()\n self.hosts.update({'latest_recv': datetime.now().strftime(self.time_format)},\n host.hostname.matches(hostname))\n return True\n except Exception as err:\n raise UpdateError('Cannot update latest_recv of host with hostname={}'.format(hostname), err)",
"def periodic_maintainence(self):\n if not hasattr(self,'peer_servers'):\n return\n for key in self.peer_servers.keys():\n if DEBUG: print self.peer_servers[key].last_broadcast_time\n if self.peer_servers[key].is_open_connection == False:\n continue\n if (time.time() - self.peer_servers[key].last_broadcast_time) > 30:\n if DEBUG: print (\"Shutting down inactive peer_server:\",\n self.peer_servers[key].name,\n self.peer_servers[key].server_id,\n 'Last Broadcast time:',\n str(time.time() -float(self.peer_servers[key].last_broadcast_time)),\n 'seconds ago')\n self.peer_servers[key].close()\n #del self.peer_servers[key]\n return # temporary disable\n self.__periodic_maintenance__()"
] | [
"0.5968126",
"0.5751398",
"0.5532078",
"0.53880507",
"0.5279109",
"0.52726096",
"0.5246624",
"0.5200716",
"0.519192",
"0.5134135",
"0.51318514",
"0.5131482",
"0.51096624",
"0.49996907",
"0.4969726",
"0.49536985",
"0.49502018",
"0.49493974",
"0.4914249",
"0.48973852",
"0.4865458",
"0.48480332",
"0.4802582",
"0.47970948",
"0.47589824",
"0.472009",
"0.47118264",
"0.47018504",
"0.46835634",
"0.46601075"
] | 0.7803305 | 0 |
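A minimal scheduling sketch for the heartbeat() coroutine above, assuming heartbeat() and the cancel_async_tasks() coroutine it calls exist in scope; the endpoint and topic strings are made-up placeholder values:

    import asyncio

    async def main():
        endpoint = "tcp://localhost:13370"   # hypothetical management endpoint
        p2p_topic = "example/p2p/topic"      # hypothetical topic from a prior subscribe
        # Run the heartbeat alongside the app's other tasks; it only returns
        # after a failed heartbeat has triggered cancel_async_tasks().
        await heartbeat(endpoint, p2p_topic, interval=5)

    # asyncio.run(main())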
round_hours(datetime, resolutionInHours) => datetime rounded to lower interval. Works for hour resolution up to a day (e.g. it cannot round to the nearest week). | def round_hours(dt, resolutionInHours):
from datetime import datetime, timedelta
# First zero out minutes, seconds and micros
dtTrunc = dt.replace(minute=0,second=0, microsecond=0)
    # Figure out how many hours we are past the last interval
    excessHours = (dtTrunc.hour) % resolutionInHours
    # Subtract off the excess hours to get the last interval
return dtTrunc + timedelta(hours=-excessHours) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _round_time(dt=None, round_to=60):\n if hasattr(dt, 'tzinfo'):\n dt.replace(tzinfo=None)\n diff = dt - dt.replace(hour=0, minute=0, second=0, microsecond=0)\n seconds = diff.seconds\n rounding = (seconds + round_to / 2) // round_to * round_to\n return dt + datetime.timedelta(0, rounding-seconds, -dt.microsecond)",
"def roundTime(dt=None, roundTo=60):\n if dt == None : dt = datetime.datetime.now()\n seconds = (dt - dt.min).seconds\n # // is a floor division, not a comment on following line:\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n return dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)",
"def roundTime(dt=None, roundTo=60):\n\n if dt == None : dt = datetime.datetime.now()\n seconds = (dt.replace(tzinfo=None) - dt.min).seconds\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n return dt + timedelta(0,rounding-seconds,-dt.microsecond)",
"def roundTime(dt=None, roundTo=60):\n if dt == None : dt = datetime.datetime.now()\n seconds = (dt - dt.min).seconds\n # // is a floor division, not a comment on following line:\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)\n dt.replace(second=0, microsecond=0)\n return dt",
"def round_time(dt, roundTo=60):\n seconds = (dt.replace(tzinfo=None) - dt.min).seconds\n rounding = (seconds + roundTo/2) // roundTo * roundTo\n return dt + timedelta(0, rounding - seconds, -dt.microsecond)",
"def round_time(dt=None, round_to=60):\n if dt == None : dt = datetime.now()\n seconds = (dt.replace(tzinfo=None) - dt.min).seconds\n rounding = (seconds+round_to/2) // round_to * round_to\n return dt + timedelta(0,rounding-seconds,-dt.microsecond)",
"def roundTime(dateTimeString):\n\n dateTimeObj = datetime.strptime(dateTimeString, '%d/%m/%Y %H:%M:%S')\n\n # round to nearest 20 minute\n discard = timedelta(minutes=dateTimeObj.minute % 20)\n dateTimeObj -= discard\n if discard > timedelta(minutes=10):\n dateTimeObj += timedelta(minutes=20)\n\n result = dateTimeObj.strftime('%H:%M')\n\n return result",
"def roundTime(dateTimeString):\n\n dateTimeObj = datetime.strptime(dateTimeString, '%d/%m/%Y %H:%M:%S')\n\n # round to nearest 20 minute\n discard = timedelta(minutes=dateTimeObj.minute % 20)\n dateTimeObj -= discard\n if discard > timedelta(minutes=10):\n dateTimeObj += timedelta(minutes=20)\n\n result = dateTimeObj.strftime('%H:%M')\n\n return result",
"def roundTime(dt=None, roundTo=1):\n if dt == None : dt = datetime.now()\n seconds = total_seconds(dt - dt.min)\n # // is a floor division, not a comment on following line:\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n return dt + timedelta(0,rounding-seconds,-dt.microsecond)",
"def round_utc_hour_up(dateString):\n date_object = datetime.strptime(dateString, \"%Y-%m-%d %H:%M:%S\")\n newHour = (date_object.hour + 1) % 24\n date_object = date_object.replace(hour=newHour)\n return date_object.strftime(\"%Y-%m-%d %H:00:00\")",
"def round_time(dt=None, roundTo=60): # IGNORE:W0621\n\n if dt is None:\n dt = datetime.now()\n\n dt = np.asarray(dt, dtype='datetime64[s]').reshape(-1)\n\n for li in range(len(dt)):\n date = dt[li].astype(object)\n seconds = (date - date.min).seconds\n\n # // is a floor division, not a comment on following line:\n rounding = (seconds + roundTo / 2) // roundTo * roundTo\n\n dt[li] = date + timedelta(0, rounding - seconds, -date.microsecond)\n\n return len(dt) == 1 and dt[0].astype(object) or dt",
"def round_time(self, time):\n hour, mins, _ = time.split(\":\")\n return '{:02d}:00:00'.format(int(hour)+1 ) if int(mins) >= 30 else '{:02d}:00:00'.format(int(hour))",
"def round_time(time):\n t_min = time.minute % 5\n t_sec = time.second\n t_mic = time.microsecond\n time = time - timedelta(minutes=t_min, seconds=t_sec, microseconds=t_mic)\n return time",
"def round_datetime(dt, freq):\n \n freq_list = ['YEARLY', 'MONTHLY', 'WEEKLY', \n 'DAILY', 'HOURLY', 'MINUTELY', \n\t\t 'SECONDLY']\n \n assert isinstance(dt, datetime.datetime)\n assert freq in freq_list \n \n year = dt.year\n month = dt.month if eval('dates.'+freq) >= dates.MONTHLY else 1\n day = dt.day if eval('dates.'+freq) >= dates.WEEKLY else 1\n hour = dt.hour if eval('dates.'+freq) >= dates.HOURLY else 0\n minute = dt.minute if eval('dates.'+freq) >= dates.MINUTELY else 0\n second = dt.second if eval('dates.'+freq) >= dates.SECONDLY else 0\n \n return datetime.datetime(year, month, day, hour, minute, second)",
"def round_up_to_quarter_hour(self, dt: datetime) -> str:\n delta = timedelta(minutes=15)\n # Round time backwards to the hour\n rounded_hour = dt.replace(minute=0, second=0, microsecond=0)\n rounded_qtr_hour = rounded_hour + ceil((dt - rounded_hour) / delta) * delta\n return self.date_to_intermediate_time_str(rounded_qtr_hour)",
"def datetime_round(dt, period, start=None):\n result = datetime_mod(dt, period, start)\n if abs(dt - result) >= period // 2:\n result += period\n return result",
"def roundTime(dt=None, dateDelta=datetime.timedelta(minutes=1)):\n roundTo = dateDelta.total_seconds()\n\n if dt == None : dt = datetime.datetime.now()\n seconds = (dt - dt.min).seconds\n # // is a floor division, not a comment on following line:\n rounding = (seconds+roundTo/2) // roundTo * roundTo\n\n return dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)",
"def hours_on(series, on_power_threshold=DEFAULT_ON_POWER_THRESHOLD):\n\n i_above_threshold = np.where(series[:-1] >= on_power_threshold)[0]\n # now calculate timedelta ('td') above threshold...\n td_above_thresh = (series.index[i_above_threshold + 1].values -\n series.index[i_above_threshold].values)\n secs_on = timedelta64_to_secs(td_above_thresh.sum())\n return secs_on / SEC_PER_HOUR",
"def seconds2hours(time_in_seconds):\n seconds_since_midnight = np.mod(time_in_seconds, SECONDS_PER_DAY)\n fraction_hour = seconds_since_midnight/SECONDS_PER_HOUR\n if fraction_hour[-1] == 0:\n fraction_hour[-1] = 24\n return fraction_hour",
"def round_minute(time, round_to):\n rounded = time + datetime.timedelta(minutes=round_to/2.)\n rounded -= datetime.timedelta(minutes=rounded.minute % round_to, \n seconds=rounded.second, \n microseconds=rounded.microsecond)\n return rounded",
"def closest_half(iso_datetime):\n d_time = datetime.fromisoformat(iso_datetime)\n approx = round(d_time.minute / 30.0) * 30\n d_time = d_time.replace(minute=0)\n d_time += timedelta(seconds=approx * 60)\n d_time = d_time.replace(second=0)\n return d_time.isoformat()",
"def round_to_nearest_60(x):\r\n return int(60 * round(float(x) / 60))",
"def get_datetime(hours):\n return datetime.datetime.utcfromtimestamp(hours * 60 * 60)",
"def round_minutes(minutes):\n i = math.floor(minutes / 15)\n under, over = i * 15, (i + 1) * 15\n d1, d2 = abs(minutes - under), abs(minutes - over)\n # Return the increment closest to the original value.\n return over if d2 <= d1 else under",
"def hours_in(sec):\r\n return int(sec//3600)",
"def calculate_seconds_in_hours(hours):\n return int(hours * 3600)",
"def round_time(dt=None, date_delta=datetime.timedelta(minutes=1), to='closest'):\n round_to = date_delta.total_seconds()\n\n if dt is None:\n dt = datetime.datetime.utcnow()\n\n seconds = (dt - dt.min).seconds\n\n if to == 'up':\n rounding = (seconds + round_to) // round_to * round_to\n elif to == 'down':\n rounding = seconds // round_to * round_to\n elif to == 'closest':\n rounding = (seconds + round_to / 2) // round_to * round_to\n else:\n raise ValueError(\n 'Expected `to` to be one of: up, down, closest')\n\n return dt + datetime.timedelta(\n seconds=rounding - seconds,\n microseconds=-dt.microsecond)",
"def build_convert_to_hours(time_units):\n if time_units not in VALID_TIME_UNITS:\n raise ValueError('Time units must be one of', VALID_TIME_UNITS)\n \n if time_units == 'min':\n return lambda x: x/60\n elif time_units == 'h':\n return lambda x: x",
"def get_time_round(date):\r\n return int(date / self.timeframe) * self.timeframe",
"def get_hours_of_interest(current_time, hours=None, add_weekend_hour=True):\n\n if hours is None: # pragma: no cover\n hours = [8, 12, 18]\n else:\n hours = list(hours)\n\n current_hour = current_time.hour\n\n if add_weekend_hour and current_time.weekday() in [4, 5]:\n hours.append(22)\n\n hours_of_interest = []\n\n hours = sorted(hours)\n for n in range(len(hours)):\n if current_hour + 1 < hours[n]:\n hours_of_interest = hours[n:]\n break\n\n logging.debug('Hours of interest: %s', hours_of_interest)\n return hours_of_interest"
] | [
"0.62496",
"0.62407213",
"0.61740667",
"0.6116412",
"0.60953623",
"0.59740126",
"0.5945597",
"0.5945597",
"0.58779293",
"0.58567286",
"0.57889855",
"0.5739316",
"0.56454057",
"0.5618119",
"0.5568635",
"0.5413379",
"0.53959805",
"0.5384869",
"0.5372498",
"0.53567505",
"0.52811795",
"0.51637995",
"0.5145783",
"0.5133272",
"0.5090272",
"0.5068472",
"0.5042526",
"0.5033449",
"0.5025154",
"0.50237757"
] | 0.86196357 | 0 |
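A small usage sketch for round_hours() above; the timestamp is an arbitrary example value:

    from datetime import datetime

    ts = datetime(2023, 5, 1, 14, 37, 12)
    # 14:37 falls in the 12:00-18:00 window at 6-hour resolution, so the
    # call rounds down to 12:00 on the same day.
    print(round_hours(ts, 6))   # 2023-05-01 12:00:00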
Calculates the significance for an A/B test. | def significance(size_a, successes_a, size_b, successes_b):
# Raising an error if the condition of size_sample > successes is not met.
if size_a < successes_a or size_b < successes_b:
raise ValueError('The size numbers must be greater than the number of successes for an '
'experiment')
p_a = successes_a / size_a
p_b = successes_b / size_b
se_a = standard_error(size_a, successes_a)
se_b = standard_error(size_b, successes_b)
numerator = (p_b - p_a)
denominator = (se_a ** 2 + se_b ** 2) ** 0.5
return norm.sf(abs(numerator / denominator)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fmeasure(B, hits, misses, falses) :\r\n x = ((1 + B**2) * hits) / ((1 + B**2) * hits + B**2 * misses + falses)\r\n return x",
"def simulate_significance(self):\n observed_difference = self.p_treatment - self.p_control\n\n try: # check to see if there's an array in self.binom_null\n len(self.binom_null)\n differences = self.binom_null\n except:\n self.binom_distribution()\n differences = self.binom_null\n\n p = (differences >= observed_difference).mean()\n self.p_value = p\n\n return p",
"def fmeasure(B, hits, misses, falses) :\n x = ((1 + B**2) * hits) / ((1 + B**2) * hits + B**2 * misses + falses)\n return x",
"def test_b_grade_exact(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.33)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')",
"def consistancy_test(a, b, aErr, bErr = 0):#TODO: fully test this aproach\n return int(np.ceil(np.abs(b - a) / np.sqrt(bErr**2 + aErr**2)))",
"def expected(A, B):\n return 1 / (1 + 10 ** ((B - A) / 150))",
"def calc_significance(data_subset, data_all, standard_name):\n\n from statsmodels.tsa.stattools import acf\n\n # Data must be three dimensional, with time first\n assert len(data_subset.shape) == 3, \"Input data must be 3 dimensional\"\n \n # Define autocorrelation function\n n = data_subset.shape[0]\n autocorr_func = numpy.apply_along_axis(acf, 0, data_subset, nlags=n - 2)\n \n # Calculate effective sample size (formula from Zieba2010, eq 12)\n k = numpy.arange(1, n - 1)\n \n r_k_sum = ((n - k[:, None, None]) / float(n)) * autocorr_func[1:] \n n_eff = float(n) / (1 + 2 * r_k_sum.sum(axis=0))\n \n # Calculate significance\n var_x = data_subset.var(axis=0) / n_eff\n tvals = (data_subset.mean(axis=0) - data_all.mean(axis=0)) / numpy.sqrt(var_x)\n pvals = stats.t.sf(numpy.abs(tvals), n - 1) * 2 # two-sided pvalue = Prob(abs(t)>tt)\n\n notes = \"One sample t-test, with sample size adjusted for autocorrelation (Zieba2010, eq 12)\" \n pval_atts = {'standard_name': standard_name,\n 'long_name': standard_name,\n 'units': ' ',\n 'notes': notes,}\n\n return pvals, pval_atts",
"def test_bisection_system(testFunctions,tol, printFlag):\n pass",
"def print_significance(result_collector, conditionA, conditionB, levels=None):\n print(\n \"\\n# Testing significance of difference: {} vs. {}\".format(\n conditionA, conditionB\n )\n )\n result_collector.set_metric([\"macro_avg\", \"fscore\"])\n print(\"level\\tp_value\")\n for level in levels or []:\n _, pvalue = result_collector.wilcoxon(conditionA, conditionB, level)\n print(\"{}\\t{:.5f}\".format(level, pvalue))\n\n level = \"lat\"\n result_collector.set_metric([\"accuracy\"])\n _, pvalue = result_collector.wilcoxon(conditionA, conditionB, level)\n print(\"{}\\t{:.5f}\".format(level, pvalue))",
"def poisson_significance(s,b):\n return np.sqrt(2*((s+b)*np.log(1+s/b)-s))",
"def significance(self):\n \n Z = -1*stats.norm.ppf(0.5*self.p_value)\n print(\"Z = {:e} sigma\".format(Z))",
"def calculateStatisticalSignificance():\n ##tau HCS pearson\n ml_model_perf = pickle.load(open(\"pickles/ml_model_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_perf.pkl\", \"rb\"))\n y = np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n z, p = calculateZScoreAndPValue(m1=y[0], s1=stds[0], n1=17280, m2=y[1], s2=stds[1], n2=17280)\n print(\"stats for HCS pearson, ML vs Null YFP: z: {}, p: {}\".format(z, p))\n z, p = calculateZScoreAndPValue(m1=y[0], s1=stds[0], n1=17280, m2=y[2], s2=stds[2], n2=17280)\n print(\"stats for HCS pearson, ML vs Null DAPI: z: {}, p: {}\".format(z, p))\n \n ##tau HCS MSE\n ml_model_perf = pickle.load(open(\"pickles/ml_model_mse_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_mse_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_mse_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n z, p = calculateZScoreAndPValue(m1=y[1], s1=stds[1], n1=17280, m2=y[0], s2=stds[0], n2=17280)\n print(\"stats for HCS MSE, ML vs Null YFP: z: {}, p: {}\".format(z, p))\n z, p = calculateZScoreAndPValue(m1=y[2], s1=stds[2], n1=17280, m2=y[0], s2=stds[0], n2=17280)\n print(\"stats for HCS MSE, ML vs Null DAPI: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma ablated pearon\n ##this one is a bit more involved because we have individual means and STDs over a 3-fold cross-val\n ##we have the following for the ablated ML model (sample size, avg pearson, std), one for each fold:\n # (108330 0.7498484453029202 0.12794946936625312)\n # (108330 0.7507672277328549 0.12978897185198424) \n # (108330 0.7512250395547646 0.12858723725044444)\n ##combining to one sample we have mean = .7506, std=.1288\n ##and the following for the Null Model\n #(108330 0.3951239419846807 0.13861514301358197)\n #(108330 0.39522112186984787 0.1387019314192389)\n #(108330 0.3956142180066648 0.13832544923711507)\n ##combining this into one sample, we have: mean = 0.3953, std = .1385\n z, p = calculateZScoreAndPValue(m1=.7506, s1=.1288, n1=108330*3, m2=.3953, s2=.1385, n2=108330*3)\n print(\"stats for osteosarcoma ablated pearson, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma ablated MSE\n ##ML model performance:\n # (108330 0.5003031 0.25589895)\n # (108330 0.4984656 0.25957793)\n # (108330 0.49754992 0.2571745)\n ##combining to one sample we have mean = 0.4988 , std= .2576\n ##Null Model performance:\n # (108330 1.209752 0.2772303)\n # (108330 1.2095579 0.27740386)\n # (108330 1.2087716 0.27665088)\n ##combining to one sample we have mean = 1.2094 , std= 0.2771\n z, p = calculateZScoreAndPValue(m1=1.2094, s1=.2771, n1=108330*3, m2=.4988, s2=.2576, n2=108330*3)\n print(\"stats for osteosarcoma ablated MSE, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma raw pearson \n ##ML model performance:\n #(108330 0.8487535502148598, 0.0750789260880985)\n #(108330 0.8482422038817274, 0.0749674444367002)\n # (108330 0.8500693686258434, 0.07491226209365953)\n ##combining to one sample we have mean = .849 , std= 0.075\n ##Null model performance:\n #(108330 0.44372635525546694, 0.11585072713296693)\n #(108330 0.4440357996615424, 0.11573081667714848)\n # (108330 
0.4443288449364213, 0.11528081384708891)\n ##combining to one sample we have mean = 0.444 , std= 0.1156\n z, p = calculateZScoreAndPValue(m1=.849, s1=0.075, n1=108330*3, m2=0.444, s2=0.1156, n2=108330*3)\n print(\"stats for osteosarcoma raw pearson, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma raw MSE\n ##ML model performance:\n #(108330 0.3024929, 0.15015785)\n #(108330 0.3035156, 0.1499349)\n # (108330 0.29986125, 0.14982451)\n ##combining to one sample we have mean = 0.302 , std= 0.15\n ##Null model performance\n # (108330 1.1125473, 0.23170146)\n # (108330 1.1119285, 0.23146166)\n # (108330 1.1113423, 0.23056163)\n ##combining to one sample we have mean = 1.1119 , std= 0.2312\n z, p = calculateZScoreAndPValue(m1=1.1119, s1=0.2312, n1=108330*3, m2=0.302, s2=0.15, n2=108330*3)\n print(\"stats for osteosarcoma raw MSE, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##comparing ablated to nonablated pearson\n z, p = calculateZScoreAndPValue(m1=0.849, s1=0.075, n1=108330*3, m2=0.7506, s2=0.1288, n2=108330*3)\n print(\"stats for comparing ablated to non-ablated pearson: z: {}, p: {}\".format(z, p))\n\n ##comparing ablated to nonablated MSE\n z, p = calculateZScoreAndPValue(m1=.4988, s1=.2576, n1=108330*3, m2=0.302, s2=0.15, n2=108330*3)\n print(\"stats for comparing ablated to non-ablated MSE: z: {}, p: {}\".format(z, p))",
"def grade(self) -> float:\n assert self._results, \"Tests have not been run\"\n return sum(\n weight\n for test, weight in self.test_weights.items()\n if self._results[test].wasSuccessful()\n )",
"def calculateSignificance(state_probs, num, num_rpl, num_cnd, num_exp):\n ps = np.array(state_probs)\n num_state = len(state_probs)\n # Binomial tail probabilities for each state\n p_ks = 1 - binom.cdf(num - 1, num_rpl, ps)\n # Calculate the probability of obtaining at least n\n # replications for each state\n p_tilde_func = lambda n: np.prod([1 - p_ks[m]\n for m in range(num_state) if m != n])\n p_tilde = sum([p_ks[n]*p_tilde_func(n) for n in range(num_state)])\n # Probability that the foregoing happens for each condition\n # in the study variation (plot)\n q_tilde = p_tilde**num_cnd\n # Probability that this happens at least once within the studies\n q = 1 - (1 - q_tilde)**num_exp\n return q",
"def test_b_grade_above(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')",
"def test_intra_power_law_fit(self):\n\n\t\tprint(type(self.fc_layers[0:2]), self.fc_layers[0:2])\n\t\tdetails= self.watcher.analyze(layers=self.fc_layers[0:2], intra=True, randomize=False, vectors=False, pl_package=POWERLAW_PACKAGE, xmax=XMAX_FORCE)\n\t\tactual_alpha = details.alpha[0]\n\t\t#actual_best_fit = details.best_fit[0]\n\t\t#print(actual_alpha,actual_best_fit)\n\n\t\texpected_alpha = 2.654 # not very accurate because of the sparisify transform\n\t\t#expected_best_fit = LOG_NORMAL\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=1)\n\t\t#self.assertEqual(actual_best_fit, expected_best_fit)",
"def test_average_bioequivalence():\n\n # See 10.2 Example from Chow et al.\n h = Average(delta=0.223, stdev=0.40, margin=0.05,\n alpha=0.05, power=0.8, known_stdev=True)\n h.calculate()\n # Chow has 21, but they have the wrong z_beta/2. It should be 1.28,\n # not 0.84. When that is fixed, the correct n is 23\n assert h.n == 23\n assert h.power > 0.8",
"def get_significance(data_subset, data_all, \n p_var, p_standard_name,\n size_subset, size_all):\n\n# alpha = 0.05 \n# w, p_value = scipy.stats.levene(data_included, data_excluded)\n# if p_value > alpha:\n# equal_var = False# Reject the null hypothesis that Var(X) == Var(Y)\n# else:\n# equal_var = True\n\n assert type(size_subset) == type(size_all)\n assert type(size_subset) in [str, float, int]\n\n t, pvals = stats.mstats.ttest_ind(data_subset, data_all, axis=0) # stats.ttest_ind has an equal_var option that mstats does not\n print 'WARNING: Significance test assumed equal variances'\n\n pval_atts = {'id': p_var,\n 'standard_name': p_standard_name,\n 'long_name': p_standard_name,\n 'units': ' ',\n 'notes': \"\"\"Two-tailed p-value from standard independent two sample t-test comparing the subsetted data (size=%s) to a sample containing all the data (size=%s)\"\"\" %(str(size_subset), str(size_all)),\n 'reference': 'scipy.stats.ttest_ind(a, b, axis=t, equal_var=False)'}\n\n if type(size_subset) == str:\n\n\tsize_subset_atts = {'id': size_subset,\n 'standard_name': size_subset,\n 'long_name': size_subset,\n 'units': ' ',\n 'notes': \"\"\"Size of sample that exceeds the threshold\"\"\"}\n\n\tsize_all_atts = {'id': size_all,\n 'standard_name': size_all,\n 'long_name': size_all,\n 'units': ' ',\n 'notes': \"\"\"Size of the entire data\"\"\"}\n\n\treturn pvals, pval_atts, size_subset_atts, size_all_atts\n\n else:\n\n return pvals, pval_atts",
"def test_bisection_1b(self):\n logging.info(\"\\nANSWERS TO EXERCISE 1.1B\")\n left = 0.5\n right = 3.1\n\n # The final interval should contain the desired root.\n root, (left, right) = undertest.bisection(self.func, left, right, self.maxit)\n self.assertTrue(_root_in_interval(self.desired_root, left, right))",
"def test_statistics_calculator_absval():\n from resistics.statistics.calculator import StatisticCalculator\n import numpy as np\n\n specData, evalfreq = get_spectrum_data()\n calculator = StatisticCalculator()\n calculator.winLen = 1\n assert calculator.winLen == 1\n calculator.setSpectra(specData.freqArray, specData, evalfreq)\n statData = calculator.getDataForStatName(\"absvalEqn\")\n testData = {\n 24: {\n \"absExHx\": 53.956000593075835,\n \"absEyHx\": 47.01063709417264,\n \"absHxHx\": 93.5,\n \"absHyHx\": 38.01315561749642,\n \"absExHy\": 28.609439001839934,\n \"absEyHy\": 28.635642126552707,\n \"absHxHy\": 38.01315561749642,\n \"absHyHy\": 105.0,\n \"absExEx\": 57.0,\n \"absEyEx\": 40.0,\n \"absHxEx\": 53.956000593075835,\n \"absHyEx\": 28.609439001839934,\n \"absExEy\": 40.0,\n \"absEyEy\": 40.0,\n \"absHxEy\": 47.01063709417264,\n \"absHyEy\": 28.635642126552707,\n },\n 40: {\n \"absExHx\": 34.60130055359191,\n \"absEyHx\": 31.622776601683793,\n \"absHxHx\": 49.5,\n \"absHyHx\": 24.73863375370596,\n \"absExHy\": 51.24451190127583,\n \"absEyHy\": 22.80350850198276,\n \"absHxHy\": 24.73863375370596,\n \"absHyHy\": 84.0,\n \"absExEx\": 49.0,\n \"absEyEx\": 33.83784863137726,\n \"absHxEx\": 34.60130055359191,\n \"absHyEx\": 51.24451190127583,\n \"absExEy\": 33.83784863137726,\n \"absEyEy\": 30.0,\n \"absHxEy\": 31.622776601683793,\n \"absHyEy\": 22.80350850198276,\n },\n }\n for efreq in evalfreq:\n for key, val in statData[efreq].items():\n np.testing.assert_almost_equal(val, testData[efreq][key])",
"def calculate_f_p(genes, gene_abundance_file, gene_molecular_weight_file):\n gene_abundance = pd.read_csv(gene_abundance_file, index_col=0)\n gene_molecular_weight = json_load(gene_molecular_weight_file)\n enzy_abundance = 0\n pro_abundance = 0\n for gene_i in gene_abundance.index:\n if gene_i in gene_molecular_weight.keys():\n abundance = gene_abundance.loc[gene_i, 'abundance'] * \\\n gene_molecular_weight[gene_i]/1000\n pro_abundance += abundance\n if gene_i in genes.index:\n enzy_abundance += abundance\n f = enzy_abundance/pro_abundance\n return f",
"def analyze_significance(self):\n var_control = 1 * self.p_sample * (1 - self.p_sample)\n var_treatment = 1 * self.p_sample * (1 - self.p_sample) # Same as var_control, because null hyp is no difference\n\n sigma = np.sqrt((var_control / self.n_control) + (var_treatment / self.n_treatment))\n\n z = (self.p_treatment - self.p_control) / sigma\n p = (1 - stats.norm.cdf(z))\n self.p_value = p\n\n return p",
"def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")",
"def test_intra_power_law_fit2(self):\n\t\tprint(type(self.fc_layers[0:2]), self.fc_layers[0:2])\n\t\tdetails= self.watcher.analyze(layers=self.fc_layers[0:2], intra=True, sparsify=False, pl_package=POWERLAW_PACKAGE, xmax=XMAX_FORCE)\n\t\tactual_alpha = details.alpha[0]\n\t\t#actual_best_fit = details.best_fit[0]\n\t\t#print(actual_alpha,actual_best_fit)\n\n\n\t\texpected_alpha = 2.719 # close to exact ?\n\t\t#expected_best_fit = LOG_NORMAL\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=2)\n\t\t#self.assertEqual(actual_best_fit, expected_best_fit)",
"def hypothesis_test_two_means_testvalue(datae,dataf,test_value,alpha):\n \n # Dataset E\n data_e = 1.0*np.array(datae)\n n_e = data_e.shape[0]*data_e.shape[1]\n mean_e = np.array(data_e).mean()\n var_e = np.array(data_e).var(ddof=1)\n df_e = n_e-1\n \n # Dataset F\n data_f = 1.0*np.array(dataf)\n n_f = dataf.shape[0]*dataf.shape[1]\n mean_f = np.array(data_f).mean()\n var_f = np.array(data_f).var(ddof=1)\n df_f = n_f-1\n \n # Sp,t and pvalue\n Sp = np.sqrt((((df_e*var_e) + (df_f*var_f))/(df_e+df_f)))\n t = ((mean_e-mean_f)-test_value)/(Sp*np.sqrt(1/n_e+1/n_f))\n pvalue = 1-scs.t.cdf(t,df_e+df_f,)\n \n # Decision\n if pvalue > alpha:\n decision = 'Fail to Reject H0'\n return t,pvalue,decision\n else:\n decision = 'Reject H0'\n return t,pvalue,decision",
"def test_run_group_significance_test(self):\r\n bt = parse_biom_table(BT_IN_1)\r\n bt_4 = parse_biom_table(BT_4)\r\n\r\n # test with non-paramteric t-test\r\n sample_indices = {'cat1': [0, 5, 1], 'cat2': [2, 4, 3]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [0.17503798979747345, 0.20029818620053824,\r\n -1.5065313062753816, -\r\n 0.043884559904114794, -1.0631239617935129,\r\n -1.2878361428003895]\r\n # we are expecting 1001 comparisons)\r\n exp_pvals = map(lambda x: x / 1001., [888, 899, 279, 1001, 489, 299])\r\n exp_means = [[52.333333333333336, 48.333333333333336],\r\n [34.0, 30.333333333333332],\r\n [20.0, 49.333333333333336],\r\n [55.333333333333336, 56.0],\r\n [20.0, 38.0],\r\n [30.0, 60.333333333333336]]\r\n seed(0) # seed prng for reproducibility\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'nonparametric_t_test',\r\n GROUP_TEST_CHOICES, reps=1000)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with nonparametric t-test but different ordering\r\n sample_indices = {'cat1': [0, 1, 5], 'cat2': [4, 3, 2]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n seed(0) # seed prng for reproducibility\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'nonparametric_t_test',\r\n GROUP_TEST_CHOICES, reps=1000)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with BT_4 biom table\r\n sample_indices = {'cat1': [0, 3, 1, 4], 'cat2': [5, 2, 7, 6]}\r\n row_gen = group_significance_row_generator(bt_4, sample_indices)\r\n exp_test_stats = [-0.38741397129147953, -0.38334158591463874,\r\n 0.077468274988510541, -\r\n 0.2322539745918096, 0.16469600468808282,\r\n -0.49589486133213057]\r\n # we are expecting 1001 comparisons)\r\n exp_pvals = map(lambda x: x / 1001., [821, 719, 916, 935, 938, 604])\r\n exp_means = [[43.5, 51.75],\r\n [29.75, 34.75],\r\n [41.5, 40.0],\r\n [50.5, 53.75],\r\n [28.0, 25.5],\r\n [41.75, 54.0]]\r\n seed(0)\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'nonparametric_t_test',\r\n GROUP_TEST_CHOICES, reps=1000)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n\r\n # test with parametric t test\r\n # bt_1 agrees with Prism\r\n sample_indices = {'cat1': [4, 1, 2], 'cat2': [5, 0, 3]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [-1.0504514628777806, -0.94113003446934629,\r\n -\r\n 0.66264262463016887, 0.17617555832772411, 1.1144416530351877,\r\n -1.2483315640812607]\r\n exp_pvals = [0.3527834167236007, 0.39992473225679626,\r\n 0.5437923932346147, 0.8687158192049661, 0.32753202812350557,\r\n 0.27998887149482976]\r\n exp_means = [[39.666666666666664, 61.0],\r\n [24.333333333333332, 40.0],\r\n [27.0, 42.333333333333336],\r\n [57.0, 54.333333333333336],\r\n [38.333333333333336, 19.666666666666668],\r\n [30.333333333333332, 60.0]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'parametric_t_test',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with BT_4\r\n sample_indices = 
{'cat1': [0, 1, 2, 3], 'cat2': [4, 5, 6, 7]}\r\n row_gen = group_significance_row_generator(bt_4, sample_indices)\r\n exp_test_stats = [0.43577690622483684, -2.5911938781738648,\r\n -\r\n 1.3573515147239095, 1.2101173913086851, 2.137178815882979,\r\n 0.0099191576638653078]\r\n exp_pvals = [0.67823972846362579, 0.041145883121579255,\r\n 0.2235024418313547, 0.27174025956151748, 0.076447615888438444,\r\n 0.9924073718332862]\r\n exp_means = [[52.25, 43.0],\r\n [20.5, 44.0],\r\n [29.25, 52.25],\r\n [59.75, 44.5],\r\n [39.0, 14.5],\r\n [48.0, 47.75]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'parametric_t_test',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n\r\n # test with bootstrapped mann_whitney_u\r\n sample_indices = {'cat1': [4, 1, 2], 'cat2': [5, 0, 3]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [7.0, 7.0, 7.0, 6.0, 7.0, 7.0]\r\n exp_pvals = [0.333, 0.305, 0.3, 0.623, 0.295, 0.334]\r\n exp_means = [[39.666666666666664, 61.0],\r\n [24.333333333333332, 40.0],\r\n [27.0, 42.333333333333336],\r\n [57.0, 54.333333333333336],\r\n [38.333333333333336, 19.666666666666668],\r\n [30.333333333333332, 60.0]]\r\n seed(0) # seed prng for reproducibility\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'bootstrap_mann_whitney_u',\r\n GROUP_TEST_CHOICES, reps=1000)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with BT_4\r\n sample_indices = {'cat1': [0, 1, 2, 3], 'cat2': [4, 5, 6, 7]}\r\n row_gen = group_significance_row_generator(bt_4, sample_indices)\r\n exp_test_stats = [10.0, 15.0, 11.0, 14.0, 15.0, 9.0]\r\n exp_pvals = [0.605, 0.033, 0.414, 0.097, 0.041, 0.814]\r\n exp_means = [[52.25, 43.0],\r\n [20.5, 44.0],\r\n [29.25, 52.25],\r\n [59.75, 44.5],\r\n [39.0, 14.5],\r\n [48.0, 47.75]]\r\n seed(0) # seed prng for reproducibility\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'bootstrap_mann_whitney_u',\r\n GROUP_TEST_CHOICES, reps=1000)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n\r\n # test with parametric mann whitney u\r\n sample_indices = {'cat1': [0, 3, 1], 'cat2': [4, 2, 5]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [6.0, 6.0, 5.0, 5.0, 6.0, 5.0]\r\n exp_pvals = [0.51269076026192328, 0.51269076026192328,\r\n 0.82725934656271127, 0.82725934656271127, 0.51269076026192328,\r\n 0.82725934656271127]\r\n exp_means = [[52.666666666666664, 48.0],\r\n [23.666666666666668, 40.666666666666664],\r\n [34.0, 35.333333333333336],\r\n [56.333333333333336, 55.0],\r\n [32.333333333333336, 25.666666666666668],\r\n [46.0, 44.333333333333336]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'mann_whitney_u',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with BT_4\r\n sample_indices = {'cat1': [0, 1, 2, 3], 'cat2': [4, 5, 6, 7]}\r\n row_gen = group_significance_row_generator(bt_4, sample_indices)\r\n exp_test_stats = [10.0, 15.0, 11.0, 14.0, 15.0, 9.0]\r\n exp_pvals = [0.5637028616507731, 
0.043308142810791955,\r\n 0.38363032713198975, 0.083264516663550406, 0.043308142810791955,\r\n 0.77282999268444752]\r\n exp_means = [[52.25, 43.0],\r\n [20.5, 44.0],\r\n [29.25, 52.25],\r\n [59.75, 44.5],\r\n [39.0, 14.5],\r\n [48.0, 47.75]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'mann_whitney_u',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n\r\n # test with ANOVA\r\n sample_indices = {'cat1': [0, 3], 'cat2': [4, 5], 'cat3': [2, 1]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [0.022340083574413375, 20.028268551236753,\r\n 2.086854460093897, 0.96500593119810185, 4.8390804597701154,\r\n 0.54346882684796749]\r\n exp_pvals = [0.97806870848824634, 0.018391757629969238,\r\n 0.27043709109167957, 0.47468983920325486, 0.11510587547067222,\r\n 0.62890473306440042]\r\n exp_means = [[53.0, 46.5, 51.5],\r\n [28.5, 55.5, 12.5],\r\n [50.0, 45.5, 8.5],\r\n [50.5, 47.5, 69.0],\r\n [28.0, 9.0, 50.0],\r\n [65.0, 39.5, 31.0]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'ANOVA',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with uneven group sizes\r\n sample_indices = {'cat1': [0, 2, 3, 1], 'cat2': [4, 5]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [0.05663963168179019, 16.436058700209646,\r\n 0.43828937472444823, 0.675244322576109, 4.7713717693836974,\r\n 0.083541102077687446]\r\n exp_pvals = [0.8235822412182755, 0.015422975290359022,\r\n 0.54414414026513325, 0.45738578176242134, 0.094285405564661875,\r\n 0.78691584834507211]\r\n exp_means = [[52.25, 46.5],\r\n [20.5, 55.5],\r\n [29.25, 45.5],\r\n [59.75, 47.5],\r\n [39.0, 9.0],\r\n [48.0, 39.5]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'ANOVA',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with bt_4\r\n sample_indices = {'cat1': [0, 1, 2, 3], 'cat2': [4, 5, 6, 7]}\r\n row_gen = group_significance_row_generator(bt_4, sample_indices)\r\n exp_test_stats = [0.18990151199889027, 6.7142857142857144,\r\n 1.8424031345232912, 1.4643841007477372, 4.5675332910589734,\r\n 9.8389688760617899e-05]\r\n exp_pvals = [0.6782397284636259, 0.041145883121579234,\r\n 0.22350244183135481, 0.27174025956151771, 0.076447615888438403,\r\n 0.9924073718332751]\r\n exp_means = [[52.25, 43.0],\r\n [20.5, 44.0],\r\n [29.25, 52.25],\r\n [59.75, 44.5],\r\n [39.0, 14.5],\r\n [48.0, 47.75]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'ANOVA',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n\r\n # test with g goodness of fit\r\n sample_indices = {'cat1': [0, 3], 'cat2': [4, 5], 'cat3': [2, 1]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [0.46328913071721711,\r\n 29.810689447160001, 37.234612591840595, 4.7031232724401875,\r\n 31.207185565457102, 13.332324853339509]\r\n exp_pvals = [0.79322801392154108,\r\n 3.3627225458535774e-07, 8.2149818410655555e-09, 
0.09522034650579822,\r\n 1.6728066897036456e-07, 0.00127327567601971]\r\n exp_means = [[53.0, 46.5, 51.5],\r\n [28.5, 55.5, 12.5],\r\n [50.0, 45.5, 8.5],\r\n [50.5, 47.5, 69.0],\r\n [28.0, 9.0, 50.0],\r\n [65.0, 39.5, 31.0]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'g_test',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with individual groups\r\n sample_indices = {'cat1': [0], 'cat2': [1], 'cat3': [3],\r\n 'cat4': [2], 'cat5': [5], 'cat6': [4]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [68.7536611489639, 62.908926545455522,\r\n 115.84654226008865, 26.819713749563704, 84.940231595557307,\r\n 105.37909384565077]\r\n exp_pvals = [1.8616725644907271e-13, 3.0403858229558975e-12,\r\n 2.3772983815049693e-23, 6.1843461955812955e-05,\r\n 7.7481603433718027e-17, 3.8768150325829967e-21]\r\n exp_means = [[28.0, 52.0, 78.0, 51.0, 77.0, 16.0],\r\n [25.0, 14.0, 32.0, 11.0, 63.0, 48.0],\r\n [31.0, 2.0, 69.0, 15.0, 27.0, 64.0],\r\n [36.0, 68.0, 65.0, 70.0, 62.0, 33.0],\r\n [16.0, 41.0, 40.0, 59.0, 3.0, 15.0],\r\n [32.0, 8.0, 98.0, 54.0, 50.0, 29.0]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'g_test',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with uneven length groups\r\n sample_indices = {'cat1': [0, 3, 4, 5], 'cat3': [2, 1]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [0.030099778845406742,\r\n 16.703388149486191, 29.941854048163027, 3.39187772427496,\r\n 14.935738277477988, 5.4519230964604013]\r\n exp_pvals = [0.86226402523867973, 4.3702877865113464e-05,\r\n 4.451983032513133e-08, 0.065518295867083964, 0.00011123571448583719,\r\n 0.019546798231055287]\r\n exp_means = [[49.75, 51.5],\r\n [42.0, 12.5],\r\n [47.75, 8.5],\r\n [49.0, 69.0],\r\n [18.5, 50.0],\r\n [52.25, 31.0]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'g_test',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with bt_4\r\n sample_indices = {'cat1': [0, 1, 2, 3], 'cat2': [4, 5, 6, 7]}\r\n row_gen = group_significance_row_generator(bt_4, sample_indices)\r\n exp_test_stats = [0.8950130401309585, 8.6948783805472942,\r\n 6.5397009199496443, 2.2281537448054953, 11.541070115516771,\r\n 0.00064935138712822981]\r\n exp_pvals = [0.34412242732851783, 0.0031910540870178925,\r\n 0.010549308294222293, 0.13551569348660794, 0.00068075444949030543,\r\n 0.97967020739471489]\r\n exp_means = [[52.25, 43.0],\r\n [20.5, 44.0],\r\n [29.25, 52.25],\r\n [59.75, 44.5],\r\n [39.0, 14.5],\r\n [48.0, 47.75]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'g_test',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n\r\n # test with Kruskal Wallis\r\n sample_indices = {'cat1': [0, 3], 'cat2': [4, 5], 'cat3': [2, 1]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [0.2857142857142847,\r\n 4.5714285714285694, 3.7142857142857117, 
3.7142857142857117,\r\n 4.5714285714285694, 0.85714285714285765]\r\n exp_pvals = [0.86687789975018215, 0.10170139230422694,\r\n 0.15611804531597129, 0.15611804531597129, 0.10170139230422694,\r\n 0.65143905753105535]\r\n exp_means = [[53.0, 46.5, 51.5],\r\n [28.5, 55.5, 12.5],\r\n [50.0, 45.5, 8.5],\r\n [50.5, 47.5, 69.0],\r\n [28.0, 9.0, 50.0],\r\n [65.0, 39.5, 31.0]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'kruskal_wallis',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with individual groups\r\n sample_indices = {'cat1': [0], 'cat2': [1], 'cat3': [3],\r\n 'cat4': [2], 'cat5': [5], 'cat6': [4]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [5.0, 5.0, 5.0, 5.0, 5.0, 5.0]\r\n exp_pvals = [0.41588018699550794, 0.41588018699550794,\r\n 0.41588018699550794, 0.41588018699550794, 0.41588018699550794,\r\n 0.41588018699550794]\r\n exp_means = [[28.0, 52.0, 78.0, 51.0, 77.0, 16.0],\r\n [25.0, 14.0, 32.0, 11.0, 63.0, 48.0],\r\n [31.0, 2.0, 69.0, 15.0, 27.0, 64.0],\r\n [36.0, 68.0, 65.0, 70.0, 62.0, 33.0],\r\n [16.0, 41.0, 40.0, 59.0, 3.0, 15.0],\r\n [32.0, 8.0, 98.0, 54.0, 50.0, 29.0]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'kruskal_wallis',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with uneven length groups\r\n sample_indices = {'cat1': [0, 3, 4, 5], 'cat3': [2, 1]}\r\n row_gen = group_significance_row_generator(bt, sample_indices)\r\n exp_test_stats = [0.0, 3.428571428571427, 3.428571428571427,\r\n 3.428571428571427, 3.428571428571427, 0.21428571428571175]\r\n exp_pvals = [1, 0.064077506451059238, 0.064077506451059238,\r\n 0.064077506451059238, 0.064077506451059238, 0.64342884356362262]\r\n exp_means = [[49.75, 51.5],\r\n [42.0, 12.5],\r\n [47.75, 8.5],\r\n [49.0, 69.0],\r\n [18.5, 50.0],\r\n [52.25, 31.0]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'kruskal_wallis',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)\r\n # test with bt_4\r\n sample_indices = {'cat1': [0, 1, 2, 3], 'cat2': [4, 5, 6, 7]}\r\n row_gen = group_significance_row_generator(bt_4, sample_indices)\r\n exp_test_stats = [0.33333333333333215, 4.0833333333333321,\r\n 0.75903614457831325, 3.0, 4.0833333333333321, 0.083333333333332149]\r\n exp_pvals = [0.56370286165077377, 0.043308142810792101,\r\n 0.38363032713198986, 0.08326451666355042, 0.043308142810792101,\r\n 0.77282999268444919]\r\n exp_means = [[52.25, 43.0],\r\n [20.5, 44.0],\r\n [29.25, 52.25],\r\n [59.75, 44.5],\r\n [39.0, 14.5],\r\n [48.0, 47.75]]\r\n obs_test_stats, obs_pvals, obs_means = \\\r\n run_group_significance_test(row_gen, 'kruskal_wallis',\r\n GROUP_TEST_CHOICES)\r\n assert_almost_equal(exp_test_stats, obs_test_stats)\r\n assert_almost_equal(exp_pvals, obs_pvals)\r\n assert_almost_equal(exp_means, obs_means)",
"def test_docstring_examples():\n assert_allclose(background(n_off=4, alpha=0.1), 0.4)\n assert_allclose(background(n_off=9, alpha=0.2), 1.8)\n\n assert_allclose(background_error(n_off=4, alpha=0.1), 0.2)\n assert_allclose(background_error(n_off=9, alpha=0.2), 0.6)\n\n assert_allclose(excess(n_on=10, n_off=20, alpha=0.1), 8.0)\n assert_allclose(excess(n_on=4, n_off=9, alpha=0.5), -0.5)\n\n assert_allclose(excess_error(n_on=10, n_off=20, alpha=0.1), 3.1937439)\n assert_allclose(excess_error(n_on=4, n_off=9, alpha=0.5), 2.5)\n\n result = significance_on_off(n_on=10, n_off=20, alpha=0.1, method='lima')\n assert_allclose(result, 3.6850322025333071)\n result = significance_on_off(n_on=4, n_off=9, alpha=0.5, method='lima')\n assert_allclose(result, -0.19744427645023557)\n\n result = significance_on_off(n_on=10, n_off=20, alpha=0.1, method='simple')\n assert_allclose(result, 2.5048971643405982)\n result = significance_on_off(n_on=4, n_off=9, alpha=0.5, method='simple')\n assert_allclose(result, -0.2)\n\n result = significance_on_off(n_on=10, n_off=20, alpha=0.1, method='lima')\n assert_allclose(result, 3.6850322025333071)\n\n # Check that the Li & Ma limit formula is correct\n actual = significance(n_on=1300, mu_bkg=1100, method='lima')\n assert_allclose(actual, 5.8600870406703329)\n actual = significance_on_off(n_on=1300, n_off=1100 / 1.e-8, alpha=1e-8, method='lima')\n assert_allclose(actual, 5.8600864348078519)",
"def get_importance_weights(self, t_s_a_pi_b: Tuple[int, Tuple[int, int, float]]) -> float:\n t, s_a_pi_b = t_s_a_pi_b\n s, a, pi_b = s_a_pi_b\n return self.eval_policy.pi(s, a) / pi_b",
"def is_consistant(a, b, aErr, bErr, sigma):\n return consistancy_test(a, b, aErr, bErr) <= sigma",
"def significance(wk1, wk2, nout, ofac):\n expy = exp(-wk2) \n effm = 2.0*(nout)/ofac \n sig = effm*expy\n ind = (sig > 0.01).nonzero()\n sig[ind] = 1.0 - (1.0 - expy[ind])**effm\n\n return sig"
] | [
"0.5942177",
"0.5914534",
"0.5898058",
"0.5746552",
"0.5729359",
"0.56870574",
"0.5655487",
"0.5625582",
"0.5611372",
"0.5603035",
"0.55927914",
"0.5591462",
"0.5541222",
"0.54759663",
"0.547294",
"0.54472035",
"0.54131156",
"0.54129475",
"0.54105234",
"0.54044",
"0.5398178",
"0.53973556",
"0.53952795",
"0.5388839",
"0.538586",
"0.5383229",
"0.53718317",
"0.53554356",
"0.53398",
"0.53258926"
] | 0.6507291 | 0 |
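Aside (editor's sketch, not a dataset row): the negative examples above exercise Mann-Whitney U and Kruskal-Wallis group-significance tests; the snippet below is a minimal, runnable illustration of the same comparisons with scipy.stats, using made-up sample values rather than anything taken from the rows.

from scipy.stats import mannwhitneyu, kruskal

# Arbitrary example groups (not taken from the dataset rows above).
cat1 = [52.25, 20.5, 29.25, 59.75]
cat2 = [43.0, 44.0, 52.25, 44.5]

u_stat, u_pval = mannwhitneyu(cat1, cat2)  # two-sample rank test
h_stat, h_pval = kruskal(cat1, cat2)       # Kruskal-Wallis H test
print(u_stat, u_pval, h_stat, h_pval)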
Learn from random samples if enough samples are available in memory | def train(self):
if len(self.memory) > self.batch_size:
selecting_time_start = time.time()
experiences = self.memory.sample()
self.selecting_time += time.time() - selecting_time_start
self.learn(experiences) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def learn(self):\n pass",
"def learn(self):\n pass",
"def learn(self):\n if self.step_count < self.learn_start_step or self.step_count % self.learn_interval != 0:\n return\n\n s, a, r, s_, t = self.sample()\n self.update_critics(s, a, r, t, s_)\n self.update_actor_alpha(s)\n self.update_target()\n self.learn_cur += 1",
"def learner(self):\n for N in range(self.N_learn):\n trajectories = self.sample_trajectories()\n\n # TODO: Both these methods take the full trajectories at the moment, a speedup could be achieved here\n self.qmodel.train(trajectories)\n self.amodel.train(trajectories)",
"def learn(self, Xtrain, ytrain):\n self.weights = np.random.rand(Xtrain.shape[1])",
"def learn_from_memory():\n\n # Get a random minibatch from the replay memory and learns from it.\n if agent.memory.size > args.batch_size:\n s1, a, s2, isterminal, r = agent.memory.get_sample(args.batch_size)\n\n q = agent.get_q_values(s2).cpu().data.numpy()\n q2 = np.max(q, axis=1)\n\n target_q = agent.get_q_values(s1).cpu().data.numpy()\n # target differs from q only for the selected action. The following means:\n # target_Q(s,a) = r + gamma * max Q(s2,_) if isterminal else r\n\n target_q[np.arange(target_q.shape[0]), a] = r + args.discount_factor * (1 - isterminal) * q2\n learn(s1, target_q)",
"def learn(self):\n raise NotImplementedError",
"def learn(self):\n metrics_hist = dict()\n max_runs = 3\n for run in range(max_runs):\n all_indices, initial_indices = self._init_al_dataset()\n\n metrics_hist[str(run)] = dict()\n\n current_indices = list(initial_indices)\n \n for split in self.data_splits_frac:\n print(f'\\nRUN {run} - SPLIT - {split*100:0.0f}%')\n\n # Initialise models\n self._init_models(mode='svaal')\n\n # Do some label stuff\n unlabelled_indices = np.setdiff1d(list(all_indices), current_indices)\n unlabelled_sampler = data.sampler.SubsetRandomSampler(unlabelled_indices)\n unlabelled_dataloader = data.DataLoader(self.datasets['train'],\n sampler=unlabelled_sampler,\n batch_size=64,\n drop_last=False)\n\n print(f'Labelled: {len(current_indices)} Unlabelled: {len(unlabelled_indices)} Total: {len(all_indices)}')\n\n # TODO: Make the SVAAL allow 100% labelled and 0% unlabelled to pass through it. Breaking out of loop for now when data hits 100% labelled.\n if len(unlabelled_indices) == 0:\n break\n\n metrics, svae, discriminator = self.train(dataloader_l=self.labelled_dataloader,\n dataloader_u=unlabelled_dataloader,\n dataloader_v=self.val_dataloader,\n dataloader_t=self.test_dataloader,\n mode='svaal') \n print(f'Test Eval.: F1 Scores - Macro {metrics[0]*100:0.2f}% Micro {metrics[1]*100:0.2f}%') \n \n # Record performance at each split\n metrics_hist[str(run)][str(split)] = metrics\n\n \n sampled_indices = self.sample_adversarial(svae, discriminator, unlabelled_dataloader, indices=unlabelled_indices, cuda=True) # TODO: review usage of indices arg\n current_indices = list(current_indices) + list(sampled_indices)\n sampler = data.sampler.SubsetRandomSampler(current_indices)\n self.labelled_dataloader = data.DataLoader(self.datasets['train'], sampler=sampler, batch_size=self.batch_size, drop_last=True)\n \n # write results to disk\n with open('results.json', 'w') as fj:\n json.dump(metrics_hist, fj, indent=4)",
"def learn(self):\n event_batch = self.memory.sample(self.batch_size)\n \n if event_batch is None:\n return\n\n event_batch = self.memory.deserialize(event_batch)\n self.update_critic(event_batch)\n self.update_actor(event_batch)\n self.update_target(self.local_actor, self.target_actor)\n self.update_target(self.local_critic, self.target_critic)",
"def generate_data(data, model, samples, targeted=True, target_num=9, start=0, inception=False, seed=3, handpick=False ):\n random.seed(seed)\n inputs = []\n targets = []\n labels = []\n true_ids = []\n sample_set = []\n\n data_d = data.test_data\n labels_d = data.test_labels\n\n if handpick:\n if inception:\n deck = list(range(0, 1500))\n else:\n deck = list(range(0, 10000))\n random.shuffle(deck)\n print('Handpicking')\n\n while (len(sample_set) < samples):\n rand_int = deck.pop()\n pred = model.model.predict(data_d[rand_int:rand_int + 1])\n\n if inception:\n pred = np.reshape(pred, (labels_d[0:1].shape))\n\n if (np.argmax(pred, 1) == np.argmax(labels_d[rand_int:rand_int + 1], 1)):\n sample_set.append(rand_int)\n print('Handpicked')\n else:\n sample_set = random.sample(range(0, 10000), samples)\n\n for i in sample_set:\n if targeted:\n if inception:\n seq = random.sample(range(1, 1001), target_num)\n else:\n seq = range(labels_d.shape[1])\n\n for j in seq:\n if (j == np.argmax(labels_d[start + i])) and (inception == False):\n continue\n inputs.append(data_d[start + i])\n targets.append(np.eye(labels_d.shape[1])[j])\n labels.append(labels_d[start + i])\n true_ids.append(start + i)\n else:\n inputs.append(data_d[start + i])\n targets.append(labels_d[start + i])\n labels.append(labels_d[start + i])\n true_ids.append(start + i)\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n labels = np.array(labels)\n true_ids = np.array(true_ids)\n return inputs, targets, labels, true_ids",
"def learn(self, purge_memory=True):\n observed_inputs, observed_reward, predicted_outputs, distance_from_reward = self._preprocess_experience()\n # now train. DataFeeder automatically reshuffles data.\n self.dataset_feeder = DataFeeder(\n [observed_inputs, predicted_outputs, observed_reward],\n batch_size=self.batch_size)\n # determine number of iterations:\n self.iterations = int(self.epochs * len(observed_inputs) / self.batch_size)\n for _ in range(self.iterations):\n self._batch()\n # TODO: write a method that computes and prints training stats\n # if _ % 1000:\n # self._train_stats(_)\n if purge_memory:\n self.purge_memory()",
"def learn(self, Xtrain, ytrain):",
"def rand_data():\n # 100 examples, with seq_len=10, each holding 300 features\n return torch.randn((100, 10, 300))",
"def learn(self):\n raise NotImplementedError()",
"def learn_with_bootstrapping(self, sample_count=10000):\n tic = time.clock()\n training_set_size = 150 # TODO: change to 1000, 500 or something\n sample_pool = self.training_stream.extract_training_patches(sample_count, negative_ratio=1.)\n # initialize weights\n weighted_patches = []\n for patch in sample_pool: # weight all patches: training pool P\n weighted_patches.append([patch, 1. / len(sample_pool)])\n # if patch.label == +1:\n # pos_patch = patch # PRESENTATION, REPORT\n # shuffle training pool\n weighted_patches = random_sample_weighted_patches(weighted_patches, len(weighted_patches))\n\n if self.algorithm == 'adaboost': # Shuffle the training data\n training_data = random_sample_weighted_patches(weighted_patches, len(weighted_patches))\n elif self.algorithm == 'wald': # Sample training_set_size samples\n training_data = random_sample_weighted_patches(weighted_patches, training_set_size)\n\n for t in range(self.layers): # choose the weak classifier with the minimum error\n print \"Learn with bootstrapping using %s, layer #%d\" % (self.algorithm.title(), t+1)\n\n if self.algorithm == 'adaboost':\n h_t = self._fetch_best_weak_classifier(weighted_patches)\n elif self.algorithm == 'wald':\n h_t = self._fetch_best_weak_classifier(training_data)\n # h_t.visualize(pos_patch) # PRESENTATION, REPORT\n self.classifiers.append(copy.deepcopy(h_t)) # add it to the strong classifier\n\n if self.algorithm == 'adaboost':\n self.classifiers[-1].update_alpha(weighted_patches)\n weighted_patches = self._adaboost_reweight(weighted_patches, t)\n elif self.algorithm == 'wald':\n kde_n, kde_p, xs_n, xs_p = self._estimate_ratios(training_data, t)\n # find decision thresholds for the strong classifier\n self._tune_thresholds(kde_n, kde_p, xs_n, xs_p, t)\n # throw away training samples that fall in our thresholds\n weighted_patches = self._reweight_and_discard_irrelevant(weighted_patches, t)\n # sample new training data\n training_data = random_sample_weighted_patches(weighted_patches, training_set_size)\n if len(training_data) == 0:\n print \"no more training data!\"\n break\n toc = time.clock()\n print toc - tic\n print self",
"def learn(self, D, **kwargs):\n pass",
"def __sample(self, data, los, n: Optional[int], random: bool):\n if n is None:\n n = len(data)\n else:\n n = min(len(data), n)\n # Uniform random sampling from our data array\n indices = list(range(len(data)))\n if random:\n np.random.shuffle(indices)\n indices = indices[:n]\n data = torch.Tensor(data[indices])\n los = torch.Tensor(los[indices])\n if self.device != 'cpu' and 'cuda' in self.device.type:\n data = data.cuda()\n los = los.cuda()\n return data, los",
"def sample_estimator(model, num_classes, feature_list, train_loader):\n\n model.eval()\n group_lasso = sklearn.covariance.EmpiricalCovariance(assume_centered=False)\n correct, total = 0, 0\n num_output = len(feature_list)\n num_sample_per_class = np.zeros(num_classes)\n list_features = []\n for i in range(num_output):\n temp_list = []\n for j in range(num_classes):\n temp_list.append(0)\n list_features.append(temp_list)\n\n for data, target, _ in train_loader:\n total += data.size(0)\n\n data = Variable(data.cuda())\n output, out_features = model.feature_list(data)\n\n # get hidden features\n for i in range(num_output):\n out_features[i] = out_features[i].view(\n out_features[i].size(0), out_features[i].size(1), -1)\n out_features[i] = torch.mean(out_features[i].data, 2)\n\n # compute the accuracy\n pred = output.data.max(1)[1]\n equal_flag = pred.eq(target.cuda()).cpu()\n correct += equal_flag.sum()\n\n # construct the sample matrix\n for i in range(data.size(0)):\n label = target[i]\n if num_sample_per_class[label] == 0:\n out_count = 0\n for out in out_features:\n list_features[out_count][label] = out[i].view(1, -1)\n out_count += 1\n else:\n out_count = 0\n for out in out_features:\n list_features[out_count][label] \\\n = torch.cat((list_features[out_count][label], out[i].view(1, -1)), 0)\n out_count += 1\n num_sample_per_class[label] += 1\n\n sample_class_mean = []\n out_count = 0\n for num_feature in feature_list:\n temp_list = torch.Tensor(num_classes, int(num_feature)).cuda()\n for j in range(num_classes):\n # print(f'|| Num classes: {num_classes}, j: {j}, temp_list: {temp_list}, list features: {list_features[out_count][j]}')\n temp_list[j] = torch.mean(list_features[out_count][j], 0)\n sample_class_mean.append(temp_list)\n out_count += 1\n\n precision = []\n for k in range(num_output):\n X = 0\n for i in range(num_classes):\n if i == 0:\n X = list_features[k][i] - sample_class_mean[k][i]\n else:\n X = torch.cat(\n (X, list_features[k][i] - sample_class_mean[k][i]), 0)\n\n # find inverse\n group_lasso.fit(X.cpu().numpy())\n temp_precision = group_lasso.precision_\n temp_precision = torch.from_numpy(temp_precision).float().cuda()\n precision.append(temp_precision)\n\n print('\\n Training Accuracy:({:.2f}%)\\n'.format(100. * correct / total))\n\n return sample_class_mean, precision",
"def learn(self):\r\n \r\n # take a mini-batch from replay experience\r\n cur_batch_size = min(len(self.replay_exp), self.batch_size)\r\n mini_batch = random.sample(self.replay_exp, cur_batch_size)\r\n \r\n # batch data\r\n sample_states = np.ndarray(shape = (cur_batch_size, self.state_size)) # replace 128 with cur_batch_size\r\n sample_actions = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_rewards = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_next_states = np.ndarray(shape = (cur_batch_size, self.state_size))\r\n sample_dones = np.ndarray(shape = (cur_batch_size, 1))\r\n\r\n temp=0\r\n for exp in mini_batch:\r\n sample_states[temp] = exp[0]\r\n sample_actions[temp] = exp[1]\r\n sample_rewards[temp] = exp[2]\r\n sample_next_states[temp] = exp[3]\r\n sample_dones[temp] = exp[4]\r\n temp += 1\r\n \r\n \r\n sample_qhat_next = self.brain_target.predict(sample_next_states)\r\n \r\n # set all Q values terminal states to 0\r\n sample_qhat_next = sample_qhat_next * (np.ones(shape = sample_dones.shape) - sample_dones)\r\n # choose max action for each state\r\n sample_qhat_next = np.max(sample_qhat_next, axis=1)\r\n \r\n sample_qhat = self.brain_policy.predict(sample_states)\r\n \r\n for i in range(cur_batch_size):\r\n a = sample_actions[i,0]\r\n sample_qhat[i,int(a)] = sample_rewards[i] + self.gamma * sample_qhat_next[i]\r\n \r\n q_target = sample_qhat\r\n \r\n self.brain_policy.fit(sample_states, q_target, epochs = 1, verbose = 0)\r\n \r\n \r\n \r\n \"\"\"\r\n \r\n for state, action, reward, next_state, done in mini_batch:\r\n target_Q_s_a = 0 # new target for Q(s,a)\r\n state = np.reshape(state, [1, state_size])\r\n next_state = np.reshape(next_state, [1, state_size])\r\n \r\n # if it is not the terminal state\r\n if not done:\r\n qhat_next = self.brain_target.predict(next_state) # estimate Q(s',a')\r\n target_Q_s_a = reward + self.gamma * np.amax(qhat_next[0]) # because the output is m * n, so we need to consider the dimension [0]\r\n else:\r\n target_Q_s_a = reward\r\n \r\n target_output = self.brain_policy.predict(state) # we will replace target of Q(s,a) for specific a later\r\n target_output[0][action] = target_Q_s_a # new target for state s and action a\r\n \r\n self.brain_policy.fit(state, target_output, epochs = 1, verbose = 0)\r\n \r\n \"\"\"",
"def _compute_samples(self, samples):\n return samples",
"def __init__(self, num_features=NUM_FEATURES, num_samp=NUM_SAMP):\n num_samp = NUM_SAMP\n sigma = 0.1\n np.random.seed(31415)\n\n # We're going to learn these paramters\n self.w = np.random.randint(low=0, high=5, size=(num_features, 1))\n self.b = 2\n\n self.index = np.arange(num_samp)\n self.x = np.random.uniform(size=(num_samp, num_features))\n self.y = self.x @ self.w + self.b + sigma * np.random.normal()",
"def train_long_memory(self):\n # get memory\n # if memory is above a certain BATCH SIZE then\n # randomly sample BACTCH SIZE memory\n if len(self.memory) > BATCH_SIZE:\n mini_sample = random.sample(self.memory, BATCH_SIZE) # list of tuples\n else:\n mini_sample = self.memory\n\n # get all states actions, rewards, etc...\n # and train the step using QTrainer\n states, actions, rewards, next_states, dones = zip(*mini_sample)\n self.trainer.train_step(states, actions, rewards, next_states, dones)",
"def generate_data(data, samples, targeted=True, start=0, inception=False):\n inputs = []\n targets_1hot = []\n i = 0\n samples_sofar = 0\n while samples_sofar < samples:\n i += 1\n if torch.argmax(model(torch.tensor(data.test_data[start+i:start+i+1]+0.5, device=\"cuda\", dtype=torch.float32).permute(0, 3, 1, 2))) != np.argmax(data.test_labels_1hot[start+i]):\n continue\n\n if targeted:\n if inception:\n seq = random.sample(range(1, 1001), 10)\n else:\n seq = range(data.test_labels_1hot.shape[1])\n\n # print ('image label:', torch.argmax(data.test_labels[start+i]))\n for j in seq:\n # skip the original image label\n if (j == torch.argmax(data.test_labels_1hot[start+i])) and (inception == False):\n continue\n inputs.append(data.test_data[start+i])\n targets_1hot.append(\n torch.eye(data.test_labels_1hot.shape[1])[j])\n else:\n inputs.append(data.test_data[start+i])\n targets_1hot.append(data.test_labels_1hot[start+i])\n\n samples_sofar += 1\n\n inputs = torch.tensor(inputs).permute(0, 3, 1, 2)\n targets_1hot = torch.tensor(targets_1hot)\n\n return inputs, targets_1hot",
"def _sample(self, rnn_output, temperature):\n pass",
"def RandomLearner(X, y):\n\n\trandom_learner = ActiveLearner(estimator=SVC(),\n\t\tquery_strategy=RandomQuery,\n\t\tX_training=np.array([[0.5, 4.0], [2.0, 1.0]]),\n\t\ty_training=np.array([[0], [1]]))\n\n\t### TODO: Write the main loop for running the random active learner\n\taccuracies = []\n\ti = 0\n\n\t#S, SLabels = np.array([[0.5, 4.0], [2.0, 1.0]]), np.array([[0], [1]])\n\tU, ULabels = copy.deepcopy(X), copy.deepcopy(y)\n\n\twhile (len(U) != 0):\n\t\tidx, instance = random_learner.query(U)\n\t\ti += 1\n\t\trandom_learner._add_training_data(U[idx].reshape(1, 2), ULabels[idx].reshape(1, 1))\n\t\trandom_learner._fit_to_known()\n\t\tU, ULabels = np.delete(U, idx, axis=0), np.delete(ULabels, idx, axis=0)\n\t\tacc = random_learner.score(X, y)\n\t\taccuracies.append(acc)\n\n\treturn np.array(accuracies), i",
"def sample_estimator(model, num_classes, train_loader):\n import sklearn.covariance\n\n model.eval()\n with torch.no_grad():\n group_lasso = sklearn.covariance.EmpiricalCovariance(assume_centered=False)\n correct, total = 0, 0\n num_output = 1\n num_sample_per_class = np.empty(num_classes)\n num_sample_per_class.fill(0)\n list_features = []\n for i in range(num_output):\n temp_list = []\n for j in range(num_classes):\n temp_list.append(0)\n list_features.append(temp_list)\n\n for data, target in train_loader:\n total += data.size(0)\n data = data.cuda()\n data = Variable(data)\n output = model(data)['logits']\n # output, out_features = out['logits'], out['logits']\n\n out_features = output.view(output.size(0), output.size(1), -1)\n out_features = torch.mean(out_features, dim=2)\n\n # compute the accuracy\n pred = output.data.max(1)[1]\n equal_flag = pred.eq(target.cuda()).cpu()\n correct += equal_flag.sum()\n\n # construct the sample matrix\n for i in range(data.size(0)):\n label = target[i]\n out_count = 0\n if num_sample_per_class[label] == 0:\n list_features[out_count][label] = out_features[i].view(1, -1)\n else:\n list_features[out_count][label] \\\n = torch.cat((list_features[out_count][label], out_features[i].view(1, -1)), 0)\n num_sample_per_class[label] += 1\n\n sample_class_mean = []\n out_count = 0\n num_feature = num_classes\n temp_list = torch.Tensor(num_classes, num_feature).cuda()\n for j in range(num_classes):\n temp_list[j] = torch.mean(list_features[out_count][j], dim=0)\n sample_class_mean.append(temp_list)\n\n precision = []\n for k in range(num_output):\n X = 0\n for i in range(num_classes):\n if i == 0:\n X = list_features[k][i] - sample_class_mean[k][i]\n else:\n X = torch.cat((X, list_features[k][i] - sample_class_mean[k][i]), dim=0)\n\n # find inverse\n group_lasso.fit(X.cpu().numpy())\n temp_precision = group_lasso.precision_\n temp_precision = torch.from_numpy(temp_precision).float().cuda()\n precision.append(temp_precision)\n\n print('\\n Training Accuracy:({:.2f}%)\\n'.format(100. * correct / total))\n\n return sample_class_mean, precision",
"def sample(self):\n sample_ind = np.random.choice(len(self.memory), self.batch_size)\n # get the selected experiences: avoid using mid list indexing\n es, ea, er, en, ed = [], [], [], [], []\n i = 0\n while i < len(sample_ind):\n self.memory.rotate(-sample_ind[i]) # rotate the memory up to this index\n e = self.memory[0] # sample from the top\n es.append(e.state)\n ea.append(e.action)\n er.append(e.reward)\n en.append(e.next_state)\n ed.append(e.done)\n self.memory.rotate(sample_ind[i])\n i += 1\n states = torch.stack(es).squeeze().float().to(device)\n actions = torch.stack(ea).float().to(device)\n rewards = torch.from_numpy(np.vstack(er)).float().to(device)\n next_states = torch.stack(en).squeeze().float().to(device)\n dones = torch.from_numpy(np.vstack(ed).astype(np.uint8)).float().to(device)\n return (states, actions, rewards, next_states, dones)",
"def samples(self):\n pass"
] | [
"0.6690334",
"0.6690334",
"0.66179794",
"0.66179794",
"0.6616561",
"0.6584631",
"0.6550663",
"0.6443997",
"0.64131385",
"0.6295754",
"0.6280296",
"0.6257376",
"0.62425834",
"0.62313735",
"0.62262535",
"0.6190727",
"0.6181846",
"0.6123597",
"0.60776126",
"0.6068146",
"0.60541725",
"0.60420746",
"0.6041483",
"0.6003201",
"0.5986911",
"0.59846914",
"0.59675676",
"0.5959034",
"0.594723",
"0.593348"
] | 0.7019188 | 0 |
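Aside (editor's sketch, not a dataset row): a self-contained toy of the replay-memory gate used by the train() document above; ReplayMemory, ToyAgent, and their capacities are illustrative assumptions, not part of the row.

import random
import time
from collections import deque

class ReplayMemory:
    # Toy experience buffer with uniform random sampling.
    def __init__(self, capacity, batch_size):
        self.buffer = deque(maxlen=capacity)
        self.batch_size = batch_size

    def add(self, experience):
        self.buffer.append(experience)

    def sample(self):
        return random.sample(list(self.buffer), self.batch_size)

    def __len__(self):
        return len(self.buffer)

class ToyAgent:
    def __init__(self, memory):
        self.memory = memory
        self.batch_size = memory.batch_size
        self.selecting_time = 0.0

    def learn(self, experiences):
        pass  # a real agent would update its networks here

    def train(self):
        # Only learn once more samples than one batch have accumulated,
        # mirroring the gate in the document above.
        if len(self.memory) > self.batch_size:
            start = time.time()
            experiences = self.memory.sample()
            self.selecting_time += time.time() - start
            self.learn(experiences)

memory = ReplayMemory(capacity=1000, batch_size=4)
for i in range(10):
    memory.add((i, i + 1))
ToyAgent(memory).train()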
Returns an op to update a list of target variables from source variables. | def update_target_variables(self, target_variables,
source_variables,
tau=1.0,
use_locking=False,
name="update_target_variables"):
if not isinstance(tau, float):
raise TypeError("Tau has wrong type (should be float) {}".format(tau))
if not 0.0 < tau <= 1.0:
raise ValueError("Invalid parameter tau {}".format(tau))
if len(target_variables) != len(source_variables):
raise ValueError("Number of target variables {} is not the same as "
"number of source variables {}".format(
len(target_variables), len(source_variables)))
same_shape = all(trg.get_shape() == src.get_shape()
for trg, src in zip(target_variables, source_variables))
if not same_shape:
raise ValueError("Target variables don't have the same shape as source "
"variables.")
def update_op(target_variable, source_variable, tau):
if tau == 1.0:
return target_variable.assign(source_variable, use_locking)
else:
return target_variable.assign(
tau * source_variable + (1.0 - tau) * target_variable, use_locking)
# with tf.name_scope(name, values=target_variables + source_variables):
update_ops = [update_op(target_var, source_var, tau)
for target_var, source_var
in zip(target_variables, source_variables)]
return tf.group(name="update_all_variables", *update_ops) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hard_update(target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)",
"def hard_update(self,target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\t\ttarget_param.data.copy_(param.data)",
"def get_target_updates(_vars, target_vars, tau, verbose=0):\n if verbose >= 2:\n print('setting up target updates ...')\n\n soft_updates = []\n init_updates = []\n assert len(_vars) == len(target_vars)\n\n for var, target_var in zip(_vars, target_vars):\n if verbose >= 2:\n print(' {} <- {}'.format(target_var.name, var.name))\n init_updates.append(tf.compat.v1.assign(target_var, var))\n soft_updates.append(\n tf.compat.v1.assign(target_var, (1.-tau) * target_var + tau * var))\n\n assert len(init_updates) == len(_vars)\n assert len(soft_updates) == len(_vars)\n\n return tf.group(*init_updates), tf.group(*soft_updates)",
"def add_update_target_op(self, q_scope, target_q_scope):\r\n q_variables = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope=q_scope)\r\n target_q_variables = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope=target_q_scope)\r\n self.update_target_op = tf.group(*[tf.assign(ref=target_q_variables[i], value=q_variables[i]) for i in range(len(q_variables))])",
"def add_update_target_op(self, q_scope, target_q_scope):\r\n \r\n q_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=q_scope)\r\n target_q_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=target_q_scope)\r\n assign_ops = [tf.assign(tqv, qv) for qv, tqv in zip(q_vars, target_q_vars)]\r\n self.update_target_op = tf.group(*assign_ops)",
"def _ipu_multi_update(op, grads):\n return [\n gen_popops_ops.ipu_multi_update_add(\n array_ops.zeros_like(op.inputs[0]),\n indices=op.inputs[1],\n updates=grads,\n scale=array_ops.constant(1, op.inputs[0].dtype),\n indices_are_sorted=op.get_attr(\"indices_are_sorted\")), None\n ]",
"def _update(self, update_fn, value, **kwargs):\n input_tensor = ops.convert_to_tensor(\n value, name='value_in_tensor', dtype=self.dtype)\n\n return control_flow_ops.group(\n *tuple(\n _on_device_update(update_fn, v, input_tensor, **kwargs)\n for v in self.variables))",
"def update_target(self, target, pred, update_rate):\n for target_param, pred_param in zip(target.parameters(), pred.parameters()):\n target_param.data.copy_((1.0 - update_rate)\n * target_param.data + update_rate * pred_param.data)",
"def _update_targets(self):\n for ga_main, ga_targ in zip(self.ga.variables, self.ga_.variables):\n ga_targ.assign(self._polyak * ga_targ + (1 - self._polyak) * ga_main)\n if self.use_lyapunov:\n for lc_main, lc_targ in zip(self.lc.variables, self.lc_.variables):\n lc_targ.assign(self._polyak * lc_targ + (1 - self._polyak) * lc_main)\n else:\n for q_1_main, q_1_targ in zip(self.q_1.variables, self.q_1_.variables):\n q_1_targ.assign(self._polyak * q_1_targ + (1 - self._polyak) * q_1_main)\n for q_2_main, q_2_targ in zip(self.q_2.variables, self.q_2_.variables):\n q_2_targ.assign(self._polyak * q_2_targ + (1 - self._polyak) * q_2_main)",
"def hard_update(source_net, target_net):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(param.data)",
"def _wip_update(preds: Union[str, List[str]], target: Union[str, List[str]]) ->Tuple[Tensor, Tensor, Tensor]:\n if isinstance(preds, str):\n preds = [preds]\n if isinstance(target, str):\n target = [target]\n total = tensor(0.0)\n errors = tensor(0.0)\n target_total = tensor(0.0)\n preds_total = tensor(0.0)\n for pred, tgt in zip(preds, target):\n pred_tokens = pred.split()\n target_tokens = tgt.split()\n errors += _edit_distance(pred_tokens, target_tokens)\n target_total += len(target_tokens)\n preds_total += len(pred_tokens)\n total += max(len(target_tokens), len(pred_tokens))\n return errors - total, target_total, preds_total",
"def _create_target_network_update_op(self, q_network, target_q_network):\n variables = q_network.get_variables()\n target_variables = target_q_network.get_variables()\n # problem\n return tf.group([\n tf.assign(target_v, target_v + self.tau * (v - target_v)) # same as original arm\n for (target_v, v) in zip(target_variables, variables)\n ])",
"def create_graph_copy_op(self, src, target, tau):\n src_vars = tf.trainable_variables(src)\n target_vars = tf.trainable_variables(target)\n\n op_holder = []\n\n for s, t in zip(src_vars, target_vars):\n op_holder.append(t.assign((s.value() * tau) + ((1 - tau) * t.value())))\n return op_holder",
"def assign(self, other):\n\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n ops = []\n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign(other_var))\n return tf.group(*ops, name=\"assign_\"+self.name)",
"def soft_update(target, source, tau):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)",
"def propagate_values(target_ds, source_ds, exclude=None):\n\n # Find variable names common to target_ds and source_ds, excluding specified exclude variables\n common_variable_names = list(set(target_ds).intersection(source_ds))\n #common_variable_names = list(set(target_ds.variables).intersection(source_ds.variables))\n #print(common_variable_names)\n\n if exclude is not None:\n common_variable_names = [name for name in common_variable_names if name not in exclude]\n\n # Remove any common variables that have different dimensions in target_ds and source_ds\n common_variable_names = [name for name in common_variable_names if target_ds[name].dims == source_ds[name].dims]\n\n # Propagate data\n for common_variable_name in common_variable_names:\n if target_ds[common_variable_name].shape == source_ds[common_variable_name].shape:\n target_ds[common_variable_name].values = source_ds[common_variable_name].values",
"def propagate(source, targets, vals):\n v = vals[source]\n for t in targets:\n vals.setdefault(t, 0)\n vals[t] += v",
"def copy_src_to_dst(from_scope, to_scope):\n from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)\n to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)\n\n op_holder = []\n for from_var, to_var in zip(from_vars, to_vars):\n op_holder.append(to_var.assign(from_var))\n return op_holder",
"def updates(loss: Tensor, var_list, options):\n with tf.name_scope(\"optimization\"):\n if options['update'] == 'momentum':\n optimizer = tf.train.MomentumOptimizer(learning_rate=options['learning_rate'],\n momentum=options['momentum'])\n elif options['update'] == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate=options['learning_rate'])\n elif options['update'] == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(learning_rate=options['learning_rate'])\n else:\n assert False, \"Unknown loss minimizer\"\n update_step = optimizer.minimize(loss, var_list=var_list)\n return optimizer, update_step",
"def reset_q_hat(self, action_vars, target_vars):\n op_holder = []\n for name in list(action_vars.keys()):\n op_holder.append(target_vars[name].assign(action_vars[name]))\n\n return op_holder",
"def _create_weight_update_ops(self):\n with tf.name_scope(\"Weight_Update_Operators\"):\n self.weight_vars_assign_ops = []\n for weight_matrix, grad in zip(self._train_vars, self.step_direction_variables):\n self.weight_vars_assign_ops.append(\n tf.assign_add(weight_matrix, self._step_on_line_plh * -grad / self.norm_of_gradient_var).op)",
"def update(self, inputs): # pragma: no cover\n return inputs",
"def _make_hard_copy_ops(target_vars, online_vars):\n return [(target_vars[var_name].assign(online_vars[var_name]))\n for var_name in target_vars.keys()]",
"def copy_network_parameters(sess):\n e1_params = [t for t in tf.trainable_variables() if t.name.startswith('q_network')]\n e1_params = sorted(e1_params, key=lambda v: v.name)\n e2_params = [t for t in tf.trainable_variables() if t.name.startswith('target')]\n e2_params = sorted(e2_params, key=lambda v: v.name)\n\n update_ops = []\n for e1_v, e2_v in zip(e1_params, e2_params):\n op = e2_v.assign(e1_v)\n update_ops.append(op)\n\n sess.run(update_ops)",
"def update(self, preds: Tensor, target: Tensor, indexes: Tensor) -> None:\n if indexes is None:\n raise ValueError(\"Argument `indexes` cannot be None\")\n\n indexes, preds, target = _check_retrieval_inputs(\n indexes, preds, target, allow_non_binary_target=self.allow_non_binary_target, ignore_index=self.ignore_index\n )\n\n self.indexes.append(indexes)\n self.preds.append(preds)\n self.target.append(target)",
"def update(self, sess, states, actions, targets):\n feed_dict = { self.x: states, self.targets_: targets, self.actions_: actions}\n _, loss = sess.run([self.train_op, self.loss], feed_dict)\n \n return loss",
"def _wer_update(preds: Union[str, List[str]], target: Union[str, List[str]]) ->Tuple[Tensor, Tensor]:\n if isinstance(preds, str):\n preds = [preds]\n if isinstance(target, str):\n target = [target]\n errors = tensor(0, dtype=torch.float)\n total = tensor(0, dtype=torch.float)\n for pred, tgt in zip(preds, target):\n pred_tokens = pred.split()\n tgt_tokens = tgt.split()\n errors += _edit_distance(pred_tokens, tgt_tokens)\n total += len(tgt_tokens)\n return errors, total",
"def assign_from_values_fn(var_names_to_values):\n assign_op, feed_dict = assign_from_values(var_names_to_values)\n def callback(session):\n return session.run(assign_op, feed_dict)\n return callback",
"def update(self, **vars):\n for name in vars:\n # Use __setitem__ for all effects\n self[name] = vars[name]",
"def update_inputs(self, compname, exprs):\n expr_info = []\n invalids = []\n\n if compname is not None:\n pred = self._exprmapper._exprgraph.pred\n if exprs:\n ex = ['.'.join([compname, n]) for n in exprs]\n exprs = []\n for e in ex:\n exprs.extend([expr for expr in self._exprmapper.find_referring_exprs(e)\n if expr in pred])\n else:\n exprs = [expr for expr in self._exprmapper.find_referring_exprs(compname)\n if expr in pred]\n for expr in exprs:\n srctxt = self._exprmapper.get_source(expr)\n if srctxt:\n srcexpr = self._exprmapper.get_expr(srctxt)\n invalids.extend(srcexpr.invalid_refs())\n expr_info.append((srcexpr, self._exprmapper.get_expr(expr)))\n\n # if source exprs reference invalid vars, request an update\n if invalids:\n for cname, vnames in partition_names_by_comp(invalids).items():\n if cname is None:\n if self.parent:\n self.parent.update_inputs(self.name, vnames)\n \n # If our source component is in a loop with us, don't\n # run it. Otherwise you have infinite recursion. It is\n # the responsibility of the solver to properly execute\n # the comps in its loop.\n elif self._graph_loops:\n for loop in self._graph_loops:\n if compname in loop and cname in loop:\n break\n else:\n getattr(self, cname).update_outputs(vnames)\n \n else:\n getattr(self, cname).update_outputs(vnames)\n #self.set_valid(vnames, True)\n\n for srcexpr, destexpr in expr_info:\n try:\n destexpr.set(srcexpr.evaluate(), src=srcexpr.text)\n except Exception as err:\n self.raise_exception(\"cannot set '%s' from '%s': %s\" %\n (destexpr.text, srcexpr.text, str(err)), type(err))"
] | [
"0.66675526",
"0.6624005",
"0.6580059",
"0.65401715",
"0.63515884",
"0.5924977",
"0.5856354",
"0.5849391",
"0.58274955",
"0.58029515",
"0.5788468",
"0.57738566",
"0.57068956",
"0.5692191",
"0.5691666",
"0.5661541",
"0.55585796",
"0.55347294",
"0.5515069",
"0.5511939",
"0.5471894",
"0.54599476",
"0.5447054",
"0.54300594",
"0.54044604",
"0.539916",
"0.53872895",
"0.5382891",
"0.5381662",
"0.53752595"
] | 0.6650258 | 1 |
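Aside (editor's sketch, not a dataset row): the update_target_variables document above builds TensorFlow assign ops for a soft ("Polyak") target update, target <- tau*source + (1-tau)*target; the snippet below restates the same rule in plain NumPy with the same tau and shape checks — the variable lists are assumptions for illustration.

import numpy as np

def soft_update(target_vars, source_vars, tau=1.0):
    # In-place blend of each target array toward its source.
    if not 0.0 < tau <= 1.0:
        raise ValueError("Invalid parameter tau {}".format(tau))
    if len(target_vars) != len(source_vars):
        raise ValueError("Number of target and source variables differ")
    for tgt, src in zip(target_vars, source_vars):
        if tgt.shape != src.shape:
            raise ValueError("Target and source shapes differ")
        tgt[...] = tau * src + (1.0 - tau) * tgt

target = [np.zeros(3)]
source = [np.ones(3)]
soft_update(target, source, tau=0.1)  # target becomes [0.1, 0.1, 0.1]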
Run a function and return the run time and the result of the function; if the function requires arguments, those can be passed in too | def calculateRunTime(function, *args):
startTime = time.time()
result = function(*args)
return time.time() - startTime, result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def function_timer(*args, **kwargs):\n start = time.time()\n value = func(*args, **kwargs)\n end = time.time()\n runtime = end - start\n msg = f\"The runtime for {func.__name__} took {runtime} seconds to complete\"\n #print(msg.format(func=func.__name__, time=runtime))\n print(msg)\n return value",
"def wrapper(args):\n if args[\"--time\"]:\n import time\n start_time = time.time()\n result = fun(args)\n LOGGER.info(\"Total time:\", time.time() - start_time)\n return result\n\n return fun(args)",
"def time_func(f, args=[], kw_args={}):\n start_time = time.time()\n result = f(*args, **kw_args)\n end_time = time.time()\n\n return (result, end_time - start_time)",
"def execution_time(function: Callable, args=tuple(), kwargs=dict()):\n start_time = time.time()\n function(*args, **kwargs)\n end_time = time.time()\n return end_time - start_time",
"def wrapper(*args, **kwargs):\n start = time.time()\n value = func(*args, **kwargs)\n end = time.time()\n runtime = end - start\n msg = \"The runtime for {func} took {time} seconds to complete\"\n printer(msg.format(func=func.__name__,\n time=runtime))\n return value",
"def time_wrapper(*args) -> Tuple[float,str]:\n t1 = time.time()\n for _ in range(trials):\n function(*args)\n return ((time.time()-t1)/trials) * 1e6, function.__name__",
"def inner(*args, **kwargs):\n start = perf_counter()\n result = fn(*args, **kwargs)\n end = perf_counter()\n time_elapsed = (end - start)\n return time_elapsed, result",
"def time_function(func: \"Function call to be evaluted as str.\") -> float:\n start = time.time()\n eval(func)\n return time.time() - start",
"def wrapper():\n start_time = time.time()\n func()\n end_time = time.time()\n run = end_time - start_time\n print(f'Total time {run}')",
"def _run_time(func):\n start_time = datetime.datetime.now()\n func\n end_time = datetime.datetime.now()\n return end_time - start_time",
"def time_function(f, *args):\r\n tic = time.time()\r\n f(*args)\r\n toc = time.time()\r\n return toc - tic",
"def clocked(*args):\n t0 = time.perf_counter()\n result = func(*args)\n elapsed = time.perf_counter() - t0\n name = func.__name__\n arg_str = \"\".join(repr(arg) for arg in args)\n print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))\n return result",
"def calculate_time(func):\n def timer(*args, **kwargs):\n start_time = time.time()\n x = func(*args, **kwargs)\n end_time = time.time()\n run_time = end_time - start_time\n print(f'Total time',run_time)\n return x\n return timer",
"def time_me(function, argument, type):\n start = time.perf_counter()\n function(argument, type)\n end = time.perf_counter()\n return end - start",
"def timeThem(*args, **kwargs):\n\n funcs = []\n funcArgs = list(args[:])\n \n #filter arguments\n for arg in args:\n if callable(arg):\n funcs.append(arg)\n funcArgs.remove(arg)\n \n key = \"inNumber\"\n inNumber=10\n if key in kwargs:\n inNumber = kwargs[key]\n del kwargs[key]\n\n durations = []\n refTime = 0.0\n\n for func in funcs:\n retVal = func(*funcArgs, **kwargs)\n duration = timeit(partial(func, *funcArgs, **kwargs), number=inNumber)\n \n comparison = \"\"\n if refTime <= 0.0:\n refTime = duration\n else:\n comparison = \" ( *{:.2f})\".format(duration / refTime)\n \n print(\"{: <16} : {:.4f}\".format(func.__name__, duration) + comparison + \" returns '{}' ({})\".format(retVal, type(retVal).__name__))\n durations.append(duration)\n \n return durations",
"def call_and_return_with_timing(f, *args, **kwargs):\n from datetime import datetime\n before = datetime.now()\n result = f(*args, **kwargs)\n after = datetime.now()\n return (result, after-before)",
"def run_time_wrapper(func):\n\n def inner(*args, **kwargs):\n start_time = datetime.datetime.now()\n func(*args, **kwargs)\n end_time = datetime.datetime.now()\n result = end_time - start_time\n log(level=\"info\", message=\"RUNTIME: {}\".format(result))\n\n return inner",
"def timer(fun):\n @wraps(fun)\n def wrapper(args):\n \"\"\"Wraps function execution time.\"\"\"\n if args[\"--time\"]:\n import time\n start_time = time.time()\n result = fun(args)\n LOGGER.info(\"Total time:\", time.time() - start_time)\n return result\n\n return fun(args)\n\n return wrapper",
"def timeit(func, *args):\n start = time.time()\n func(*args)\n end = time.time()\n\n return end - start",
"def inner(*args):\n # Setup.\n stats = args[-1]\n stats[desc] = -1\n start = time.time()\n\n # Execute the function.\n ret_val = func(*args)\n\n # No exception, so save the runtime and return ret_val.\n stats[desc] = time.time() - start\n return ret_val",
"def clock(func):\n def clocked(*args):\n t0 = time.time()\n result = func(*args)\n elapsed = (time.time() - t0) * 1000 # in ms\n print('elapsed : [{0:0.3f}ms]'.format(elapsed))\n return result\n return clocked",
"def timer(*args):\n \n import time\n \n time1 = time.clock()\n func(*args)\n time2 = time.clock()\n \n return (time2 - time1) * 1000",
"def timed(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n \"\"\"\n Inner function to calculate the time.\n \"\"\"\n start = perf_counter()\n result = fn(*args, **kwargs)\n end = perf_counter()\n time_elapsed = (end - start)\n return time_elapsed, result\n return inner",
"def _timed_execute(self):\n tstart = time.perf_counter()\n self._func(*self._func_args, **self._func_kwargs)\n tend = time.perf_counter() \n\n tdelta = tend - tstart\n\n return tdelta",
"def count_time_args(func):\n def wrapper(*args):\n start_time = time.time()\n res = func(*args)\n end_time = time.time()\n print(\"The progress cost: {:4}\".format(end_time - start_time))\n return res\n return wrapper",
"def timeit(func):\n\n def measure_time(*args, **kw):\n start_time = time.perf_counter()\n result = func(*args, **kw)\n time_ms = (time.perf_counter() - start_time) * 1000\n if time_ms < 0.1:\n print(\"Processing time of %s(): %.1f μs.\"\n % (func.__qualname__, time_ms*1000))\n else:\n print(\"Processing time of %s(): %.3f ms.\"\n % (func.__qualname__, time_ms))\n return result\n\n return measure_time",
"def time_method(method, *arguments):\n start = int(round(time.time() * 1000))\n result = method(*arguments)\n end = int(round(time.time() * 1000))\n total = (end - start) / 1000\n print('Time: %0.03fs' % total)\n return result",
"def time_elapsed(fun):\n def wrapper(*args, **kwargs):\n t0 = time.time()\n fun(*args, **kwargs)\n print(\"\\nTime elapsed = %s\" % (time.time() - t0))\n return wrapper",
"def log(func):\n def timed(*args, **kwargs):\n ts = time.time()\n result = func(*args, **kwargs)\n te = time.time()\n exectime = te - ts\n if (exectime < 1):\n exectime = str(round(exectime * 100, 3)) +\" ms\"\n else:\n exectime = str(round(exectime, 3)) + \" s\"\n logger.info(\"Running: \"+ func.__name__ + \" [ exec-time = \" + exectime + \" ]\")\n return result\n \n return timed",
"def time_func(func):\n \n def timer(*args):\n \"\"\"\n Returns the running time of a function in milliseconds.\n \n The number of parameters is defined by the timed function.\n \"\"\"\n \n import time\n \n time1 = time.clock()\n func(*args)\n time2 = time.clock()\n \n return (time2 - time1) * 1000\n \n return timer"
] | [
"0.7586328",
"0.7548404",
"0.74903107",
"0.7458017",
"0.73813474",
"0.7225557",
"0.7115549",
"0.70576197",
"0.705625",
"0.7045507",
"0.70275384",
"0.702331",
"0.6991577",
"0.6989044",
"0.694265",
"0.69015586",
"0.69004416",
"0.6764218",
"0.67446786",
"0.67346257",
"0.67155105",
"0.6707794",
"0.6704136",
"0.66613847",
"0.6660502",
"0.66323394",
"0.66217434",
"0.6597356",
"0.65966594",
"0.6591369"
] | 0.8080999 | 0 |
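Aside (editor's sketch, not a dataset row): a usage example for the calculateRunTime helper in the document above, plus a decorator variant based on time.perf_counter; slow_add and timed are made-up names for illustration.

import functools
import time

def calculateRunTime(function, *args):
    startTime = time.time()
    result = function(*args)
    return time.time() - startTime, result

def timed(fn):
    # Decorator variant: the wrapped call returns (elapsed_seconds, result).
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = fn(*args, **kwargs)
        return time.perf_counter() - start, result
    return wrapper

def slow_add(a, b):
    time.sleep(0.1)
    return a + b

elapsed, value = calculateRunTime(slow_add, 2, 3)
print(elapsed, value)  # roughly 0.1 seconds, 5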
Testing if padding works correctly for common scenarios of 2D data (batch_size x sequences). Specifically testing whether it produces proper padded sequences and their masks. Also, testing that when symbol_to_mask is provided, it correctly masks those symbols. | def test_2D_padding(self):
field_names = ["text"]
mask_field_names = ['text_mask']
data_path = "mldp/tests/data/news.csv"
pad_symbol = "<PAD>"
mask_field_name_suffix = "mask"
padding_modes = ['left', 'right', 'both']
symbols_to_mask = ["The", "a", "to", "as"]
axis = 1
data_chunk = read_data_from_csv_file(data_path, sep="\t")
# tokenize field values
for fn in field_names:
data_chunk[fn] = np.array([seq.split() for seq in data_chunk[fn]])
for padding_mode, symbol_to_mask in product(padding_modes,
symbols_to_mask):
padder = Padder(field_names, pad_symbol=pad_symbol,
new_mask_fname=mask_field_names,
padding_mode=padding_mode, axis=axis,
symbol_to_mask=symbol_to_mask)
padded_data_chunk = padder(copy.deepcopy(data_chunk))
for fn, mask_fn in zip(field_names, mask_field_names):
padded_fv = padded_data_chunk[fn]
mask = padded_data_chunk[mask_fn]
original_fv = data_chunk[fn]
self.assertTrue(len(padded_fv.shape) == 2)
self._test_padded_values(original_field_values=original_fv,
padded_field_values=padded_fv,
mask=mask, pad_symbol=pad_symbol,
symbol_to_mask=symbol_to_mask) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_3D_padding(self):\n field_name = \"dummy\"\n mask_field_name = 'dummy_mask'\n pad_symbol = -99\n mask_fn_suffix = \"mask\"\n padding_mode = \"both\"\n axis = 2\n\n data_chunk = DataChunk(**{field_name: np.array([\n [[0, 1, 2], [3, 4, 5], [], [6]],\n [[1], [1, 2], []]\n ])})\n padder = Padder(field_name, pad_symbol=pad_symbol, axis=axis,\n new_mask_fname=mask_field_name,\n padding_mode=padding_mode)\n padded_data_chunk = padder(copy.deepcopy(data_chunk))\n\n original_fv = data_chunk[field_name]\n padded_fv = padded_data_chunk[field_name]\n mask = padded_data_chunk[mask_field_name]\n\n for ofv, pfv, m in zip(original_fv, padded_fv, mask):\n self._test_padded_values(original_field_values=ofv,\n padded_field_values=pfv, mask=m,\n pad_symbol=pad_symbol)",
"def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value = 2.0\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.88523461, 1.99072967, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 4.45995261, 9.40579439, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 6.43138915, 0.55102135, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -3.37046541, -2.92035609, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.41939397, 1.11828761, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -6.68411074, -4.09524338, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)",
"def test_pad6():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 1, 3, 0, 2, 0)\n mode = \"replicate\"\n data_format = \"NDHWC\"\n res = np.array(\n [\n [\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)",
"def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def pad_vector_grid(vector_sequences, grid_shape_sequences, max_grid_shape = None, dtype = 'int32', padding = 'pre', truncating = 'pre', value = 0.):\n\t\n row_lengths = [s[0] for s in grid_shape_sequences]\n col_lengths = [s[1] for s in grid_shape_sequences]\n assert vector_sequences[0] is not None \n dim = vector_sequences[0].shape[1] \n nb_samples = len(vector_sequences)\n if max_grid_shape is None:\n max_grid_shape = (np.max(row_lengths), np.max(col_lengths))\n x = np.ones( (nb_samples,)+ max_grid_shape +(dim,)).astype(dtype)* value \n mask = np.zeros((nb_samples,)+max_grid_shape)\n for idx, vs in enumerate(vector_sequences):\n if len(vs) == 0:\n continue\n grid_vec = np.reshape(vs,(tuple(grid_shape_sequences[idx]) + (dim,)) , order='F')\n # testiing code\n #patchRow, patchCol = [25,25]\n #showGrid(grid_vec, grid_shape_sequences[idx], [patchRow, patchCol]) \n if truncating == 'pre': \n trunc = grid_vec[-max_grid_shape[0]:,-max_grid_shape[1]:,:]\n elif truncating == 'post':\n trunc = grid_vec[:max_grid_shape[0],:max_grid_shape[1],:]\n else:\n raise ValueError(\"Truncating type '%s' not understood\" % padding)\n \n if padding == 'post':\n x[idx,:trunc.shape[0],:trunc.shape[1],:] = trunc.copy()\n mask[idx, :trunc.shape[0],:trunc.shape[1]] = 1\n elif padding == 'pre':\n x[idx, -trunc.shape[0]:,-trunc.shape[1]:, :] = trunc.copy()\n mask[idx, -trunc.shape[0]:, -trunc.shape[1]:] = 1\n else:\n raise ValueError(\"PAdding type '%s' not understood\" % padding) \n #showGrid(x[idx,::], max_grid_shape, [patchRow, patchCol]) \n return x , mask# -*- coding: utf-8 -*-",
"def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")",
"def _is_padding_necessary(self, signal: np.array) -> bool:\n if len(signal) < self.number_expected_samples:\n return True\n else:\n return False",
"def generate_padding_masks(data, pad_value=0):\n with torch.no_grad():\n mask = (data == pad_value).to(data.device).t().unsqueeze(1)\n return mask",
"def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)",
"def _dynamic_padding(self, batch_data, pad_id = 0 ):\n #print 'dynamic _padding...'\n #print 'pad_id' + str(pad_id)\n max_p_len = 1000\n max_q_len =1000\n pad_p_len = min(max_p_len, max(batch_data['passage_length']))+1\n #print 'pad_p_len' + str(pad_p_len)\n pad_q_len = min(max_q_len, max(batch_data['question_length']))\n #print 'pad_q_len' + str(pad_q_len)\n #for ids in batch_data['passage_token_ids'] :\n #print 'padding: '\n #print (ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)",
"def build_attention_mask_3d_padding(source_mask, target_mask):\n mask = make_attention_mask_3d(source_mask, target_mask)\n # invert mask for Megatron\n return mask < 0.5",
"def generate_visual_features_padding_masks(data, pad_value=0):\n with torch.no_grad():\n return (data == pad_value).all(dim=-1).t().to(data.device).unsqueeze(1)",
"def make_padding_mask(input_ids, padding_idx=1):\r\n padding_mask = input_ids.eq(padding_idx)\r\n if not padding_mask.any():\r\n padding_mask = None\r\n return padding_mask",
"def pad_vector_grid_sequence(vector_sequences, grid_shape_sequences, max_grid_shape = None, dtype = 'int32', padding = 'pre', truncating = 'pre', value = 0.):\n\t\n grid_X, grid_mask = pad_vector_grid(vector_sequences, grid_shape_sequences, max_grid_shape, dtype, padding , truncating, value)\n \n padded_X = np.reshape(grid_X, (grid_X.shape[0], np.prod(max_grid_shape,grid_X.shape[-1])))\n padded_mask = np.reshape(grid_mask, (grid_mask.shape[0], np.prod(max_grid_shape)), order='F')\n # needs to test if the reshape works just fine.\n return padded_X, padded_mask",
"def create_padding_mask(seq):\r\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\r\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\r",
"def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)",
"def _dynamic_padding(self, batch_data, pad_id):\n pad_p_len = min(self.max_p_len, max(batch_data['passage_length']))\n pad_q_len = min(self.max_q_len, max(batch_data['question_length']))\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def compute_mask_indices(\n shape: Tuple[int, int],\n padding_mask: Optional[torch.Tensor],\n mask_prob: float,\n mask_length: int,\n mask_type: str = \"static\",\n mask_other: float = 0.0,\n min_masks: int = 0,\n no_overlap: bool = False,\n min_space: int = 0,\n) -> np.ndarray:\n\n bsz, all_sz = shape\n mask = np.full((bsz, all_sz), False)\n\n all_num_mask = int(\n # add a random number for probabilistic rounding\n mask_prob * all_sz / float(mask_length)\n + np.random.rand()\n )\n\n all_num_mask = max(min_masks, all_num_mask)\n\n mask_idcs = []\n for i in range(bsz):\n if padding_mask is not None:\n sz = all_sz - padding_mask[i].long().sum().item()\n num_mask = int(\n # add a random number for probabilistic rounding\n mask_prob * sz / float(mask_length)\n + np.random.rand()\n )\n num_mask = max(min_masks, num_mask)\n else:\n sz = all_sz\n num_mask = all_num_mask\n\n if mask_type == \"static\":\n lengths = np.full(num_mask, mask_length)\n elif mask_type == \"uniform\":\n lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)\n elif mask_type == \"normal\":\n lengths = np.random.normal(mask_length, mask_other, size=num_mask)\n lengths = [max(1, int(round(x))) for x in lengths]\n elif mask_type == \"poisson\":\n lengths = np.random.poisson(mask_length, size=num_mask)\n lengths = [int(round(x)) for x in lengths]\n else:\n raise Exception(\"unknown mask selection \" + mask_type)\n\n if sum(lengths) == 0:\n lengths[0] = min(mask_length, sz - 1)\n\n if no_overlap:\n mask_idc = []\n\n def arrange(s, e, length, keep_length):\n span_start = np.random.randint(s, e - length)\n mask_idc.extend(span_start + i for i in range(length))\n\n new_parts = []\n if span_start - s - min_space >= keep_length:\n new_parts.append((s, span_start - min_space + 1))\n if e - span_start - keep_length - min_space > keep_length:\n new_parts.append((span_start + length + min_space, e))\n return new_parts\n\n parts = [(0, sz)]\n min_length = min(lengths)\n for length in sorted(lengths, reverse=True):\n lens = np.fromiter(\n (e - s if e - s >= length + min_space else 0 for s, e in parts),\n np.int,\n )\n l_sum = np.sum(lens)\n if l_sum == 0:\n break\n probs = lens / np.sum(lens)\n c = np.random.choice(len(parts), p=probs)\n s, e = parts.pop(c)\n parts.extend(arrange(s, e, length, min_length))\n mask_idc = np.asarray(mask_idc)\n else:\n min_len = min(lengths)\n if sz - min_len <= num_mask:\n min_len = sz - num_mask - 1\n\n mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)\n\n mask_idc = np.asarray(\n [\n mask_idc[j] + offset\n for j in range(len(mask_idc))\n for offset in range(lengths[j])\n ]\n )\n\n mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))\n\n min_len = min([len(m) for m in mask_idcs])\n for i, mask_idc in enumerate(mask_idcs):\n if len(mask_idc) > min_len:\n mask_idc = np.random.choice(mask_idc, min_len, replace=False)\n mask[i, mask_idc] = True\n\n return mask",
"def make_masks(self, src, tgt, src_len=None, pad_idx=0):\n if src_len is not None:\n abs_len = torch.round(src_len * src.shape[1])\n src_key_padding_mask = (\n torch.arange(src.shape[1])[None, :].to(abs_len)\n > abs_len[:, None]\n )\n\n tgt_key_padding_mask = get_key_padding_mask(tgt, pad_idx=pad_idx)\n\n src_mask = None\n tgt_mask = get_lookahead_mask(tgt)\n return src_key_padding_mask, tgt_key_padding_mask, src_mask, tgt_mask",
"def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)",
"def apply_mask(data, mask_func, seed=None, padding=None):\n shape = np.array(data.shape)\n shape[:-3] = 1\n mask = mask_func(shape, seed)\n if padding is not None:\n mask[:, :, :padding[0]] = 0\n mask[:, :, padding[1]:] = 0 # padding value inclusive on right of zeros\n\n masked_data = data * mask + 0.0 # The + 0.0 removes the sign of the zeros\n return masked_data, mask",
"def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")",
"def make_inference_attention_mask_3d(source_block, target_block, pad_id):\n # mask = (target_block[:, None, :] != pad_id) * (source_block[:, :, None] != pad_id)\n return make_attention_mask_3d(source_block != pad_id, target_block != pad_id)",
"def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)",
"def _pad_masks(self, results):\n pad_shape = results['pad_shape'][:2]\n pad_val = self.pad_val.get('masks', 0)\n for key in results.get('mask_fields', []):\n results[key] = results[key].pad(pad_shape, pad_val=pad_val)",
"def test_pad7():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 2, 1, 1, 0, 0)\n mode = \"reflect\"\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n ]\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)",
"def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def pad_conv_pattern():\n pattern = is_op(\"nn.pad\")(wildcard(), is_constant())\n pattern = is_op(\"nn.conv2d\")(pattern, is_constant())\n pattern = pattern.optional(lambda x: is_op(\"nn.bias_add\")(x, is_constant()))\n pattern = pattern.optional(lambda x: is_op(\"add\")(x, is_constant()))\n pattern = pattern.optional(\n lambda x: is_tuple_get_item(\n is_op(\"nn.batch_norm\")(\n x, is_constant(), is_constant(), is_constant(), is_constant()\n )\n )\n )\n pattern = pattern.optional(is_op(\"nn.relu\"))\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern",
"def get_padding_mask(inputs, padding_value=0):\n mask = tf.cast(tf.equal(inputs, padding_value), 'float32') \n mask = mask[:, tf.newaxis, tf.newaxis, :]\n return mask"
] | [
"0.66571194",
"0.6441682",
"0.632566",
"0.6228574",
"0.6153739",
"0.61087537",
"0.6105533",
"0.60827976",
"0.6003496",
"0.5982256",
"0.5972349",
"0.5965401",
"0.59637886",
"0.59558815",
"0.5952085",
"0.591786",
"0.5871366",
"0.5843445",
"0.5820535",
"0.5779922",
"0.5775166",
"0.57672703",
"0.5751178",
"0.57325435",
"0.5689469",
"0.5669374",
"0.5654493",
"0.56238085",
"0.5619554",
"0.56134987"
] | 0.75791377 | 0 |
Light version test to check if the padder works for 3D data. | def test_3D_padding(self):
field_name = "dummy"
mask_field_name = 'dummy_mask'
pad_symbol = -99
mask_fn_suffix = "mask"
padding_mode = "both"
axis = 2
data_chunk = DataChunk(**{field_name: np.array([
[[0, 1, 2], [3, 4, 5], [], [6]],
[[1], [1, 2], []]
])})
padder = Padder(field_name, pad_symbol=pad_symbol, axis=axis,
new_mask_fname=mask_field_name,
padding_mode=padding_mode)
padded_data_chunk = padder(copy.deepcopy(data_chunk))
original_fv = data_chunk[field_name]
padded_fv = padded_data_chunk[field_name]
mask = padded_data_chunk[mask_field_name]
for ofv, pfv, m in zip(original_fv, padded_fv, mask):
self._test_padded_values(original_field_values=ofv,
padded_field_values=pfv, mask=m,
pad_symbol=pad_symbol) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_Pad3D3():\n input_shape = (1, 1, 2, 3, 2)\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def test_Pad3D10():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)",
"def test_Pad3D11():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 1]).astype('int32')\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)",
"def is3DImage(self):\n\t\treturn self.is3D",
"def test_Pad3D13():\n input_shape = (1, 2, 3, 1)\n pad = [1, 1, 1, 0]\n mode = \"reflect\"\n res = [[[[5], [4], [5], [6], [5]], [[2], [1], [2], [3], [2]], [[5], [4], [5], [6], [5]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)",
"def test_Pad3D7():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)",
"def test_Pad3D8():\n input_shape = (1, 2, 3)\n pad = [1, 1]\n mode = \"reflect\"\n res = [[[4, 5, 6], [1, 2, 3], [4, 5, 6], [1, 2, 3]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)",
"def test_Pad3D14():\n input_shape = (1, 1, 2, 2, 3)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n [[5, 4, 5, 6, 5], [2, 1, 2, 3, 2], [5, 4, 5, 6, 5]],\n [[11, 10, 11, 12, 11], [8, 7, 8, 9, 8], [11, 10, 11, 12, 11]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1",
"def is3_d(self):\n return self.container['is3_d']",
"def test_Pad3D_base():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)",
"def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")",
"def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)",
"def Has3d(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Has3d(self, *args)",
"def test_Pad3D12():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")",
"def test_Pad3D2():\n input_shape = (1, 1, 2, 3)\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)",
"def test_Pad3D1():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)",
"def test_Pad3D4():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"constant\"\n res = [[[0, 1, 2, 3, 0, 0], [0, 4, 5, 6, 0, 0]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCL\", data=data)",
"def test_Pad3D23():\n input_shape = (1, 2, 3)\n # pad = np.array([1, 2]).astype('int32')\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6], [4, 5, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NLC\", data=data)",
"def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)",
"def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)",
"def test_Pad3D15():\n input_shape = (1, 2, 2, 2, 2)\n pad = [1, 1, 1, 0, 1, 0]\n mode = \"reflect\"\n res = [\n [\n [\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n [[11, 12], [9, 10], [11, 12], [9, 10]],\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n ],\n [[[7, 8], [5, 6], [7, 8], [5, 6]], [[3, 4], [1, 2], [3, 4], [1, 2]], [[7, 8], [5, 6], [7, 8], [5, 6]]],\n [\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n [[11, 12], [9, 10], [11, 12], [9, 10]],\n [[15, 16], [13, 14], [15, 16], [13, 14]],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)",
"def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)",
"def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")",
"def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)",
"def test_Pad3D20():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)",
"def test_Pad3D22():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")",
"def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)"
] | [
"0.6824534",
"0.66838425",
"0.6630269",
"0.66077274",
"0.65853065",
"0.6516807",
"0.65082043",
"0.65043795",
"0.64946616",
"0.648253",
"0.6417899",
"0.63924766",
"0.6375224",
"0.6359866",
"0.6327758",
"0.6305439",
"0.62622947",
"0.6182932",
"0.617922",
"0.61383235",
"0.6111061",
"0.61090094",
"0.6066312",
"0.6058055",
"0.6045036",
"0.60120153",
"0.5916626",
"0.5907093",
"0.58973205",
"0.58940995"
] | 0.6957212 | 0 |
Add a default share ID to C{store}, pointing to C{shareID} with a priority C{priority}. The highestpriority share ID identifies the share that will be retrieved when a user does not explicitly provide a share ID in their URL (e.g. /host/users/username/). | def addDefaultShareID(store, shareID, priority):
_DefaultShareID(store=store, shareID=shareID, priority=priority) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDefaultShareID(store):\n defaultShareID = store.findFirst(\n _DefaultShareID, sort=_DefaultShareID.priority.desc)\n if defaultShareID is None:\n return u''\n return defaultShareID.shareID",
"def get_share(self, activity_user_id, activity_id, share_id):\n return None",
"def shareItem(self, sharedItem, shareID=None, interfaces=ALL_IMPLEMENTED):\n if shareID is None:\n shareID = genShareID(sharedItem.store)\n return Share(store=self.store,\n shareID=shareID,\n sharedItem=sharedItem,\n sharedTo=self,\n sharedInterfaces=interfaces)",
"def put(self, item, priority=False):\n id = uuid.uuid4().hex\n pipe = self.redis.pipeline()\n\n if priority:\n pipe.rpush(self.feed_ids, id)\n else:\n pipe.lpush(self.feed_ids, id)\n pipe.incr(self.feed_publishes)\n pipe.hset(self.feed_items, id, item)\n pipe.zadd(self.feed_published, **{id: int(time.time()*1000)})\n pipe.execute()\n return id",
"def test_set_share(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=200\n )",
"def ensure_share(self, context, share, share_server=None):\n pass",
"def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")",
"def share(self, share):\n if share is None:\n raise ValueError(\"Invalid value for `share`, must not be `None`\") # noqa: E501\n\n self._share = share",
"def addShare(self, name, path, securityMode = NFSSecurityMode.NONE):\n if name in self.shares:\n raise ValueError(\"Share '%s' is already in use\"%name)\n \n share = NFSShare()\n share.name = name\n share.path = path\n share.securityMode = securityMode\n self.shares[name] = share\n return share",
"def shareItem(sharedItem, toRole=None, toName=None, shareID=None,\n interfaces=ALL_IMPLEMENTED):\n warnings.warn(\"Use Role.shareItem() instead of sharing.shareItem().\",\n PendingDeprecationWarning,\n stacklevel=2)\n if toRole is None:\n if toName is not None:\n toRole = getPrimaryRole(sharedItem.store, toName, True)\n else:\n toRole = getEveryoneRole(sharedItem.store)\n return toRole.shareItem(sharedItem, shareID, interfaces)",
"def create_share(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_CreateShare', self.handle))",
"def sendShare(self, program_counter, share):\n self.sendData(program_counter, SHARE, hex(share.value))",
"def share_image(self):\n portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n \n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IFbShareSettings, check=False)\n if settings.image_to_share==u'site_logo':\n portal = self.portal_state.portal()\n logoName = portal.restrictedTraverse('base_properties').logoName\n return \"%s/%s\" % (portal_state.portal_url(), logoName)\n \n share_image_view = getMultiAdapter((portal_state.portal(), self.request),\n name=u'collective.fbshare.default_image')\n if share_image_view.data():\n return \"%s/@@collective.fbshare.default_image\" % portal_state.portal_url()",
"def _mpd_add_track(uri, position = None):\n \n if position != None:\n _mpd_client.addid(uri, position)\n else:\n _mpd_client.addid(uri)",
"def store_id(self, store_id):\n self._store_id = store_id\n return self",
"def genShareID(store):\n return unicode(os.urandom(16).encode('hex'), 'ascii')",
"def _setorder(req, stores):\n for store in stores.get_all_stores():\n stores[store] = int(req.args.get(store.__class__.__name__, 0))\n continue",
"def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})",
"def getShare(store, role, shareID):\n warnings.warn(\"Use Role.getShare() instead of sharing.getShare().\",\n PendingDeprecationWarning,\n stacklevel=2)\n return role.getShare(shareID)",
"async def ws_set_preferred_item(\n self,\n hass: HomeAssistant,\n connection: websocket_api.ActiveConnection,\n msg: dict[str, Any],\n ) -> None:\n try:\n self.storage_collection.async_set_preferred_item(msg[self.item_id_key])\n except ItemNotFound:\n connection.send_error(\n msg[\"id\"], websocket_api.const.ERR_NOT_FOUND, \"unknown item\"\n )\n return\n connection.send_result(msg[\"id\"])",
"def _exchange_shares(self, peer_id, field_element):\n assert isinstance(field_element, FieldElement)\n\n if peer_id == self.id:\n return Share(self, field_element.field, field_element)\n else:\n share = self._expect_share(peer_id, field_element.field)\n pc = tuple(self.program_counter)\n self.protocols[peer_id].sendShare(pc, field_element)\n return share",
"def _insert_default_fallback(self):\n db.add_destination_with_aliases(self.dbm,\n \"https://duckduckgo.com?q={}\",\n \"DuckDuckGo\",\n [\"ddg\"],\n True,\n True)",
"def assign_sites(self, action, site, paired_site=None):\n self._current_site += 1\n if paired_site:\n base_current, base_paired = self.action_to_pair[action]\n self._primary_list[site] = base_current\n self._primary_list[paired_site] = base_paired\n else:\n self._primary_list[site] = self.action_to_base[action]",
"def copy(self, cr, uid, id, default=None, context=None):\n if default is None:\n default = {}\n if context is None:\n context = {}\n default.update({\n 'name': self.pool.get('ir.sequence').get(cr, uid, 'services.contracts.archive'),\n \n })\n return super(env_and_safety_allowances_archive, self).copy(cr, uid, id, default, context)",
"def add_site(self, site):\n assert self.default_model is not None\n self.default_model.add_site(site)",
"def _add_ID(self, preferred_id):\n self.id = preferred_id\n while self.id in Thing.ID_dict: # unique-ify self.id if necessary\n self.id = self.id + str(random.randint(0, 9))\n Thing.ID_dict[self.id] = self\n return self.id",
"def randomEarlyShared(store, role):\n for r in role.allRoles():\n share = store.findFirst(Share, Share.sharedTo == r,\n sort=Share.storeID.ascending)\n if share is not None:\n return share.sharedItem\n raise NoSuchShare(\"Why, that user hasn't shared anything at all!\")",
"def share_link(cls, user, link):",
"def share_link(cls, user, link):",
"def add():\n # Get URL from GET request\n site = str(request.query.site)\n \n # Check if URL starts with \"http://\" if not, add it\n if (site[:7] != \"http://\") and (site[:8] != \"https://\"):\n site = \"http://\" + site\n \n print(\"Adding site to index queue: \" + site)\n \n # Add URL to queue.db\n cursor.execute('INSERT INTO queue (url, priority) VALUES (?, 100)', (site,))\n # Save the queue database\n sql.commit()\n \n \n return 'OK'"
] | [
"0.68312156",
"0.5360172",
"0.5310793",
"0.52944005",
"0.52378845",
"0.5170581",
"0.51500475",
"0.497758",
"0.49575073",
"0.4938343",
"0.49230355",
"0.48810166",
"0.48424843",
"0.48328438",
"0.48196656",
"0.47416145",
"0.4726872",
"0.4690981",
"0.46693465",
"0.4620207",
"0.45870006",
"0.4586725",
"0.4579653",
"0.45454827",
"0.4524235",
"0.45064613",
"0.4502177",
"0.44881842",
"0.44881842",
"0.44854423"
] | 0.91040206 | 0 |
Get the highestpriority default share ID for C{store}. | def getDefaultShareID(store):
defaultShareID = store.findFirst(
_DefaultShareID, sort=_DefaultShareID.priority.desc)
if defaultShareID is None:
return u''
return defaultShareID.shareID | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addDefaultShareID(store, shareID, priority):\n _DefaultShareID(store=store, shareID=shareID, priority=priority)",
"def get_highest_id(self):\n\n return self.mint.get_highest_id()",
"def genShareID(store):\n return unicode(os.urandom(16).encode('hex'), 'ascii')",
"def max_share_count(self) -> str:\n return pulumi.get(self, \"max_share_count\")",
"def get_default_store_name_for_current_request():\r\n store_name = 'default'\r\n\r\n # see what request we are currently processing - if any at all - and get hostname for the request\r\n hostname = get_current_request_hostname()\r\n\r\n # get mapping information which is defined in configurations\r\n mappings = getattr(settings, 'HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', None)\r\n\r\n # compare hostname against the regex expressions set of mappings\r\n # which will tell us which store name to use\r\n if hostname and mappings:\r\n for key in mappings.keys():\r\n if re.match(key, hostname):\r\n store_name = mappings[key]\r\n return store_name\r\n\r\n return store_name",
"def maxid() -> int:\n pass",
"def get_max_id(self, type_name):\n return self._symtab[type_name].get_max_id()",
"def get_default_version(self):\n # latest is a special case where we don't have to check if it exists\n if self.default_version == 'latest':\n return self.default_version\n # check if the default_version exists\n version_qs = self.versions.filter(\n slug=self.default_version,\n active=True\n )\n if version_qs.exists():\n return self.default_version\n return 'latest'",
"def default(self):\n return self._policies[0]",
"def get_first_system_store(self):\n\t\ttry:\n\t\t\treturn self.stores[1]\n\t\texcept IndexError:\n\t\t\traise SafeException(_(\"No system stores have been configured\"))",
"def max_shares(self) -> Optional[int]:\n return pulumi.get(self, \"max_shares\")",
"def create_default_identifier():\n return random.randint(0, constants.UINT64_MAX)",
"def get_id(self):\n return self.get_sitename()",
"def default():\n return DefaultSwh.default()",
"def last_shared(self):\n return self.properties.get(\"lastShared\", SharingDetail())",
"def get_max_id(self):\r\n max_id = None\r\n for pid in self.players:\r\n if max_id is None or pid > max_id:\r\n max_id = pid\r\n return max_id",
"def get_default_output_id():\n _check_init()\n return _pypm.GetDefaultOutputDeviceID()",
"def standard_id(self):\n return self.get(\"standard_id\", decode=True)",
"def get_default(self):\n\n\t\treturn self.__default",
"def get_default_sensor_type():\n return get_sensor_type_id(DEFAULT_SENSOR_TYPE)",
"def default_storage_account_id(self) -> str:\n return pulumi.get(self, \"default_storage_account_id\")",
"def current_global_datamart_id(self, **kwargs) -> int:\n\n max_idx_query = json.dumps(\n {\n \"aggs\": {\n \"max_id\": {\n \"max\": {\n \"field\": \"datamart_id\"\n }\n }\n },\n \"size\": 0\n }\n )\n result = self.es.search(index=kwargs[\"index\"], body=max_idx_query)\n return int(result[\"aggregations\"][\"max_id\"][\"value\"]) if result[\"aggregations\"][\"max_id\"][\n \"value\"] else 0",
"def get_id(self):\n from ranger_performance_tool import perf_globals\n enabled_services = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"enabled_services\")\n service = random.choice(enabled_services)\n policy_list = self.remote_store.get_policy_list()[service]\n return random.choice(policy_list).id",
"def Default():\n return _DEFAULT",
"def default_magento_root_category_id():\n return 1",
"def public_default_scope_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"public_default_scope_id\")",
"def get_largest_id(self):\n try:\n cur = self.conn.execute(\"\"\"SELECT MAX(id) FROM todo;\"\"\")\n row = cur.fetchone()\n if row[0] == None:\n return 0\n else:\n return row[0]\n except Exception as e:\n print(e)",
"def get_primary_id(self):",
"def default_thermostat_identifier(self):\n\n return self._default_thermostat_identifier",
"def get_max_sid(self):\n session = self.DBSession()\n # first element of the first result or None if no rows present.\n # If multiple rows are returned, raises MultipleResultsFound.\n data = session.query(func.max(CurrentPropertySheet.sid)).scalar() or 0\n return data"
] | [
"0.7206265",
"0.60183716",
"0.5947432",
"0.5896664",
"0.57326794",
"0.5730939",
"0.56178427",
"0.5572699",
"0.5549776",
"0.55466425",
"0.5538893",
"0.55001765",
"0.54829454",
"0.5482721",
"0.54605675",
"0.54046506",
"0.53794336",
"0.5372752",
"0.5364573",
"0.53643966",
"0.53571373",
"0.534556",
"0.534383",
"0.531395",
"0.52884966",
"0.5283186",
"0.5282972",
"0.52828896",
"0.52724975",
"0.52619064"
] | 0.8637612 | 0 |
Retrieve a L{SharingIndex} for a particular user, or rend.NotFound. | def locateChild(self, ctx, segments):
store = _storeFromUsername(
self.loginSystem.store, segments[0].decode('utf-8'))
if store is None:
return rend.NotFound
return (SharingIndex(store, self.webViewer), segments[1:]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_share(self, activity_user_id, activity_id, share_id):\n return None",
"def get_user(request, user_id):\n\n try:\n get_user = User.objects.get(id=user_id)\n except:\n return JsonResponse(\n \"Not Found - User does not exist.\", status=404, safe=False\n )\n\n # Check for share code.\n valid_sc = False\n if get_user.share_code:\n if request.GET.get(\"sharecode\") == get_user.share_code:\n valid_sc = True\n\n if not valid_sc:\n try:\n verify_user_login(request)\n except PermissionDenied:\n return JsonResponse(\n \"Unauthorized - Login required.\", status=401, safe=False\n )\n\n response = get_user.serialize()\n response[\"graphs\"] = get_graphs(get_user)\n return JsonResponse(response, status=200)",
"def get_object(self):\n requested_user = self.kwargs.get('username')\n loggedin_user = self.request.user.username\n if str(requested_user) == str(loggedin_user) or requested_user == 'me':\n requested_user = loggedin_user\n return get_object_or_404(User, username__iexact=requested_user, is_active=True)\n else:\n raise PermissionDenied",
"def locateChild(self, ctx, segments):\n shareID = segments[0].decode('utf-8')\n\n role = self.webViewer.roleIn(self.userStore)\n\n # if there is an empty segment\n if shareID == u'':\n # then we want to return the default share. if we find one, then\n # let's use that\n defaultShareID = getDefaultShareID(self.userStore)\n try:\n sharedItem = role.getShare(defaultShareID)\n except sharing.NoSuchShare:\n return rend.NotFound\n # otherwise the user is trying to access some other share\n else:\n # let's see if it's a real share\n try:\n sharedItem = role.getShare(shareID)\n # oops it's not\n except sharing.NoSuchShare:\n return rend.NotFound\n\n return (self.webViewer.wrapModel(sharedItem),\n segments[1:])",
"def get(cls, username, server, bucket=None):\n\t\tusername = cls._clean_username(username)\n\t\tif not username:\n\t\t\traise IDMException(\"you must provide a username\")\n\t\t\n\t\tres = cls.find_on({'type': 'user', 'username': username}, server, bucket)\n\t\tif res and len(res) > 0:\n\t\t\treturn res[0]\n\t\traise IDMException(\"no user with the given username\", 404)",
"def get_object(self, username):\n try:\n return User.objects.get(username=username)\n except User.DoesNotExist:\n raise Http404",
"def get_by_username_or_404(cls, username):\n\n user = cls.query.filter(cls.username == username).first()\n\n if user is None:\n abort(404, description='Resource not found.')\n\n return user",
"def getShare(self, shareID):\n shares = list(\n self.store.query(Share,\n AND(Share.shareID == shareID,\n Share.sharedTo.oneOf(self.allRoles()))))\n interfaces = []\n for share in shares:\n interfaces += share.sharedInterfaces\n if shares:\n return SharedProxy(shares[0].sharedItem,\n interfaces,\n shareID)\n raise NoSuchShare()",
"def user(user_id):\n\n user = User.query.get(user_id)\n\n if user:\n return jsonify({'status': 'success',\n 'user_id': user.spread_id,\n 'user_name': user.spread_name})\n else:\n return jsonify({'status': 'error',\n 'message': 'No spread found with that ID'})",
"def with_id(cls, user_id, server, bucket=None):\n\t\tif ObjectId.is_valid(user_id):\n\t\t\tuser_id = ObjectId(user_id)\n\t\tres = cls.find_on({'type': 'user', '_id': user_id}, server, bucket)\n\t\tif res and len(res) > 0:\n\t\t\treturn res[0]\n\t\traise IDMException(\"no user with the given id “{}”\".format(user_id), 404)",
"def share(self):\n friend_sql = self.db.text(\"SELECT u.id, concat(u.second_name, ' ', u.first_name) text FROM user u \"\n \"inner join friend f on f.friend_id=u.id where f.user_id =%s \"\n \"and f.status = 1\" % self.current_user.id)\n share_sql = self.db.text(\"SELECT fs.user_assigned_id id, \"\n \"concat(u.first_name, ' ', COALESCE(u.second_name, '')) text \"\n \"FROM file_share fs inner join user u on u.id = fs.user_assigned_id \"\n \"where fs.file_id = %s\" % self.file_id)\n\n friend_db_list, share_db_list = self.db.execute([friend_sql, share_sql])\n return {'success': True,\n 'friend_list': friend_db_list,\n 'shared_list': share_db_list}",
"def share_type_get_by_name_or_id(context, name_or_id):\n try:\n return _share_type_get(context, name_or_id)\n except exception.ShareTypeNotFound:\n try:\n return _share_type_get_by_name(context, name_or_id)\n except exception.ShareTypeNotFoundByName:\n return None",
"def getShare(store, role, shareID):\n warnings.warn(\"Use Role.getShare() instead of sharing.getShare().\",\n PendingDeprecationWarning,\n stacklevel=2)\n return role.getShare(shareID)",
"def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)",
"def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)",
"def for_user(self, user):\n return self.get_query_set().filter(owner=user)",
"def get_actioncluster_for_user(slug, user):\n actioncluster = get_object_or_404(ActionCluster.active, slug__exact=slug)\n # Application is published, no need for validation:\n if actioncluster.is_visible_by(user):\n return actioncluster\n raise Http404",
"def show(user_id):\n return users.get_or_404(user_id)",
"def get_or_create_spreadsheet(gc, name, share_with):\n try:\n sh = gc.open(name)\n except SpreadsheetNotFound:\n sh = gc.create(name)\n sh.share(share_with, perm_type='user', role='writer')\n return sh",
"def get(self, username=None):\n ownprofile = False\n if username is None:\n # try to use the logged in user if existing\n user = self.user\n if user is None:\n raise werkzeug.exceptions.NotFound()\n else:\n user = self.settings.users.get_by_id(username)\n if user is None:\n raise werkzeug.exceptions.NotFound()\n \n if self.user is not None:\n ownprofile = self.user['_id'] == user['_id']\n\n return self.render(myuser = user, ownprofile = ownprofile)",
"def index(request):\n if request.user is None:\n return view_all(request, index_call=True)\n else:\n return mine(request)",
"def user_to_index(self, user_id):\n if user_id in self.user_dict:\n return self.user_dict[user_id]\n else:\n return -1",
"def get_one_user(idx):\n return UserModel.query.filter_by(id=idx, deleted_at=None).first()",
"def get_object(self):\n return get_object_or_404(User, pk__iexact=self.request.user.id)",
"def randomEarlyShared(store, role):\n for r in role.allRoles():\n share = store.findFirst(Share, Share.sharedTo == r,\n sort=Share.storeID.ascending)\n if share is not None:\n return share.sharedItem\n raise NoSuchShare(\"Why, that user hasn't shared anything at all!\")",
"def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user",
"def getUser(self, ind):\r\n if ind >= 0 and ind < len(self.users):\r\n return self.users[ind]\r\n return None",
"def get_host_or_404(user, *args, **kw):\n h = get_object_or_404(Host, *args, **kw)\n if not h.available_for(user):\n raise Http404\n return h",
"def get_object(self, queryset=None):\n obj = super(EditAccountSettings, self).get_object()\n if not str(obj.username) == str(self.request.user):\n raise Http404\n\n return obj",
"def find_one(self, user_id):\n pass"
] | [
"0.5690004",
"0.5583716",
"0.5333351",
"0.5305234",
"0.52540773",
"0.52362347",
"0.52299106",
"0.51833224",
"0.5173709",
"0.5127668",
"0.5119951",
"0.50898397",
"0.5076749",
"0.5047834",
"0.5047834",
"0.50185806",
"0.49993867",
"0.49953124",
"0.4970683",
"0.49701992",
"0.49638736",
"0.49315754",
"0.49292448",
"0.48980418",
"0.48625678",
"0.485484",
"0.48477003",
"0.48334083",
"0.48269936",
"0.4815519"
] | 0.5849322 | 0 |
Look up a shared item for the role viewing this SharingIndex and return a L{PublicAthenaLivePage} containing that shared item's fragment to the user. These semantics are UNSTABLE. This method is adequate for simple uses, but it should be expanded in the future to be more consistent with other resource lookups. In particular, it should allow share implementors to adapt their shares to L{IResource} directly rather than L{INavigableFragment}, to allow for simpler child dispatch. | def locateChild(self, ctx, segments):
shareID = segments[0].decode('utf-8')
role = self.webViewer.roleIn(self.userStore)
# if there is an empty segment
if shareID == u'':
# then we want to return the default share. if we find one, then
# let's use that
defaultShareID = getDefaultShareID(self.userStore)
try:
sharedItem = role.getShare(defaultShareID)
except sharing.NoSuchShare:
return rend.NotFound
# otherwise the user is trying to access some other share
else:
# let's see if it's a real share
try:
sharedItem = role.getShare(shareID)
# oops it's not
except sharing.NoSuchShare:
return rend.NotFound
return (self.webViewer.wrapModel(sharedItem),
segments[1:]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fromSharedItem(cls, sharedItem):\n localpart = None\n for (localpart, domain) in userbase.getAccountNames(sharedItem.store):\n break\n if localpart is None:\n raise NoSuchShare()\n for share in sharedItem.store.query(Share,\n Share.sharedItem == sharedItem):\n break\n else:\n raise NoSuchShare()\n return cls(\n shareID=share.shareID,\n localpart=localpart, domain=domain)",
"def shareItem(self, sharedItem, shareID=None, interfaces=ALL_IMPLEMENTED):\n if shareID is None:\n shareID = genShareID(sharedItem.store)\n return Share(store=self.store,\n shareID=shareID,\n sharedItem=sharedItem,\n sharedTo=self,\n sharedInterfaces=interfaces)",
"def locateChild(self, ctx, segments):\n store = _storeFromUsername(\n self.loginSystem.store, segments[0].decode('utf-8'))\n if store is None:\n return rend.NotFound\n return (SharingIndex(store, self.webViewer), segments[1:])",
"def get_shared_object(self):\n if not self.is_valid():\n raise ValueError(\"get_shared_object may only be called on valid forms\")\n\n new = SharedItem(\n object_id = force_unicode(self.target_object._get_pk_val()),\n content_type = ContentType.objects.get_for_model(self.target_object),\n share_date = datetime.datetime.now(),\n )\n \n return new",
"def get_share(self, activity_user_id, activity_id, share_id):\n return None",
"def shareItem(sharedItem, toRole=None, toName=None, shareID=None,\n interfaces=ALL_IMPLEMENTED):\n warnings.warn(\"Use Role.shareItem() instead of sharing.shareItem().\",\n PendingDeprecationWarning,\n stacklevel=2)\n if toRole is None:\n if toName is not None:\n toRole = getPrimaryRole(sharedItem.store, toName, True)\n else:\n toRole = getEveryoneRole(sharedItem.store)\n return toRole.shareItem(sharedItem, shareID, interfaces)",
"def get_shared_object(self):\n if not self.is_valid():\n raise ValueError(\"get_shared_object may only be called on valid forms\")\n\n new = SharedItem(\n content_type = ContentType.objects.get_for_model(self.target_object),\n object_id = force_unicode(self.target_object._get_pk_val()),\n share_date = datetime.datetime.now(),\n )\n \n return new",
"def share_info(self) -> Sequence['outputs.ShareInfoElementResponse']:\n return pulumi.get(self, \"share_info\")",
"def getShare(store, role, shareID):\n warnings.warn(\"Use Role.getShare() instead of sharing.getShare().\",\n PendingDeprecationWarning,\n stacklevel=2)\n return role.getShare(shareID)",
"def getShare(self, shareID):\n shares = list(\n self.store.query(Share,\n AND(Share.shareID == shareID,\n Share.sharedTo.oneOf(self.allRoles()))))\n interfaces = []\n for share in shares:\n interfaces += share.sharedInterfaces\n if shares:\n return SharedProxy(shares[0].sharedItem,\n interfaces,\n shareID)\n raise NoSuchShare()",
"def share(self, request):\n try:\n article = self.get_object()\n except PermissionDenied as pd:\n return Response({'error': str(pd)})\n\n article.shared_by.add(request.user)\n return Response({'message': '\"{}\" is shared'.format(article.title)})",
"def share(self):\n friend_sql = self.db.text(\"SELECT u.id, concat(u.second_name, ' ', u.first_name) text FROM user u \"\n \"inner join friend f on f.friend_id=u.id where f.user_id =%s \"\n \"and f.status = 1\" % self.current_user.id)\n share_sql = self.db.text(\"SELECT fs.user_assigned_id id, \"\n \"concat(u.first_name, ' ', COALESCE(u.second_name, '')) text \"\n \"FROM file_share fs inner join user u on u.id = fs.user_assigned_id \"\n \"where fs.file_id = %s\" % self.file_id)\n\n friend_db_list, share_db_list = self.db.execute([friend_sql, share_sql])\n return {'success': True,\n 'friend_list': friend_db_list,\n 'shared_list': share_db_list}",
"def randomEarlyShared(store, role):\n for r in role.allRoles():\n share = store.findFirst(Share, Share.sharedTo == r,\n sort=Share.storeID.ascending)\n if share is not None:\n return share.sharedItem\n raise NoSuchShare(\"Why, that user hasn't shared anything at all!\")",
"def _share():\n context = get_factcheck_context()\n return make_response(render_template('share.html', **context))",
"def share_image(self):\n portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IFbShareSettings, check=False)\n\n if settings.content_use_own_image:\n # Stolen from collective.opengraph\n img_size = settings.content_image_size\n context = aq_inner(self.context)\n obj_url = context.absolute_url()\n if hasattr(context, 'getField'):\n field = self.context.getField('image')\n if not field and HAS_LEADIMAGE:\n field = context.getField(IMAGE_FIELD_NAME)\n \n if field and field.get_size(context) > 0:\n if img_size:\n return u'%s/%s_%s' % (obj_url, field.getName(), img_size)\n return u'%s/%s' % (obj_url, field.getName())\n \n return SiteOpenGraphMetaViewlet.share_image(self)",
"def sharing_get(self, request):\n _view = _object_view(self, request)\n queried = SharingCollection(request.params.mixed()).query()\n objs = [request.view(obj) for obj in queried[0]]\n _view.update({\n \"postings\": objs,\n \"result_complete\": queried[1]\n })\n return _view",
"def _get_shared(self, user, callback):\n callback(_build_study_info(\"shared\", user))",
"def public_url(self):\n return '%s/%s-%i' % (settings.SHARE_URL, self.slug, self.pk)",
"def share(self, path, short_url=True):\n path = \"/shares/%s%s\" % (self.session.root, format_path(path))\n\n params = {\n 'short_url': short_url,\n }\n\n url, params, headers = self.request(path, params, method='GET')\n\n return self.rest_client.GET(url, headers)",
"def GET(self, item_id):\n\n asset = sandbox.access(sandbox_name, asset_ids=[item_id])[0]\n return render.info(asset=asset)",
"def share_replica_get(context, replica_id, with_share_data=False,\n with_share_server=False, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server,\n replica_id=replica_id, session=session).first()\n\n if result is None:\n raise exception.ShareReplicaNotFound(replica_id=replica_id)\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)[0]\n\n return result",
"def create_share_from_snapshot(self, context, share, snapshot,\n share_server=None):\n raise NotImplementedError()",
"def share_replicas_get_available_active_replica(context, share_id,\n with_share_data=False,\n with_share_server=False,\n session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, share_id=share_id,\n replica_state=constants.REPLICA_STATE_ACTIVE,\n status=constants.STATUS_AVAILABLE, session=session).first()\n\n if result and with_share_data:\n result = _set_instances_share_data(context, result, session)[0]\n\n return result",
"def share(self, path=None, readonly=True, *, toplevel=False):\n if not readonly:\n if not self.independent:\n msg = \"\"\"{}: Non-readonly HTTP share is not possible.\nThis cell is not fully independent, i.e. it has incoming connections\"\"\"\n raise Exception(msg.format(self))\n\n assert readonly or self.independent\n hcell = self._get_hcell2()\n hcell[\"share\"] = {\n \"path\": path,\n \"readonly\": readonly,\n }\n if toplevel:\n hcell[\"share\"][\"toplevel\"] = True\n hcell[\"UNSHARE\"] = True\n if self._parent() is not None:\n self._parent()._translate()\n return self",
"def get_share(self, nShareIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetShare', self.handle, nShareIndex))",
"def get_home_details(shareable_link: str) -> HousingDetail:\n parsed = urlparse(shareable_link)\n\n try:\n return _URL_MAPPING[parsed.netloc](shareable_link)\n except KeyError as error:\n raise errors.ClientNotSupported(f'No client implementation for {parsed.netloc}') from error",
"def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item",
"def ensure_share(self, context, share, share_server=None):\n\n volume_uuid = self._resolve_volume_name(share['name'],\n share['project_id'])\n\n LOG.debug(\"Ensuring Quobyte share %s\", share['name'])\n\n if not volume_uuid:\n raise (exception.ShareResourceNotFound(\n share_id=share['id']))\n\n result = self.rpc.call('exportVolume', dict(\n volume_uuid=volume_uuid,\n protocol='NFS'))\n\n return self._build_share_export_string(result)",
"def share_type_get(context, id, inactive=False, expected_fields=None):\n return _share_type_get(context, id,\n inactive=inactive,\n expected_fields=expected_fields)",
"def shared_bitshares_instance():\n if not SharedInstance.instance:\n clear_cache()\n SharedInstance.instance = bts.BitShares()\n return SharedInstance.instance"
] | [
"0.5827662",
"0.57348615",
"0.5662804",
"0.56601375",
"0.56514573",
"0.5651063",
"0.5639989",
"0.55624044",
"0.54973114",
"0.53261805",
"0.5240352",
"0.5229794",
"0.5137831",
"0.50752205",
"0.49703422",
"0.48656753",
"0.48469245",
"0.48203176",
"0.4800584",
"0.4796039",
"0.4769127",
"0.47182262",
"0.46969062",
"0.46811953",
"0.46534416",
"0.46292058",
"0.4527935",
"0.45111614",
"0.45045346",
"0.44903696"
] | 0.59761953 | 0 |
Test to check if getting the ID of a petition works. | def testGetPetitionIDEndpoint():
api = c.Api()
petition_url = 'https://www.change.org/p/the-supreme-court-of-missouri-take-the-case-of-michael-brown-popularly-dubbed-the-ferguson-case-to-the-missouri-supreme-court-with-ferguson-officer-darren-wilson-as-the-accused'
petition_id = api.getPetitionId(petition_url)
assert petition_id == 2297566 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_poets_id_get(self):\n pass",
"def test_solareclipses_id_get(self):\n pass",
"def test_variablepresentations_id_get(self):\n pass",
"def test_get_pet_by_id(self):\n response = self.client.open(\n '/pet/{petId}'.format(pet_id=789),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_prefectures_id_get(self):\n pass",
"def test_intercommunalitys_id_get(self):\n pass",
"def test_metrostations_id_get(self):\n pass",
"def test_cyclingleagues_id_get(self):\n pass",
"def test_drugs_id_get(self):\n pass",
"def test_gridironfootballplayers_id_get(self):\n pass",
"def test_id(self):\n result = self.test_client.id\n\n assert result == \"10423098\"",
"def test_plays_id_get(self):\n pass",
"def test_brains_id_get(self):\n pass",
"def test_id(self):\n result = self.test_client.id\n\n assert result == \"86576599\"",
"def test_sport_id(self):\n result = self.test_client.sport_id\n\n assert result == \"1\"",
"def test_sample_one_patient_id(self):\r\n self.assertEqual(self.test_sample.patientID.id, 2)",
"def testGetMultiplePetitionsById():\n\tapi = c.Api()\n\toutput = api.getMultiplePetitionsById([2297756, 1756395])\n\tif type(output) is list:\n\t\tassert True",
"def testValidateId(self):\n #create a different person and try to use their id\n self.directory.invokeFactory(type_name=\"FSDPerson\",id=\"def456\",firstName=\"Joe\",lastName=\"Blow\")\n self.failUnless('def456' in self.person.validate_id('def456'))\n #create a different content object and try to use its id\n self.directory.invokeFactory(\"Document\", \"mydoc\")\n self.failUnless('mydoc' in self.person.validate_id('mydoc'))",
"def testIPerson(self):\n # The id is obtained from the person object directly, uniqueness is enforced\n id = self.person.id\n self.failUnlessEqual(id, 'abc123', \"Person object returned incorrect id.\")",
"def test_get_uniqueId():\n rep=RentRepository()\n rep.store(\"12\",\"23\",\"1\", \"1\")\n try:\n\n idBook=\"13\"\n idCustomer=\"54\"\n flag=\"1\"\n id=\"1\"\n Validator.get_uniqueId(rep.get_all(),id)\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True",
"def test_pod_id(self):\n pod = Pod('1')\n self.assertEqual(pod.pod, '1')",
"def test_variables_id_get(self):\n pass",
"def test_patient_one_id(self):\r\n self.assertEqual(self.test_patient.id, 1)",
"def test_user_id_get(self):\n pass",
"def test_get_info_person_good_id_db(self):\n response = self.client.get('/api/status?id=15f4a3d4-0211-479a-a5c6-c85e56bd4d88', format='json')\n self.assertEqual(response.status_code, 200)",
"def test_get_user_id(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n # for patient\n self.assertEqual(\n PATIENT_ID, self.connection.get_user_id(PATIENT_USERNAME))\n # for doctor\n self.assertEqual(\n DOCTOR_ID, self.connection.get_user_id(DOCTOR_USERNAME))",
"def test_racetracks_id_get(self):\n pass",
"def test_get_recipe_by_id(self):\n recipe = self.request_mgr.get_recipe_by_id(35354)\n self.assertIn(\"Guinness\", recipe.get('title'))",
"def test_presenters_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/presenters/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_comicscreators_id_get(self):\n pass"
] | [
"0.79498094",
"0.7612395",
"0.73243934",
"0.7122424",
"0.7080414",
"0.699587",
"0.69513243",
"0.69329464",
"0.6911171",
"0.67992973",
"0.67653096",
"0.6737983",
"0.67323214",
"0.67052066",
"0.6660955",
"0.66367024",
"0.66249657",
"0.661344",
"0.65653884",
"0.6541388",
"0.651454",
"0.6507249",
"0.6501709",
"0.6497128",
"0.64905745",
"0.64723647",
"0.64642775",
"0.6460986",
"0.64419353",
"0.643244"
] | 0.79774404 | 0 |
Test to check if getting petition details for multiple petitions by ID works correctly. | def testGetMultiplePetitionsById():
api = c.Api()
output = api.getMultiplePetitionsById([2297756, 1756395])
if type(output) is list:
assert True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_poets_id_get(self):\n pass",
"def test_solareclipses_id_get(self):\n pass",
"def testGetPetitionIDEndpoint():\n\tapi = c.Api()\n\tpetition_url = 'https://www.change.org/p/the-supreme-court-of-missouri-take-the-case-of-michael-brown-popularly-dubbed-the-ferguson-case-to-the-missouri-supreme-court-with-ferguson-officer-darren-wilson-as-the-accused'\n\tpetition_id = api.getPetitionId(petition_url)\n\tassert petition_id == 2297566",
"def test_get_pet_by_id(self):\n response = self.client.open(\n '/pet/{petId}'.format(pet_id=789),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_retrieve_ingredients_assigned_unique(self):\n\n ingredient = create_sample_ingredient(user=self.user, name=\"Orange\")\n create_sample_ingredient(user=self.user, name='Chocolate')\n\n recipe1 = create_sample_recipe(\n user=self.user,\n title=\"Orange Juice\",\n time_minutes=10,\n price=6.00\n )\n\n recipe2 = create_sample_recipe(\n user=self.user,\n title=\"Orange Pie\",\n time_minutes=40,\n price=20.00\n )\n\n recipe1.ingredients.add(ingredient)\n recipe2.ingredients.add(ingredient)\n\n response = self.client.get(INGREDIENTS_URL, {\"assigned_only\": 1})\n\n self.assertEqual(len(response.data), 1)\n # we will return 1, because we assigned only 1 id to two recipes\n # also here id is in int",
"def test_get_recipe_ingredients_by_id(self):\n pass",
"def test_intercommunalitys_id_get(self):\n pass",
"def test_get_recipe_equipment_by_id(self):\n pass",
"def test_variablepresentations_id_get(self):\n pass",
"def test_get_ids(civic, main_data, updated_data):\n assert len(civic._get_ids(main_data['assertions'])) == 0\n assert len(civic._get_ids(main_data['variants'])) == 1\n assert len(civic._get_ids(main_data['genes'])) == 2\n assert len(civic._get_ids(main_data['evidence'])) == 1\n\n assert len(civic._get_ids(updated_data['assertions'])) == 1\n assert len(civic._get_ids(updated_data['variants'])) == 1\n assert len(civic._get_ids(updated_data['genes'])) == 1\n assert len(civic._get_ids(updated_data['evidence'])) == 1",
"def testGetSinglePetitionById():\n\texpected_response = \"\"\"\n\t{\"petition_id\":2297566,\"title\":\"The Supreme Court of Missouri: Take the case of Michael Brown, popularly dubbed the 'Ferguson Case', to the Missouri Supreme Court, with Ferguson Officer Darren Wilson as the accused.\",\"status\":\"open\",\"url\":\"https://api.change.org/p/the-supreme-court-of-missouri-take-the-case-of-michael-brown-popularly-dubbed-the-ferguson-case-to-the-missouri-supreme-court-with-ferguson-officer-darren-wilson-as-the-accused\",\"overview\":\"<p>The case of Michael Brown, more often known as the 'Ferguson Case', has received national attention since Michael Brown was shot at least six times, including twice in the head, by Ferguson Officer Darren Wilson. Multiple eyewitness accounts, as well as video footage, contradict statements released by the Ferguson Police Department claiming that the murder was justified. Many media accounts attempted to paint Michael Brown as a 'thug' as a way to justify his murder- Michael Brown was an 18 year old Black boy. This case was brought to the Ferguson Grand Jury, and it was reported on Monday, November 24th, that there would be no indictment of Darren Wilson. Darren Wilson's escaping of indictment, despite the fact that he murdered a teenager, was unjustly decided. This case must be brought to a higher level of court, where a higher level of justice can be fought for.</p>\",\"targets\":[{\"name\":\"The Supreme Court of Missouri\",\"type\":\"custom\"}],\"letter_body\":\"Take the case of Michael Brown, popularly dubbed the 'Ferguson Case', to the Missouri Supreme Court, with Ferguson Officer Darren Wilson as the accused.\", \"image_url\":\"//d22r54gnmuhwmk.cloudfront.net/photos/8/ay/qo/xXaYqoNsmxVzgab-556x313-cropped.jpg\",\"category\":\"Criminal Justice\",\"goal\":200000,\"created_at\":\"2014-11-25T02:37:07Z\",\"end_at\":\"2015-11-25T23:59:59Z\",\"creator_name\":\"Katherine Na\",\"creator_url\":\"https://api.change.org/u/186820136\",\"organization_name\":null,\"organization_url\":null}\n\t\"\"\"\n\tconverted_expected = json.loads(expected_response)\n\tapi = c.Api()\n\toutput = api.getSinglePetitionById(2297566)\n\toutput.pop('signature_count', None)\n\tassert converted_expected == output",
"def test_prefectures_id_get(self):\n pass",
"def test_presenters_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/presenters/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_cyclingleagues_id_get(self):\n pass",
"def test_id(self):\n\n url = '/%s/job-type-names/?id=%d' % (self.api, self.job_type1.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['name'], self.job_type1.name)\n\n url = '/%s/job-type-names/?id=%d&id=%d' % (self.api, self.job_type1.id, self.job_type2.id)\n\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 2)\n\n url = '/%s/job-type-names/?id=%d&id=%d' % (self.api, self.job_type4.id, self.job_type5.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)",
"def test_retrieve_ingredients_assigned_unique(self):\n ingredient = Ingredient.objects.create(user=self.user, name='Salt')\n Ingredient.objects.create(user=self.user, name='suggar')\n recipe_1 = Recipe.objects.create(\n user=self.user, title='Massefouf',\n time_minutes=30, price=5\n )\n recipe_2 = Recipe.objects.create(\n user=self.user, title='Chakchouka',\n time_minutes=30, price=5\n )\n recipe_1.ingredients.add(ingredient)\n recipe_2.ingredients.add(ingredient)\n\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)",
"def test_metrostations_id_get(self):\n pass",
"def test_user_id_identities_get(self):\n pass",
"def test_id(self):\n\n url = '/%s/job-types/?id=%d' % (self.api, self.job_type1.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['name'], self.job_type1.name)\n\n url = '/%s/job-types/?id=%d&id=%d' % (self.api, self.job_type1.id, self.job_type2.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 2)\n\n url = '/%s/job-types/?id=%d&id=%d' % (self.api, self.job_type4.id, self.job_type5.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 2)",
"def test_get_recipe_by_id(self):\n recipe = self.request_mgr.get_recipe_by_id(35354)\n self.assertIn(\"Guinness\", recipe.get('title'))",
"def test_retrive_ingredients_assigned_to_unique(self):\n ingredient = Ingredients.objects.create(user=self.user, name='supper')\n Ingredients.objects.create(user=self.user, name='meetha')\n\n recipe1 = Recipe.objects.create(\n title='aloo matar',\n time_minutes=15,\n price=50,\n user=self.user\n )\n recipe1.ingredients.add(ingredient)\n\n recipe2 = Recipe.objects.create(\n title='burger',\n time_minutes='10',\n price=500,\n user=self.user\n )\n recipe2.ingredients.add(ingredient)\n\n res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})\n\n self.assertEqual(len(res.data), 1)",
"def test_retrieve_ingredient_assigned_unique(self):\n ingredient = Ingredient.objects.create(user=self.user, name='eggs')\n Ingredient.objects.create(user=self.user, name='cheese')\n recipe1 = Recipe.objects.create(title='eggs benedict', time_minutes=30, price=12, user=self.user)\n recipe1.ingredients.add(ingredient)\n recipe2 = Recipe.objects.create(title='poached eggs and beans', time_minutes=30, price=15, user=self.user)\n recipe2.ingredients.add(ingredient)\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n self.assertEqual(len(res.data), 1)",
"def test_get_specific_by_id(self):\n token = self.get_token()\n self.client.post('/api/v2/party', data=self.add_party,\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json')\n response = self.client.get('/api/v2/party/1',\n headers=dict(Authorization=\"Bearer \" + token),\n content_type='application/json',\n )\n self.assertEqual(response.status_code, 200)",
"def test_multiple_ids_in_query_params(self):\n url = '/identities?ids[]={0}&ids[]={1}'.format(\n self.miles.pk, self.john.pk)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n expected = {\n 'data': {\n 'type': 'users',\n 'id': encoding.force_text(self.john.pk),\n 'attributes': {\n 'first_name': self.john.first_name,\n 'last_name': self.john.last_name,\n 'email': self.john.email\n }\n }\n }\n\n json_content = json.loads(response.content.decode('utf8'))\n links = json_content.get(\"links\")\n meta = json_content.get(\"meta\").get('pagination')\n\n self.assertEquals(expected.get('user'), json_content.get('user'))\n self.assertEquals(meta.get('count', 0), 2)\n self.assertEqual(\n sorted(\n 'http://testserver/identities?ids%5B%5D=2&ids%5B%5D=1&page=2'\n .split('?')[1].split('&')\n ),\n sorted(\n links.get(\"next\").split('?')[1].split('&'))\n )\n self.assertEqual(meta.get(\"page\"), 1)",
"def test_drugs_id_get(self):\n pass",
"def test_get_recipe_price_breakdown_by_id(self):\n pass",
"def test_retrieve_ingredients_assigned_unique(self):\n\n ingredient = Ingredient.objects.create(user=self.user, name='Eggs')\n Ingredient.objects.create(user=self.user, name='Parmaggiano Cheese')\n\n recipe1 = Recipe.objects.create(\n title='Eggs Benedict',\n time_minutes=30,\n price=12.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title='Coriander Eggs on Toast',\n time_minutes=20,\n price=5.00,\n user=self.user\n )\n recipe1.ingredients.add(ingredient)\n recipe2.ingredients.add(ingredient)\n\n response = self.client.get(URL_INGREDIENTS, {'assigned_only': True})\n\n self.assertEqual(len(response.data), 1)",
"def test_visualize_recipe_nutrition_by_id(self):\n pass",
"def test_get_interest_by_id_no_course(self):\n id = self.list_3.pk\n url = reverse('xds_api:interest-list', args=(id,))\n response = self.client.get(url)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(responseDict[\"experiences\"]), 0)\n self.assertEqual(responseDict[\"name\"], self.list_3.name)",
"def test_visualize_recipe_equipment_by_id(self):\n pass"
] | [
"0.7277272",
"0.6892533",
"0.662501",
"0.6578746",
"0.6576278",
"0.6478511",
"0.64663815",
"0.6407937",
"0.6342147",
"0.62757206",
"0.6240228",
"0.62282765",
"0.6172308",
"0.6147652",
"0.61143816",
"0.6091003",
"0.60813",
"0.6016178",
"0.6005982",
"0.5984068",
"0.5963086",
"0.5911123",
"0.59017205",
"0.5898211",
"0.5897044",
"0.589539",
"0.5887852",
"0.5886232",
"0.58725005",
"0.5865621"
] | 0.79930466 | 0 |
Test to check if getting the number of signatures on a petition works correctly | def testGetSignatureCountOnPetition():
api = c.Api()
output = api.getSignatureCountOnPetition(2297566)
if type(output) is int:
assert True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def signature_length(self):",
"def test__len__(self):\n assert len(FSignature([forge.arg('a')])) == 1",
"def test_count_publications(self):\n pass",
"def test_signature_verification(self):\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n\n lines = SIGNATURES.split('\\n')\n\n # empty keyring\n keyring = ima_file_signatures.ImaKeyring()\n self.assertTrue(ima.process_measurement_list(lines, ima_keyring=keyring) is None)\n\n # add key for 1st entry; 1st entry must be verifiable\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:1], ima_keyring=keyring) is not None)\n self.assertTrue(ima.process_measurement_list(lines[1:2], ima_keyring=keyring) is None)\n\n # add key for 2nd entry; 1st & 2nd entries must be verifiable\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n self.assertTrue(ima.process_measurement_list(lines[0:2], ima_keyring=keyring) is not None)",
"def test_dig_sig(self):\n\n for using in [HashTypes.SHA1, HashTypes.SHA2, ]:\n self.do_test_dig_sig(using)",
"def test_sent_count(self):\n self.assertEqual(1, self.alice_storage.sent_count)\n self.assertEqual(1, self.bob_storage.sent_count)\n self.assertEqual(2, self.carol_storage.sent_count)\n self.assertEqual(0, self.anonymous_storage.sent_count)",
"def test_getSignature(self):\n self.assertTrue(ChangeType().getSignature(0) is not '')",
"def testSignature(self):\n raw_data = copy.deepcopy(TEST_DATA)\n # Preserve only the first stack.\n raw_data['subtree_stacks'] = raw_data['subtree_stacks'][0:1]\n root_frame = (\n raw_data['subtree_stacks'][0]['frames'][raw_data['subtree_root_depth']])\n\n # Check that the function gets truncated to the max length.\n root_frame['function_name'] = 'x' * (SIGNATURE_MAX_LENGTH + 1)\n uma_data = UMASamplingProfilerData(\n raw_data, ChromeDependencyFetcher(self.GetMockRepoFactory()))\n self.assertEqual(uma_data.signature, 'x' * SIGNATURE_MAX_LENGTH)\n\n # Check that unsymbolized functions are properly handled.\n del root_frame['function_name']\n uma_data = UMASamplingProfilerData(\n raw_data, ChromeDependencyFetcher(self.GetMockRepoFactory()))\n self.assertEqual(uma_data.signature, 'unsymbolized function')",
"def test_photo_count(self):\n self.assertEqual(self.test_travelogue.photo_count(), 2)\n self.pl.is_public = False\n self.pl.save()\n self.assertEqual(self.test_travelogue.photo_count(), 1)\n\n # Method takes an optional 'public' kwarg.\n self.assertEqual(self.test_travelogue.photo_count(public=False), 2)",
"def test_gethint_signature_error(self):\r\n mock_module = CHModuleFactory.create()\r\n old_answers = copy.deepcopy(mock_module.previous_answers)\r\n old_user_submissions = copy.deepcopy(mock_module.user_submissions)\r\n json_in = {'problem1': 'fish'}\r\n out = mock_module.get_hint(json_in)\r\n self.assertTrue(out is None)\r\n self.assertTrue(mock_module.previous_answers == old_answers)\r\n self.assertTrue(mock_module.user_submissions == old_user_submissions)",
"def test_abcdee():\n assert part_01.count_for('abcdee', 2) == 1\n assert part_01.count_for('abcdee', 3) == 0",
"def test_wrong_signature(self):\r\n response = requests.post(self.launch_uri, data=self.payload)\r\n self.assertIn('Wrong LTI signature', response.content)",
"def test_get_policy_number(mock_send_message):\n A1sim.get_policy_number(BASE_URL)\n mock_send_message.assert_called_once_with('GET',\n 'Get policy numbers for ric',\n (f\"{BASE_URL}/counter/num_instances\"))",
"def verify():",
"def checksignature(self,activeinputs):\n for i in ['1','2','3']:\n inps=''\n start=1\n for j in activeinputs[i]:\n inps=inps+(int(j[0])-start)*'0'+'1'\n start=int(j[0])+1\n print 'checksignature inps= ',inps\n if inps != '': \n cmd=\"FindSignatures(\"+i+\",\"+'\"'+inps+'\"'+\")\"\n output=self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")\n print i,'output=',output,len(output)\n for j in range(0,len(output)-1,2):\n k=self.findinput(output[j],i)\n print k,' k j ',j\n if k != None:\n print 'checksignature: ',j,output[j],k,self.inputs[k].name\n self.inputs[k].signatureM=output[j+1]",
"def count():",
"def test_create_image_signature(self):\n pass",
"def do_test_dig_sig(self, hashtype):\n\n if hashtype == HashTypes.SHA1:\n sha = hashes.SHA1\n elif hashtype == HashTypes.SHA2:\n sha = hashes.SHA256\n sk_priv = rsa.generate_private_key(\n public_exponent=65537,\n key_size=1024, # cheap key for testing\n backend=default_backend())\n sk_ = sk_priv.public_key()\n\n print(\"WARNING: cannot use hashlib's sha code with pyca cryptography\")\n print(\"WARNING: pyca cryptography does not support sha3/keccak\")\n\n signer = sk_priv.signer(\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n\n count = 64 + self.rng.next_int16(192) # [64..256)\n data = bytes(self.rng.some_bytes(count))\n\n signer.update(data)\n signature = signer.finalize() # a binary value; bytes\n\n # BEGIN interlude: conversion to/from base64, w/ 76-byte lines\n b64sig = base64.encodebytes(signature).decode('utf-8')\n sig2 = base64.decodebytes(b64sig.encode('utf-8'))\n self.assertEqual(sig2, signature)\n # END interlude ---------------------------------------------\n\n verifier = sk_.verifier(\n signature,\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n verifier.update(data)\n\n try:\n verifier.verify()\n # digital signature verification succeeded\n except InvalidSignature:\n self.fail(\"dig sig verification unexpectedly failed\")\n\n # twiddle a random byte in data array to make verification fail\n data2 = bytearray(data)\n which = self.rng.next_int16(count)\n data2[which] = 0xff & ~data2[which]\n data3 = bytes(data2)\n\n verifier = sk_.verifier(\n signature, # same digital signature\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n verifier.update(data3)\n\n try:\n verifier.verify()\n self.fail(\"expected verification of modified message to fail\")\n\n except InvalidSignature:\n pass # digital signature verification failed",
"def test_total_renegotiations(self):\n connection = Connection(Context(SSLv23_METHOD), None)\n assert connection.total_renegotiations() == 0",
"def test_counter_proposal_offer(self):\n pass",
"def testArticleCount(self):\n\n self.articleCount(17)",
"def test_get_length(t_list):\n if not get_length(t_list) == 10:\n raise ValueError(\"Wrong number of transactions\")",
"def test_upload_count(self):\n conn = initialize_connection()\n db = conn.picdb\n coll = db.images\n\n num = coll.count_documents({})\n\n self.assertEqual(num, 72389)",
"def test_too_long_signature_validity(self):\n bundle1, bundle2, = self._get_two_bundles(\n bundle1_inception=\"2019-01-01T00:00:00\",\n bundle1_expiration=\"2019-01-22T00:00:00\",\n bundle2_inception=\"2019-01-12T00:00:00\",\n bundle2_expiration=\"2019-06-02T00:00:00\",\n )\n xml = self._make_request(bundle1=bundle1, bundle2=bundle2)\n request = request_from_xml(xml)\n policy = replace(\n self.policy,\n check_bundle_intervals=False, # want to test against ZSK policy, not KSK policy\n check_cycle_length=False, # want to test against ZSK policy, not KSK policy\n )\n with self.assertRaises(KSR_POLICY_SIG_VALIDITY_Violation) as exc:\n validate_request(request, policy)\n self.assertEqual(\n \"Bundle validity 141 days > claimed max_signature_validity 21 days (in bundle test-2)\",\n str(exc.exception),\n )",
"def test_get_signatures_by_participant_id(self):\n response = self.client.open(\n \"/api/signatures/participantId/{participantId}\".format(participantId=789),\n method=\"GET\",\n )\n self.assert200(response, \"Response body is : \" + response.data.decode(\"utf-8\"))",
"def PCTSignatures_create(initSampleCount=None, initSeedCount=None, pointDistribution=None): # real signature unknown; restored from __doc__\n pass",
"def test_unexpected_error_in_signature(self):\n # TODO\n one_process_workflow = \"\"\"buggy://B <- file://A\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow, extra_resource=BuggySignatureResource)\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle while retrieving signature' \\\n ) >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in signature()\")') >= 0, process.error_message\n assert process.error_message.find('Process cannot be considered complete.') >= 0, process.error_message",
"def test_too_short_signature_validity(self):\n bundle1, bundle2, = self._get_two_bundles(\n bundle1_inception=\"2019-01-01T00:00:00\",\n bundle1_expiration=\"2019-01-22T00:00:00\",\n bundle2_inception=\"2019-01-02T00:00:00\",\n bundle2_expiration=\"2019-01-10T00:00:00\",\n )\n xml = self._make_request(bundle1=bundle1, bundle2=bundle2)\n request = request_from_xml(xml)\n policy = replace(\n self.policy, check_bundle_intervals=False, check_cycle_length=False,\n )\n with self.assertRaises(KSR_POLICY_SIG_VALIDITY_Violation) as exc:\n validate_request(request, policy)\n self.assertEqual(\n \"Bundle validity 8 days < claimed min_signature_validity 21 days (in bundle test-2)\",\n str(exc.exception),\n )",
"def run_stats():\r\n\r\n time_signature = {}\r\n time_verification = {}\r\n\r\n # this should speed up things\r\n message = \"Alice, send me 100 bucks. --Bob\"\r\n message = long(sha256(message).hexdigest(), 16)\r\n\r\n curvas = [(i, getattr(nist_curves, i)) for i in dir(nist_curves) if\r\n i.startswith(\"point_p\")]\r\n\r\n for name, point in curvas:\r\n priv_key, pub_key = generate_key_pair(point)\r\n time_signature[name] = []\r\n time_verification[name] = []\r\n\r\n for i in xrange(30):\r\n r, s = sign(message, point, priv_key, time_signature[name])\r\n status = verify(r, s, message, point, pub_key,\r\n time_verification[name])\r\n\r\n print \"%s done\" % (name, )\r\n\r\n return (time_signature, time_verification)",
"def test_new_count(self):\n self.assertEqual(2, self.alice_storage.new_count)\n self.assertEqual(3, self.bob_storage.new_count)\n self.assertEqual(0, self.carol_storage.new_count)\n self.assertEqual(0, self.anonymous_storage.new_count)"
] | [
"0.6730864",
"0.67134356",
"0.6490915",
"0.62044877",
"0.6133203",
"0.60348886",
"0.6005115",
"0.58424556",
"0.57883483",
"0.5781113",
"0.5746445",
"0.57400393",
"0.5731183",
"0.5726611",
"0.57222307",
"0.5709273",
"0.5702754",
"0.5699209",
"0.56738657",
"0.5665725",
"0.5665048",
"0.564698",
"0.5628442",
"0.5609017",
"0.5599451",
"0.55808634",
"0.55799395",
"0.55788094",
"0.55646205",
"0.55629444"
] | 0.7905226 | 0 |
infinite loop to advertise the routing table to all the neighbors. trigger neighbor switch to update routing information instantly. | def broadcast_thread(self):
while True:
try:
logger.info('broadcast routing table (dpid=%s)', dpid_to_str(self.dp.id))
for port_no, port in self.ports.items():
if port.neighbor_switch_dpid:
self.switches[port.neighbor_switch_dpid].add_to_queue((port, self.tbl))
self.switches[port.neighbor_switch_dpid].trigger_update()
time.sleep(self.tbl.advertise_interval)
except:
logger.info('broadcast thread of dpid=%s is killed', dpid_to_str(self.dp.id))
break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _announce(self):\n \n \"Create dictionary to hold key: router, value: all the distances to it\"\n shortestDises = dict()\n\n \"Fill in shortestDises by going through all the values in routing table\" \n for disToDests in self.routingTable.values():\n for dest in disToDests:\n shortestDises[dest] = min(shortestDises.get(dest, (100, 100)), disToDests[dest])\n \n \"Send the update packet to self's neighbors\"\n for neighbor in self.routingTable:\n updatePacket = RoutingUpdate()\n disPortToNeighbor = self.routingTable[neighbor][neighbor]\n \n \"Add destinations(except neighbor) and shortest distances into the update packet\"\n for dest in shortestDises:\n if dest != neighbor:\n \n \"Poison reverse\"\n if shortestDises[dest][1] == disPortToNeighbor[1]:\n distance = 100\n else:\n distance = shortestDises[dest][0]\n \n updatePacket.add_destination(dest, distance)\n \n \"Sending update packet after construction\"\n self.send(updatePacket, disPortToNeighbor[1], False)",
"def send(self):\n while True:\n for neighbor_name in self.neighbors:\n if not self.neighbors[neighbor_name].is_killed:\n if self.neighbors[neighbor_name].update_ready:\n self.send_update(self.neighbors[neighbor_name])\n if self.neighbors[neighbor_name].linkup_ready:\n self.send_linkup(self.neighbors[neighbor_name])\n if self.neighbors[neighbor_name].linkdown_ready:\n self.send_linkdown(self.neighbors[neighbor_name])",
"def route_update(self, neighbor, dv_list):\n neighbor.is_killed = False\n neighbor.kill_timer = time.time()\n neighbor.dv_update(dv_list)\n # Iterate to see if new node is included in the graph.\n for name in neighbor.distance_vector:\n if name not in self.distance_vector:\n self.distance_vector[name] = Router.OtherRouter(float('Inf'), None)\n if self.update_dv():\n for name in self.neighbors:\n self.neighbors[name].update_ready = True\n self.neighbors[name].send_timer = time.time()",
"def advertise_route_to_neighbors(self, destination):\n distance_vector = self.hosts_to_ports[destination]\n self.handle_proper_packet(distance_vector.port, destination, distance_vector.latency, True)\n self.handle_poison_packet(distance_vector.port, destination)",
"def deploy_routing_table(self):\n for subnet, entry in self.tbl.items():\n if entry.neighbor_port:\n self.deploy_flow_entry(subnet=subnet, outport=entry.receive_port, dstport=entry.neighbor_port)",
"def link_up_respond(self, neighbor):\n neighbor.is_killed = False\n neighbor.send_timer = time.time()\n neighbor.kill_timer = time.time()\n if self.update_dv():\n for name in self.neighbors:\n self.neighbors[name].update_ready = True\n self.neighbors[name].send_timer = time.time()",
"def receive_routing_table(self, router):\n for network, distance in router.networks.items():\n # Only if the network doesn't exist in current routing table or\n # current distance is more than new info then add the new info\n if (network not in self.networks or\n self.networks[network] > distance + 1):\n self.networks[network] = distance + 1",
"def addNeighbor(self, neighbor):",
"def send(self, name):\n router = self.routers[name]\n for neighbour in router.neighbours:\n neighbour = self.routers[neighbour]\n neighbour.receive_routing_table(router)",
"def handle_link_up (self, port, latency):\n self.neighbours[port] = latency\n for dest in self.routesToDest:\n packet = basics.RoutePacket(dest, self.routesToDest[dest][1])\n self.send(packet, port, False)",
"def recv(self):\n while True:\n data, useless = self.sok.recvfrom(1024)\n lines = data.split('\\n')\n neighbor_name = lines[1]\n if neighbor_name in self.neighbors:\n if lines[0] == 'ROUTE UPDATE':\n self.route_update(self.neighbors[neighbor_name], lines[2:])\n elif lines[0] == 'LINK UP':\n self.link_up_respond(self.neighbors[neighbor_name])\n elif lines[0] == 'LINK DOWN':\n self.link_down_respond(self.neighbors[neighbor_name])",
"def main():\n parser = ArgumentParser()\n parser.add_argument(\"router_id\", help=\"id of the router\")\n parser.add_argument(\"port_no\", help=\"port no. at which the\\\n router is listening\", type=int)\n parser.add_argument(\"router_config_file\", help=\"configuration\\\n file for the router\")\n args = parser.parse_args()\n DATA[\"port\"] = args.port_no\n DATA[\"router_id\"] = args.router_id\n read_config_file(args.router_config_file)\n\n # after the read_config_file func completes we set a variable\n # saying that neighbor data is ready\n global READ_CONFIG_COMP\n READ_CONFIG_COMP = True\n\n initial_dvec_and_forw_insert()\n\n SOCKET1.bind((\"\", DATA[\"port\"])) # converts the port to a listening state\n # print(DATA['neighbor'])\n\n # read thread is listening for incoming messages\n recv_th = Thread(target=recving)\n recv_th.start()\n\n # sending thread sends its distance vector to its direct neighbors\n send_th = Thread(target=sending)\n send_th.start()\n\n # link cost change interface thread\n intf_th = Thread(target=interface_thread, args=(\n args.router_config_file, ), daemon=True)\n intf_th.start()\n\n # find thread is checking if every router is available/alive\n find_th = Thread(target=check_if_alive, daemon=True)\n find_th.start()\n\n # we don't need thread.join because all of our threads\n # are non daemon threads\n\n # SOCKET1.close()\n return",
"def naiveGlobalRouting(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path.append(src_slot)\n\n curr = src_slot\n len_x = src_slot.getLenX()\n len_y = src_slot.getLenY()\n\n # first go in X direction\n x_diff = curr.getPositionX() - dst_slot.getPositionX()\n if x_diff:\n dir = 'LEFT' if x_diff > 0 else 'RIGHT'\n for i in range(int(abs(x_diff/len_x))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n\n y_diff = curr.getPositionY() - dst_slot.getPositionY()\n if y_diff:\n dir = 'DOWN' if y_diff > 0 else 'UP'\n for i in range(int(abs(y_diff/len_y))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n \n assert curr == dst_slot\n \n slot_path = slot_path[1:-1] # exclude the src and the dst\n logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))\n self.e_name2path[e.name] = slot_path",
"def main(edges=[(0, 1, 3), (1, 3, 4), (2, 3, 3), (0, 2, 2) ], num=4):\n\n # initialize routers array\n routers = []\n for x in range(num):\n routers.append([1000] * num)\n routers[x][x] = 0\n \n # set distance to all neighbours \n for edge in edges:\n routers[edge[0]][edge[1]] = edge[2]\n routers[edge[1]][edge[0]] = edge[2]\n\n start_table = routers.copy()\n\n flag = True\n while flag:\n upflag = False\n for nbrs in edges:\n routers[nbrs[0]], up_flag1 = update_table(routers[nbrs[0]], routers[nbrs[1]], dist=nbrs[2])\n routers[nbrs[1]], up_flag2 = update_table(routers[nbrs[1]], routers[nbrs[0]], dist=nbrs[2])\n upflag = upflag or up_flag1 or up_flag2\n\n flag = upflag\n\n return start_table, routers",
"def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)",
"def setup_intervlan_host_routes(self):\n if self.routers:\n for src in self.host_information:\n src_host = self.host_information[src]['host']\n src_vlan = self.host_information[src]['vlan']\n src_ip = self.host_information[src]['ip']\n for dst in self.host_information:\n if src != dst:\n dst_host = self.host_information[dst]['host']\n dst_vlan = self.host_information[dst]['vlan']\n dst_ip = self.host_information[dst]['ip']\n if src_vlan != dst_vlan and self.is_routed_vlans(src_vlan, dst_vlan):\n src_faucet_vip = self.faucet_vips[src_vlan]\n dst_faucet_vip = self.faucet_vips[dst_vlan]\n self.add_host_route(src_host, dst_ip, src_faucet_vip.ip)\n self.add_host_route(dst_host, src_ip, dst_faucet_vip.ip)",
"def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])",
"def send_update(self, neighbor):\n message = 'ROUTE UPDATE'\n source = ':'.join([self.name[0], str(self.name[1])])\n dv = []\n for others in self.distance_vector:\n others_sep = others.split(':')\n dv.append(','.join([others_sep[0], others_sep[1], str(self.distance_vector[others].cost)]))\n dv = '\\n'.join(dv)\n to_send = '\\n'.join([message, source, dv])\n neighbor.sok.sendto(to_send, (neighbor.addr, neighbor.port))\n neighbor.send_timer = time.time()\n neighbor.update_ready = False",
"def handle_timer(self):\n\n for dest in self.hosts_to_unused_ports:\n self.hosts_to_unused_ports[dest] = [host for host in self.hosts_to_unused_ports[dest] if api.current_time() != host.time_to_live] \n self.hosts_to_ports[dest] = self.find_minium_latency_unused_ports(self.hosts_to_unused_ports[dest])\n\n #Send the reachable routes (must be less than infinity)\n for dest in self.hosts_to_ports:\n if self.hosts_to_ports[dest].latency < INFINITY: \n distance_vector = self.hosts_to_ports[dest] \n host_latency = distance_vector.latency\n\n distance_vector = self.hosts_to_ports[dest]\n\n # Send normal route packet\n packet = basics.RoutePacket(dest, host_latency)\n self.send(packet, distance_vector.port)\n\n # Send poison packet if POISON_MODE is true\n if self.POISON_MODE == True:\n poison_packet = basics.RoutePacket(dest, INFINITY)\n self.send(poison_packet, distance_vector.port)",
"def link_down_respond(self, neighbor):\n neighbor.is_killed = True\n if self.update_dv():\n for name in self.neighbors:\n self.neighbors[name].update_ready = True\n self.neighbors[name].send_timer = time.time()",
"def __init__ (self):\n self.start_timer() # Starts calling handle_timer() at correct rate\n self.neighbours = dict()\n self.routesToDest = dict()\n self.hosts = dict()",
"def _do_probe(self):\n self._do_expire() \n\n server = self.servers.pop(0)\n self.servers.append(server)\n\n r = arp()\n r.hwtype = r.HW_TYPE_ETHERNET\n r.prototype = r.PROTO_TYPE_IP\n r.opcode = r.REQUEST\n r.hwdst = ETHER_BROADCAST\n r.protodst = server #send arp request to server popped above one by one\n r.hwsrc = self.mac #mac address of switch (connected to controller)\n r.protosrc = self.service_ip #switch is sending arp\n e = ethernet(type=ethernet.ARP_TYPE, src=self.mac,\n dst=ETHER_BROADCAST)\n e.set_payload(r)\n # self.log.debug(\"ARPing for %s\", server)\n msg = of.ofp_packet_out()\n msg.data = e.pack()\n msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD)) #flood to all the port \n msg.in_port = of.OFPP_NONE\n self.con.send(msg)\n\n self.outstanding_probes[server] = time.time() + self.arp_timeout ## disctionary of (IP : expire_time)\n # {IPAddr('10.0.0.1'): 1612113642.861805})\n # {IPAddr('10.0.0.2'): 1612113654.816535}) likewise\n\n core.callDelayed(self._probe_wait_time, self._do_probe) ## Alternate way for simple timers:\n # core.callDelayed(10, function to call)",
"def run(self):\n self.update_link_statistics()\n self.send_packet()",
"def run_forever(self):\n scapy.sniff(prn=self.arp_cb, filter=\"arp\", store=0, count=0)",
"def set_routing(self, rinfo):\n\n self.routing = [ self.Routing(*r) for r in rinfo ]",
"def addSNMPRoutes(self, routingtable):\n\n ipCidrRouteDest = \"\"\n ipCidrRouteNextHopAS = \"\"\n ipCidrRouteMetric1 = 0\n ipCidrRouteMetric2 = 0\n ipCidrRouteMetric3 = 0\n ipCidrRouteMetric4 = 0\n ipCidrRouteMetric5 = 0\n ipCidrRouteStatus = 0\n ipCidrRouteMask = \"\"\n ipCidrRouteTos = 0\n ipCidrRouteNextHop = \"\"\n ipCidrRouteIfIndex = 0\n ipCidrRouteType = 0\n ipCidrRouteProto = 0\n ipCidrRouteAge = 0\n ipCidrRouteInfo = 0\n\n for loop_rtIndex in routingtable:\n for ifAttr in routingtable[loop_rtIndex]:\n if ifAttr == 1:\n ipCidrRouteDest = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 10:\n ipCidrRouteNextHopAS = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 11:\n ipCidrRouteMetric1 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 12:\n ipCidrRouteMetric2 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 13:\n ipCidrRouteMetric3 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 14:\n ipCidrRouteMetric4 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 15:\n ipCidrRouteMetric5 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 16:\n ipCidrRouteStatus = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 2:\n ipCidrRouteMask = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 3:\n ipCidrRouteTos = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 4:\n ipCidrRouteNextHop = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 5:\n ipCidrRouteIfIndex = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 6:\n ipCidrRouteType = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 7:\n ipCidrRouteProto = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 8:\n ipCidrRouteAge = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 9:\n ipCidrRouteInfo = routingtable[loop_rtIndex][ifAttr]\n\n self.routingtable[loop_rtIndex] = device_routingtable( \\\n ipCidrRouteDest, ipCidrRouteNextHopAS, ipCidrRouteMetric1, \\\n ipCidrRouteMetric2, ipCidrRouteMetric3, ipCidrRouteMetric4, \\\n ipCidrRouteMetric5, ipCidrRouteStatus, ipCidrRouteMask, \\\n ipCidrRouteTos, ipCidrRouteNextHop, ipCidrRouteIfIndex, \\\n ipCidrRouteType, ipCidrRouteProto, ipCidrRouteAge, \\\n ipCidrRouteInfo)",
"def set_neighbor(self):\n for node in self.node:\n for other in self.node:\n if other.id != node.id and distance.euclidean(node.location, other.location) <= node.com_ran:\n node.neighbor.append(other.id)",
"def run():\n adapter = pygatt.GATTToolBackend()\n ADDRESS_TYPE = pygatt.BLEAddressType.random\n\n # TODO if a thread is killed then this will never reestablish a new one since connections never has elements removed\n while True:\n try:\n for device in adapter.scan():\n if device[\"name\"] == \"DYP Hydrometer\":\n print(\"NEW HYDROMETER FOUND\")\n t = threading.Thread(target=handle_connection, args=(device[\"address\"],))\n t.start()\n except:\n pass\n ptime.sleep(5)",
"def init_neighbor(self, addr, port, weight):\n neighbor_name = ':'.join([addr, str(port)])\n self.neighbors[neighbor_name] = Router.Neighbor(addr, port, weight)\n self.distance_vector[neighbor_name] = Router.OtherRouter(weight, neighbor_name)",
"def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass"
] | [
"0.68915033",
"0.66389716",
"0.6598268",
"0.6571927",
"0.64771855",
"0.6414562",
"0.5976607",
"0.5940105",
"0.5861183",
"0.5837956",
"0.57994616",
"0.57953876",
"0.5771204",
"0.5767448",
"0.57330596",
"0.5663159",
"0.56330067",
"0.56264037",
"0.56159544",
"0.56072795",
"0.5541128",
"0.5523069",
"0.551944",
"0.5519198",
"0.55069923",
"0.5470119",
"0.5446351",
"0.54303545",
"0.5419968",
"0.5407308"
] | 0.71332824 | 0 |
try to process all the queued routing information. | def process_queued_msg(self):
try:
while not self.queue.empty():
port, tbl = self.queue.get()
reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]
self.tbl.update_by_neighbor(reveived_port, port, tbl)
self.deploy_routing_table()
except:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)",
"def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())",
"def processTradeRoutes(self):\n try:\n nextRound = self.currentRound+1\n resultslist = []\n for trID in self.tradeRoutes.keys():\n myTradeRoute = self.tradeRoutes[trID]\n (systemFromID, systemToID, tradeRouteType) = string.split(trID, '-')\n systemFrom = self.systems[systemFromID]\n systemTo = self.systems[systemToID]\n cancel = 0\n warpReq = 0\n # choose trade route type\n if tradeRouteType == 'GEN':\n # update what system sends based on what it makes\n myTradeRoute.AL = systemFrom.prodAL\n myTradeRoute.EC = systemFrom.prodEC\n myTradeRoute.IA = systemFrom.prodIA\n \n # check if trade route is adjacent or requires warp gate capacity\n if systemTo.id in systemFrom.warpGateSystems:\n warpReq = myTradeRoute.getWarpRequired()\n if warpReq > (systemFrom.availWGC-systemFrom.usedWGC) or warpReq > (systemTo.availWGC-systemTo.usedWGC):\n cancel = 1\n elif systemTo.id not in systemFrom.connectedSystems:\n cancel = 1\n \n if (systemFrom.AL >= myTradeRoute.AL and\n systemFrom.EC >= myTradeRoute.EC and\n systemFrom.IA >= myTradeRoute.IA and \n cancel == 0):\n # process trade route\n systemFrom.AL -= myTradeRoute.AL\n systemFrom.EC -= myTradeRoute.EC\n systemFrom.IA -= myTradeRoute.IA\n systemTo.AL += myTradeRoute.AL\n systemTo.EC += myTradeRoute.EC\n systemTo.IA += myTradeRoute.IA\n # deduct properly if empires are different\n empireFrom = self.empires[systemFrom.myEmpireID]\n empireTo = self.empires[systemTo.myEmpireID]\n if empireFrom <> empireTo:\n empireFrom.AL -= myTradeRoute.AL\n empireFrom.EC -= myTradeRoute.EC\n empireFrom.IA -= myTradeRoute.IA\n empireTo.AL += myTradeRoute.AL\n empireTo.EC += myTradeRoute.EC\n empireTo.IA += myTradeRoute.IA\n \n if warpReq > 0:\n systemFrom.usedWGC += warpReq\n systemTo.usedWGC += warpReq\n \n # mail trade route completion\n resultslist.append('Trade from System:%s to System:%s complete' % (systemFrom.id, systemTo.id))\n self.mailTradeInfo('completed', myTradeRoute, nextRound)\n else:\n cancel = 1\n \n # check if route should be cancelled\n if cancel == 1:\n resultslist.append('cancel trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n elif myTradeRoute.oneTime == 1:\n resultslist.append('one time trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n \n return str(resultslist)\n except:\n return 'galaxy->processTradeRoutes error'",
"def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)",
"def queueStatusAll():",
"def process(self, agent):\n self._process_internal_ports()\n self.process_external(agent)\n # Process static routes for router,using command: ip router xxx\n self.routes_updated()\n\n # Update ex_gw_port and enable_snat on the router info cache\n self.ex_gw_port = self.get_ex_gw_port()\n self.snat_ports = self.router.get(\n l3_constants.SNAT_ROUTER_INTF_KEY, [])\n self.enable_snat = self.router.get('enable_snat')",
"def _process_queue(self, spider):\n site = self.sites.get(spider)\n if not site:\n return\n\n # Delay queue processing if a download_delay is configured\n now = time()\n delay = site.download_delay()\n if delay:\n penalty = delay - now + site.lastseen\n if penalty > 0:\n d = defer.Deferred()\n d.addCallback(self._process_queue)\n call = reactor.callLater(penalty, d.callback, spider)\n site.next_request_calls.add(call)\n d.addBoth(lambda x: site.next_request_calls.remove(call))\n return\n site.lastseen = now\n\n # Process enqueued requests if there are free slots to transfer for this site\n while site.queue and site.free_transfer_slots() > 0:\n request, deferred = site.queue.pop(0)\n if site.closing:\n dfd = defer.fail(Failure(IgnoreRequest()))\n else:\n dfd = self._download(site, request, spider)\n dfd.chainDeferred(deferred)\n\n self._close_if_idle(spider)",
"def process_messages(self):\r\n for p in self._platforms.values():\r\n if p.received_messages > 0:\r\n p.queue_received_messages()\r\n for p in self._platforms.values():\r\n if p.queued_messages > 0:\r\n p.process_queued_messages()",
"def run(self):\n while True:\n path, params = self.path_queue.get()\n errors = check_path(path, **params)\n self.result_queue.put(errors)\n self.path_queue.task_done()",
"def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass",
"def run(self):\n\n self._get_routes()\n self._calculate_emissions()",
"def handle_timer(self):\n\n for dest in self.hosts_to_unused_ports:\n self.hosts_to_unused_ports[dest] = [host for host in self.hosts_to_unused_ports[dest] if api.current_time() != host.time_to_live] \n self.hosts_to_ports[dest] = self.find_minium_latency_unused_ports(self.hosts_to_unused_ports[dest])\n\n #Send the reachable routes (must be less than infinity)\n for dest in self.hosts_to_ports:\n if self.hosts_to_ports[dest].latency < INFINITY: \n distance_vector = self.hosts_to_ports[dest] \n host_latency = distance_vector.latency\n\n distance_vector = self.hosts_to_ports[dest]\n\n # Send normal route packet\n packet = basics.RoutePacket(dest, host_latency)\n self.send(packet, distance_vector.port)\n\n # Send poison packet if POISON_MODE is true\n if self.POISON_MODE == True:\n poison_packet = basics.RoutePacket(dest, INFINITY)\n self.send(poison_packet, distance_vector.port)",
"def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)",
"def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass",
"async def _process_queue(self, callback, socket_info,\n has_heartbeat_seq=True):\n pending_callback = False\n while True:\n unparsed_message = await socket_info.queue.get()\n #log.debug(\"Received: \" + unparsed_message)\n response = json.loads(unparsed_message)\n # Sometimes the response is a list sometimes not. Convert to list.\n message_list = response if type(response) == list else [response]\n if not message_list:\n log.warning(\"Received empty message from Gemini. This isn't a \"\n \"type of response documented in their API docs.\")\n continue\n if message_list[0]['type'] == 'heartbeat':\n if has_heartbeat_seq:\n self._process_heartbeat(message_list[0], socket_info)\n self._check_sequence(message_list[0], socket_info)\n continue\n # A non heartbeat message.\n for message in message_list:\n self._check_sequence(message, socket_info)\n state_update = callback(message)\n if state_update:\n pending_callback = True\n if not socket_info.queue.empty():\n continue\n if pending_callback and self.is_setup():\n self.exchange_state.update_publisher.notify()\n pending_callback = False",
"def dispatch(self, queue):\n context = zmq.Context()\n socket = noBlockREQ(context)\n \n seedsQ1 = Queue()\n seedsQ2 = Queue()\n for address in self.seeds:\n seedsQ1.put(address)\n\n connectT = Thread(target=connectToSeeds, name=\"Connect to Seeds\", args=(socket, seedsQ1))\n connectT.start()\n\n toDisconnectQ = Queue()\n disconnectT = Thread(target=disconnectToSeeds, name=\"Disconnect to Seeds\", args=(socket, toDisconnectQ))\n disconnectT.start()\n\n pFindSeeds = Process(target=findSeeds, name=\"Find Seeds\", args=(set(self.seeds), [seedsQ1], [toDisconnectQ], log, 2000, 10, seedsQ2))\n pFindSeeds.start()\n\n pInput = Process(target=getSeedFromFile, name=\"Get seed from file\", args=(seedsQ1, seedsQ2))\n pInput.start()\n\n graph = {}\n depth = 1\n data = {}\n url_mapper = {url:f\"url_{i}\" for i, url in enumerate(self.urls)}\n \n src = set()\n while True: \n new_data = {}\n while len(self.urls):\n try:\n url = self.urls[0]\n self.urls.pop(0)\n self.urls.append(url)\n with counterSocketReq:\n socket.send_json((\"URL\", self.uuid, url))\n log.debug(f\"Send {url}\", \"dispatch\")\n response = socket.recv_pyobj()\n assert isinstance(response, tuple), f\"Bad response, expected <tuple> find {type(response)}\"\n assert len(response) == 2, \"bad response size\"\n assert response[0] == 'RESPONSE', \"Unexpected response format\"\n _, package = response\n log.debug(f\"Received a package with size: {len(package)}\", \"dispatch\")\n for recv_url, html in package.items():\n try:\n idx = self.urls.index(recv_url)\n log.info(f\"{recv_url} {GREEN}OK{RESET}\", \"dispatch\")\n new_data[recv_url] = html\n self.urls.pop(idx)\n except ValueError:\n log.debug(f'Unnecesary {recv_url}', 'dispatch')\n except AssertionError as e:\n log.error(e, \"dispatch\")\n except zmq.error.Again as e:\n log.debug(e, \"dispatch\")\n except Exception as e:\n log.error(e, \"dispatch\")\n time.sleep(0.8)\n \n log.info(f'Depth {depth} done', 'dispatch')\n for url, html in new_data.items():\n graph[url] = set()\n try:\n text = html.decode()\n soup = BeautifulSoup(html, 'html.parser')\n tags = soup.find_all(valid_tags)\n new_urls = [['src', 'href'][tag.has_attr('href')] for tag in tags]\n changes = []\n for i, attr in enumerate(new_urls):\n url_dir = urljoin(url, tags[i][attr])\n graph[url].add(url_dir)\n if url_dir not in url_mapper:\n url_mapper[url_dir] = f'url_{len(url_mapper)}'\n changes.append((tags[i][attr], url_mapper[url_dir]))\n if attr == 'src' or tags[i].name == 'link':\n src.add(url_dir)\n continue\n self.urls.append(url_dir)\n html = change_html(text, changes).encode()\n except UnicodeDecodeError:\n log.debug(f'{url} is not decodeable', 'dispatch')\n except: # BeautifulSoup strange exceptions related with his's logger\n pass\n new_data[url] = html\n data.update(new_data)\n self.urls = set(self.urls)\n self.urls.difference_update(self.old)\n self.old.update(self.urls)\n self.urls = list(self.urls)\n \n if depth > self.depth:\n break\n if depth == self.depth:\n src.difference_update(self.old)\n self.old.update(src)\n self.urls = list(src)\n depth += 1\n log.info(f\"Number of URLs to be requested for download: {RED}{len(self.urls)}{RESET}\", \"dispatch\")\n \n log.info(f\"Starting to write data\", \"dispatch\")\n for i, url in enumerate(self.originals):\n try:\n res = HtmlResponse(url=url, body=data[url], encoding='utf8')\n base = res.css('title::text')[0].get()\n except:\n base = f\"web_page_{i}\"\n try:\n os.makedirs(f'downloads/{base}-data')\n except:\n pass\n writer(f'downloads/{base}-data', url, set(), data, 
url_mapper, graph) \n \n html = data[url]\n if len(graph[url]) > 0:\n text = data[url].decode()\n changes = []\n for dep in graph[url]:\n name = url_mapper[dep]\n changes.append((name, f'{base}-data/{name}'))\n html = change_html(text, changes).encode()\n with open(f'downloads/{base}', 'wb') as fd:\n fd.write(html)\n \n log.info(f\"Dispatcher:{self.uuid} has completed his URLs succefully\", \"dispatch\")\n log.debug(f\"Dispatcher:{self.uuid} disconnecting from system\", \"dispatch\")\n #disconnect\n\n queue.put(True)\n pFindSeeds.terminate()\n pInput.terminate()",
"def collect_results(self) -> None:\n ready = multiprocessing.connection.wait(\n self.waitables.keys() - [self._direct_scheduler_conn], timeout=0\n )\n\n for sentinel in ready:\n if sentinel is self._direct_scheduler_conn:\n continue\n processor = cast(DagFileProcessorProcess, self.waitables[sentinel])\n self.waitables.pop(processor.waitable_handle)\n self._processors.pop(processor.file_path)\n self._collect_results_from_processor(processor)\n\n self.log.debug(\"%s/%s DAG parsing processes running\", len(self._processors), self._parallelism)\n\n self.log.debug(\"%s file paths queued for processing\", len(self._file_path_queue))",
"def populatereadyqueue():\n readyQueue.put(Process(\"P1\", time(0, 0, 1), time(0, 0, 4)))\n readyQueue.put(Process(\"P2\", time(0, 0, 2), time(0, 0, 6)))\n readyQueue.put(Process(\"P3\", time(0, 0, 3), time(0, 0, 2)))",
"def process_deferred_queue(self):\n\n self.process_queue(self.deferred_queue)\n\n if self.depth_counter == 0:\n self.process_queue(self.complex_deferred_queue)",
"def _manager_main(self, queue):\r\n for task in self._task_generator():\r\n queue.put(task)",
"def _finish_pending_requests(self) -> None:\n while True:\n num_q, ok_list, err_list = self._multi.info_read()\n for curl in ok_list:\n self._finish(curl)\n for curl, errnum, errmsg in err_list:\n self._finish(curl, errnum, errmsg)\n if num_q == 0:\n break\n self._process_queue()",
"def set_routing(self, rinfo):\n\n self.routing = [ self.Routing(*r) for r in rinfo ]",
"def process_messages(self):\n pass",
"def process_thread(self):",
"def handle_arrivals(self, planes):\n # Immediately return if there are no planes to handle.\n if not planes: return\n\n # Get the subsection indices for dom/intl queues.\n #id_domestic = 0 if self.subsections[0].id == 'domestic' else 1\n #id_foreign = 0 if self.subsections[0].id == 'foreign' else 1\n\n # Loop through the list of Planes.\n for plane in planes:\n\n # While the plane still has passengers on it...\n while len(plane.plist) > 0:\n\n passenger = plane.plist.pop()\n\n for subsection in self.subsections:\n \n # If national, move to national queue.\n if passenger.nationality == subsection.id :\n subsection.assignment_agent.queue.append(passenger)\n break\n \n\n # If intl, move to intl queue.\n #elif plane.plist[-1].nationality == \"foreign\":\n # self.subsections[id_foreign].assignment_agent.queue.append(\n # plane.plist.pop())",
"def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())",
"def _collectCallback(self):\n log.debug(\"Scanning for processes from %s [%s]\",\n self._devId, self._manageIp)\n\n self.state = ZenProcessTask.STATE_SCANNING_PROCS\n\ttables = [NAMETABLE, PATHTABLE, ARGSTABLE]\n\tif AS400PLUG in self._device.zCollectorPlugins: tables = [AS400NAME]\n try:\n tableResult = yield self._getTables(tables)\n summary = 'Process table up for device %s' % self._devId\n self._clearSnmpError(\"%s - timeout cleared\" % summary, 'table_scan_timeout')\n if self.snmpConnInfo.zSnmpVer == 'v3':\n self._clearSnmpError(\"%s - v3 error cleared\" % summary, 'table_scan_v3_error')\n processes = self._parseProcessNames(tableResult)\n self._clearSnmpError(summary, 'resource_mib')\n self._deviceStats.update(self._device)\n processStatuses = self._determineProcessStatus(processes)\n self._sendProcessEvents(processStatuses)\n self._clearSnmpError(summary)\n yield self._fetchPerf()\n log.debug(\"Device %s [%s] scanned successfully\",\n self._devId, self._manageIp)\n except HostResourceMIBExecption as e:\n summary = 'Device %s does not publish HOST-RESOURCES-MIB' %\\\n self._devId\n resolution = \"Verify with snmpwalk %s %s\" %\\\n (self._devId, NAMETABLE )\n log.warn(summary)\n self._sendSnmpError(summary, \"resource_mib\", resolution=resolution)\n\n except error.TimeoutError as e:\n log.debug('Timeout fetching tables on device %s' % self._devId)\n self._sendSnmpError('%s; Timeout on device' % PROC_SCAN_ERROR % self._devId, 'table_scan_timeout')\n except Snmpv3Error as e:\n msg = \"Cannot connect to SNMP agent on {0._devId}: {1.value}\".format(self, str(e))\n log.debug(msg)\n self._sendSnmpError('%s; %s' % (PROC_SCAN_ERROR % self._devId, msg), 'table_scan_v3_error')\n except Exception as e:\n log.exception('Unexpected Error on device %s' % self._devId)\n msg = '%s; error: %s' % (PROC_SCAN_ERROR % self._devId, e)\n self._sendSnmpError(msg)",
"def _multicastRouteIncomingMessages(self):\n with self.lock:\n with self.multicast.lock:\n while self.multicast.inbox.qsize() > 0:\n packet = self.multicast.inbox.get()\n for destinationUUID in packet.keys():\n if self.inbox.has_key(destinationUUID):\n # Copy the message from the packet to the\n # inbox with the correct destination.\n message = packet[destinationUUID]\n self.inbox[destinationUUID].put(message)",
"def process_queue(self, queue):\n\n while queue:\n deferred, data = queue.popleft()\n deferred.callback(data)",
"def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])"
] | [
"0.61211884",
"0.6118696",
"0.5979198",
"0.5806326",
"0.5766877",
"0.5762818",
"0.5662577",
"0.56588477",
"0.5649565",
"0.5638053",
"0.55420285",
"0.5537676",
"0.55065197",
"0.5502671",
"0.54951775",
"0.54863334",
"0.54849994",
"0.5483143",
"0.5479529",
"0.5476768",
"0.54686904",
"0.5451105",
"0.5450134",
"0.5438081",
"0.5437226",
"0.5431594",
"0.54035026",
"0.5403107",
"0.53695667",
"0.5363207"
] | 0.7076636 | 0 |
return ARP table as a list of dictionaries. | def get_arp_list(self):
arp_list = []
for ip, value in self.ip_to_mac.items():
arp_list.append({'ip': str(ip),
'hw_addr': str(value[0]),
'last_update': datetime.datetime.fromtimestamp(value[1]).strftime('%Y-%m-%d %H:%M:%S')})
return arp_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_arp_table(self, vrf=\"\"):\n\n arp_table = []\n output = self._send_command('/ip arp print terse')\n\n arps = parse_terse_output(output)\n\n for arp in arps:\n if arp.get('mac-address'):\n arp_table.append({\n 'interface': arp.get('interface'),\n 'mac': cast_mac(arp.get('mac-address')),\n 'ip': arp.get('address'),\n 'age': -1.0,\n })\n\n return arp_table",
"def get_mac_address_table(self):\n\n mac_address_table = []\n command = '/interface bridge host print terse'\n\n output = self._send_command(command)\n\n for host in parse_terse_output(output):\n mac_address_table.append({\n 'mac': cast_mac(host.get('mac-address')),\n 'interface': host.get('interface'),\n 'vlan': -1,\n 'static': True if 'D' not in host.get('_flags') else False,\n 'active': True if 'X' not in host.get('_flags') else False,\n 'moves': -1,\n 'last_move': -1.0\n })\n\n return mac_address_table",
"def get_arp_table():\n IP = ''\n login = ''\n password = ''\n telnet = pexpect.spawn('telnet {}'.format(IP), timeout=30)\n telnet.expect('Username:')\n telnet.sendline(login)\n telnet.expect('Password:')\n telnet.sendline(password)\n telnet.expect('#')\n telnet.sendline('terminal length 0')\n telnet.expect('#')\n telnet.sendline('show arp')\n telnet.expect('#')\n arp_table = telnet.before.decode('utf-8')\n telnet.close()\n return arp_table",
"def _parse_mac_addr_table(self, cmd_output, mac_regex):\n lines = ensure_string(cmd_output).split(\"\\n\")\n\n arp_table = defaultdict(list)\n for line in lines:\n match = mac_regex.match(line)\n\n if not match:\n continue\n\n groups = match.groups()\n ip_address = groups[0]\n mac_address = groups[1]\n arp_table[mac_address].append(ip_address)\n\n return arp_table",
"def _parse_ip_table_arp(self, arp_output):\n arp_regex = re.compile(r\".*?\\((.*?)\\) at (.*?)\\s+\")\n return self._parse_mac_addr_table(arp_output, arp_regex)",
"def getARP(mapDirection = 0, dev = \"\"):\n arpTable = open('/proc/net/arp')\n result = {}\n for line in arpTable:\n if \"IP address\" in line:\n # Skip header\n continue\n\n ip, hw, flags, mac, mask, device = line.strip('\\n').split()\n\n if dev and device != dev:\n continue\n\n if mapDirection:\n result[ip] = [mac, device]\n else:\n result[mac] = [ip, device]\n\n return result",
"def get_routing_table(self):\n routing_tbl = []\n for subnet, entry in self.tbl.items():\n d = entry.to_dict()\n d['subnet'] = str(subnet)\n routing_tbl.append(d)\n\n return routing_tbl",
"def _listing(self, mac_addresses):\n # Initialize key variables\n preliminary_listing = []\n listing = []\n\n # Cycle through mac addresses, get the manufacturer\n for mac_address in mac_addresses:\n # Get manufacturer\n manufacturer = self._manufacturer(mac_address)\n data_dict = {}\n data_dict['mac_address'] = mac_address\n data_dict['manufacturer'] = manufacturer\n preliminary_listing.append(data_dict)\n\n # Get IP address and hostname for each mac address\n for item in preliminary_listing:\n mac_address = item['mac_address']\n manufacturer = item['manufacturer']\n\n if mac_address in self.rarp_table:\n # MAC address has related IP\n if bool(self.rarp_table[mac_address]) is True:\n for ip_address in self.rarp_table[mac_address]:\n data_dict = {}\n data_dict['mac_address'] = mac_address\n data_dict['manufacturer'] = manufacturer\n data_dict['ip_address'] = ip_address\n data_dict['hostname'] = ''\n\n if ip_address in self.arp_table:\n if 'hostname' in self.arp_table[ip_address]:\n hostname = self.arp_table[\n ip_address]['hostname']\n data_dict['hostname'] = hostname\n\n listing.append(data_dict)\n else:\n # MAC address has no related IP\n data_dict = {}\n data_dict['mac_address'] = mac_address\n data_dict['manufacturer'] = manufacturer\n data_dict['ip_address'] = ''\n data_dict['hostname'] = ''\n listing.append(data_dict)\n\n # Return\n return listing",
"def get_arp_table(cls, client_object, switch_vni=None):\n attribute_map = {'mac address': 'adapter_mac',\n 'ip address': 'adapter_ip'}\n nsxa_socket = cls._get_nsxa_socket(client_object)\n cmd = ('%s -t %s vni/arp-table %s' %\n (cls.CLI, nsxa_socket, switch_vni))\n out = client_object.connection.request(cmd).response_data.split('\\n')\n # Skip the VNI number in the output.\n raw_table_data = '\\n'.join(out[1:])\n header_keys = [\"IP Address\", \"Mac Address\"]\n parser = horizontal_table_parser.HorizontalTableParser()\n parsed_data = parser.get_parsed_data(raw_table_data,\n header_keys=header_keys)\n mapped_pydict = utilities.map_attributes(attribute_map, parsed_data)\n return arp_table_schema.ARPTableSchema(py_dict=mapped_pydict)",
"def to_dict(self) -> dict:\n return self._route_table",
"def get_arp_table(cls, client_object, node_id=None, get_arp_table=None):\n url_parameters = {'transport_node_id': node_id}\n client_class = getlogicalrouterportarptable.GetLogicalRouterPortArpTable # noqa\n return super(NSX70PortImpl, cls).read(client_object,\n client_class=client_class,\n query_params=url_parameters)",
"def scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_request_broadcast = broadcast/arp_request\n answered_list = scapy.srp(arp_request_broadcast , timeout = 1, verbose=False)[0]\n target_list=[]\n for element in answered_list:\n target_dict = {\"ip\":element[1].psrc, \"mac\":element[1].hwsrc}\n target_list.append(target_dict)\n return target_list",
"def address_dict(self):\n new_table = {}\n for record in self._table:\n address = self.build_address(record)\n new_table[address] = record\n return new_table",
"def arp_scan(subnet):\n\n answered = scapy.arping(subnet)[0]\n\n machines = []\n for i in answered:\n ip, mac = i[1].psrc, i[1].hwsrc\n try:\n host = socket.gethostbyaddr(i[1].psrc)[0]\n except Exception:\n host = \"??\"\n machines.append({\"ip\": ip, \"mac\": mac, \"host\": host})\n\n return machines",
"def dump(self, packet):\n #self.print_table()\n src = packet[\"dst\"]\n dst = packet[\"src\"]\n routes_dump = []\n for route in self.routes:\n for verat in route[\"varats\"]:\n routes_dump.append({\"network\": verat[\"network\"], \"netmask\": verat[\"netmask\"],\n \"peer\": route[\"peer\"]})\n \n a = {\"src\": src, \"dst\": dst, \"type\": \"table\", \"msg\": routes_dump}\n return a",
"def tree(self):\n keys = [\n 'FirstTimeSeen',\n 'LastTimeSeen',\n 'channel',\n 'Speed',\n 'Privacy',\n 'Cipher',\n 'Authentication',\n 'Power',\n 'beacons',\n 'IV',\n 'LANIP',\n 'IDlength',\n 'ESSID',\n 'Key']\n\n c_keys = [\n 'Station MAC',\n 'FirstTimeSeen',\n 'LastTimeSeen',\n 'Power',\n 'Packets',\n 'BSSID',\n 'ProbedESSIDs'\n ]\n\n self.update_results()\n aps = {}\n for ap_ in self._aps:\n bssid = ap_.pop(0)\n aps[bssid] = dict(zip(keys, ap_))\n aps[bssid]['clients'] = []\n\n for client in self.clients:\n if client[0] == bssid:\n aps[bssid]['clients'].append(dict(zip(c_keys, client)))\n return aps",
"def search_mac_in_arp(args):\n arp_table = get_arp_table()\n \n mac = convert_mac(args.mac, 2)\n columns = ['IP', 'AGE(min)', 'MAC', 'INTERFACE', 'VENDOR']\n result = []\n for row in arp_table.split('\\n'):\n if mac in row:\n lists = []\n _,ip,age,r_mac,_,interface = row.strip().split()\n r_mac = convert_mac(mac, 1)\n vendor = convert_mac(r_mac, 3)\n vendor = find_in_database(vendor)\n r_mac = blue + r_mac + endblue\n lists = [ip, age, r_mac, interface, vendor]\n result.append(lists)\n else:\n pass\n print(tabulate(result, headers=columns))",
"def remote_login_table_format(result):\n table = []\n for item in result:\n row = OrderedDict()\n row['ID'] = item['nodeId']\n row['IP'] = item['ipAddress']\n row['SSH Port'] = int(item['port'])\n table.append(row)\n return table",
"def test_snmptable_return_structure():\n iftable = snmptable(community='public', ipaddress=SNMP_SRV_ADDR,\n oid=IFTABLE_OID, port=SNMP_SRV_PORT, sortkey='ifIndex')\n assert isinstance(iftable, list)\n assert isinstance(iftable[0], dict)\n assert isinstance(iftable[0]['ifDescr'], str)\n assert iftable[1]['ifDescr'] == 'eth0'",
"def converttable(tablecode):\n table = etree.XML(tablecode)\n rows = iter(table)\n headers = [col.text for col in next(rows)]\n data = []\n for row in rows:\n values = [col.text for col in row]\n debugprint(dict(zip(headers, values)), \"RAW JSON\")\n data.append(dict(zip(headers, values)))\n return data",
"def check_host_arp_table(host, asic, neighbor_ip, neighbor_mac, interface, state, arptable=None):\n\n if arptable is None:\n arptable = asic.switch_arptable()['ansible_facts']\n\n if ':' in neighbor_ip:\n table = arptable['arptable']['v6']\n else:\n table = arptable['arptable']['v4']\n for entry in table:\n logger.debug(\"%s ARP: %s => %s\", host.hostname, entry, table[entry])\n pytest_assert(neighbor_ip in table, \"IP %s not in arp list: %s\" % (neighbor_ip, list(table.keys())))\n pytest_assert(table[neighbor_ip]['macaddress'] == neighbor_mac,\n \"table MAC %s does not match neighbor mac: %s\" % (table[neighbor_ip]['macaddress'], neighbor_mac))\n pytest_assert(table[neighbor_ip]['interface'] == interface,\n \"table interface %s does not match interface: %s\" % (table[neighbor_ip]['interface'], interface))\n if state:\n pytest_assert(table[neighbor_ip]['state'].lower() == state.lower(),\n \"table state %s is not %s\" % (table[neighbor_ip]['state'].lower(), state.lower()))",
"def ra_acl_config_table_get(host_id):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n odu16_profile_id = sqlalche_obj.session.query(\n Hosts.config_profile_id).filter(Hosts.host_id == host_id).all()\n ra_conf_table = sqlalche_obj.session.query(SetOdu16RAConfTable.acl_mode).filter(\n SetOdu16RAConfTable.config_profile_id == odu16_profile_id[0][0]).all()\n ra_acl_config = sqlalche_obj.session.query(SetOdu16RAAclConfigTable.mac_address, SetOdu16RAAclConfigTable.index).filter(\n SetOdu16RAAclConfigTable.config_profile_id == odu16_profile_id[0][0]).order_by(SetOdu16RAAclConfigTable.index).all()\n sqlalche_obj.sql_alchemy_db_connection_close()\n return ra_acl_config, odu16_profile_id, ra_conf_table",
"def getAircraftCodeDict():\n table = 'aircraft'\n connection = openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n aircraft = airlineClasses.Aircraft()\n aircraft.aircraftCode = row[0]\n aircraft.name = row[1]\n d[aircraft.aircraftCode] = aircraft\n \n curs.close()\n connection.close()\n return d",
"def get_list():\n hash_map_list = model.hash_table.find()\n data = dict(success=True, hash_map_list=hash_map_list)\n return data",
"def tabledict(self):\n return dict(table=self.tablename,\n dffeld=self.name_dateifuehrungsschluessel,\n statusfeld=self.name_status,\n schluesselfeld=self.name_schluessel)",
"def reformat_attrTable(\n self):\n #format into a dictionary of rows for quick aligning with the tracking_id\n if self.attrTable: attrTable = self.attrTable[:];\n else: attrTable = [];\n\n attrTable_dict = {};\n for row in attrTable:\n attrTable_dict[row['tracking_id']] = row;\n return attrTable_dict;",
"def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]",
"def display_routing_table(appliances=[],\n credentials=[],\n timeout=120,\n no_check_hostname=False,\n web=False):\n logger = make_logger(\"mast.network\")\n check_hostname = not no_check_hostname\n env = datapower.Environment(\n appliances,\n credentials,\n timeout,\n check_hostname=check_hostname)\n logger.info(\n \"Attempting to retrieve routing table from {}\".format(\n str(env.appliances)))\n\n # try RoutingStatus3 first\n try:\n logger.debug(\"Attempting RoutingStatus3\")\n resp = env.perform_action(\n \"get_status\",\n domain=\"default\",\n provider=\"RoutingStatus3\")\n xpath = datapower.STATUS_XPATH + \"RoutingStatus3\"\n except urllib2.HTTPError:\n logger.warn(\n \"RoutingStatus3 unavailable, falling back to RoutingStatus2\")\n resp = env.perform_action(\n \"get_status\",\n domain=\"default\",\n provider=\"RoutingStatus2\")\n xpath = datapower.STATUS_XPATH + \"RoutingStatus2\"\n logger.debug(\"Response received: {}\".format(resp))\n\n header_row = []\n for host, l in resp.items():\n if not web:\n print host, \"\\n\", \"=\" * len(host), \"\\n\"\n fields = [child.tag for child in l.xml.find(xpath)]\n\n if web:\n if not header_row:\n header_row = list(fields)\n header_row.insert(0, \"Appliance\")\n rows = []\n\n width = len(max(fields, key=len))\n template = \"{:<{width}} \" * len(fields)\n header = template.format(*fields, width=width)\n if not web:\n print header\n\n for item in l.xml.findall(xpath):\n values = [child.text for child in item]\n line = template.format(*values, width=width)\n if web:\n _row = list(values)\n _row.insert(0, host)\n rows.append(_row)\n if not web:\n print line\n if web:\n return flask.render_template(\n \"results_table.html\",\n header_row=header_row,\n rows=rows), util.render_history(env)\n print",
"def tables() -> dict[str, str]:\n return {\n \"land_use\": \"zone_id\",\n \"tours\": \"tour_id\",\n \"trips\": \"trip_id\",\n \"persons\": \"person_id\",\n \"households\": \"household_id\",\n }",
"def get_arp_info(pkt):\n if len(pkt) < 8:\n raise ARPError(\"ARP header too short\")\n ar_hrd, ar_pro, ar_hln, ar_pln, ar_op = struct.unpack(\"!HHBBH\", pkt[0:8])\n pkt_len = 8+(2*ar_hln)+(2*ar_pln)\n if len(pkt) < pkt_len:\n raise ARPError(\"ARP packet too short\")\n ofs = 8\n ar_sha = pkt[ofs:ofs+ar_hln]\n ofs += ar_hln\n ar_spa = pkt[ofs:ofs+ar_pln]\n ofs += ar_pln\n ar_tha = pkt[ofs:ofs+ar_hln]\n ofs += ar_hln\n ar_tpa = pkt[ofs:ofs+ar_pln]\n ofs += ar_pln\n return (ar_hrd, ar_pro, ar_hln, ar_pln,\n ar_op, ar_sha, ar_spa, ar_tha, ar_tpa)"
] | [
"0.7437058",
"0.6826946",
"0.67796856",
"0.6630043",
"0.6517875",
"0.63336134",
"0.63289434",
"0.60332114",
"0.5915936",
"0.58368886",
"0.58107936",
"0.55169",
"0.55130374",
"0.5497963",
"0.5460182",
"0.54279923",
"0.5393707",
"0.5380129",
"0.535425",
"0.5350383",
"0.5320536",
"0.5275031",
"0.52749324",
"0.5274088",
"0.52710754",
"0.5242639",
"0.5220761",
"0.5184716",
"0.5174357",
"0.5156539"
] | 0.7094078 | 1 |
return routing table as a list of dictionaries. | def get_routing_table(self):
routing_tbl = []
for subnet, entry in self.tbl.items():
d = entry.to_dict()
d['subnet'] = str(subnet)
routing_tbl.append(d)
return routing_tbl | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_dict(self) -> dict:\n return self._route_table",
"def all_routing(G, switch_nodes, table_file_name):\n\n table = OrderedDict({})\n for s in switch_nodes:\n for d in switch_nodes:\n if s != d:\n routing(G, s, d, table)\n\n with open(table_file_name, 'w') as file:\n file.write(json.dumps(table))\n\n return table",
"def routes(self) -> dict:\n return dict(self._routes)",
"def get_routes_timetable():\n\n return Db().get_line_ids()",
"def dump(self, packet):\n #self.print_table()\n src = packet[\"dst\"]\n dst = packet[\"src\"]\n routes_dump = []\n for route in self.routes:\n for verat in route[\"varats\"]:\n routes_dump.append({\"network\": verat[\"network\"], \"netmask\": verat[\"netmask\"],\n \"peer\": route[\"peer\"]})\n \n a = {\"src\": src, \"dst\": dst, \"type\": \"table\", \"msg\": routes_dump}\n return a",
"def get_pathways_list(org='hsa'):\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/pathway/', org]))\r\n if resp.status_code == 200:\r\n d = csv.DictReader(resp.text.split('\\n'),\r\n delimiter='\\t',\r\n fieldnames=('id', 'name'))\r\n return [row for row in d]\r\n return {}",
"def route_table(self) -> Optional['outputs.RouteTableResponse']:\n return pulumi.get(self, \"route_table\")",
"def get_routes():\n\n return Db().get_line_ids()",
"def all_routing_tree(G, tors, table_file_name):\n \n table = OrderedDict({})\n for s in G.nodes():\n table[s] = OrderedDict({})\n for s in tors:\n for d in tors:\n if s != d:\n routing(G, s, d, table)\n\n with open(table_file_name, 'w') as file:\n file.write(json.dumps(table))\n return table",
"def get_routes():\n # get from cache if it exists\n routes = cache.get(\"routes\")\n if routes:\n return routes\n\n trips_url = \"https://data.edmonton.ca/api/views/ctwr-tvrd/rows.json?accessType=DOWNLOAD\"\n bus_heading_url = \"https://data.edmonton.ca/resource/atvz-ppyb.json\"\n\n trips_response = requests.get(trips_url)\n bus_heading_response = requests.get(bus_heading_url)\n\n if trips_response.status_code == 200 and bus_heading_response.status_code == 200:\n trips = trips_response.json()\n headings = bus_heading_response.json()\n\n bus_to_headings = {}\n trip_to_bus = {}\n\n for heading in headings:\n if \"route_long_name\" in heading:\n bus_to_headings[heading[\"route_id\"]] = heading[\"route_long_name\"]\n\n for item in trips[\"data\"]:\n trip_id = item[-4]\n bus_number = item[-6]\n if bus_number in bus_to_headings:\n bus_heading = bus_to_headings[bus_number]\n trip_to_bus[trip_id] = [bus_number, bus_heading]\n \n # store the routes in the cache for five minutes\n cache.set(\"routes\", trip_to_bus, timeout=5*60) \n return trip_to_bus",
"def get_route_list_db(agency):\n \n all_routes = Route.objects.filter(agency__agency_tag=agency)\n routes = {}\n for r in all_routes:\n routes[r.route_tag] = r.title\n\n return routes",
"def routes():\n routeList = []\n for profile in globalProfile():\n routeList.append(profile.route)\n return routeList",
"def as_dict(self):\n data = super(Route, self).as_dict()\n data['family'] = self._get_family()\n return data",
"def buildRoutesDict(self):\n \n # create route number and name xref dictionary\n arcpy.env.workspace = PublicTransit.RTD_PATH\n routes = arcpy.SearchCursor(PublicTransit.BUS_ROUTES, \"\", \"\", \"RouteID; Name\", \"\")\n self.routeXref = dict()\n for route in routes:\n self.routeXref[route.RouteID] = route.Name\n self.routeXref[route.Name] = route.RouteID\n del routes\n \n #get mode lookup table\n mode_table = self.getModeLookupTable()\n \n # Query the RTD database for the route name, operator, mode, and headways.\n # We are querying for weekday routes (DAYTYPE_CLASS Weekday field = 'Y')\n conn = pyodbc.connect(PublicTransit.DB_CONN_STRING)\n cursor = conn.cursor()\n self.transitRoutes = dict()\n qry = \"\"\"\n WITH t AS\n (\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR,\n CASE\n WHEN HOUR_CLASS >= 3 and HOUR_CLASS < 6 THEN 'EA'\n WHEN HOUR_CLASS >= 6 and HOUR_CLASS < 10 THEN 'AM'\n WHEN HOUR_CLASS >= 10 and HOUR_CLASS < 15 THEN 'MD'\n WHEN HOUR_CLASS >= 15 and HOUR_CLASS < 19 THEN 'PM'\n WHEN (HOUR_CLASS BETWEEN 19 AND 24) OR HOUR_CLASS < 3 THEN 'EV'\n END AS tod,\n [HOURLY_FREQUENCY(Daily until HOUR_CLASS update)], HOUR_CLASS\n FROM dbo.[ROUTE HEADWAY AND FREQUENCY]\n WHERE DAYTYPE_CLASS IN\n (SELECT dc.CLASS FROM dbo.DAYTYPE_CLASS dc WHERE WEEKDAY = 'Y')\n )\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod,\n 60.0 / ROUND(AVG(CAST([HOURLY_FREQUENCY(Daily until HOUR_CLASS update)] AS FLOAT)), 0) as headway\n FROM t\n GROUP BY CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod\n ORDER BY SCH_ROUTEID, SCH_PATTERNID, tod\"\"\"\n \n used_route_names = []\n # Iterate through result set and apply attributes.\n for row in cursor.execute(qry):\n routePattern = str(row.SCH_ROUTEID) + \"_\" + str(row.SCH_PATTERNID)\n if routePattern not in self.transitRoutes:\n self.transitRoutes[routePattern] = TransitRoute(routePattern,\n routeId = row.SCH_ROUTEID,\n patternId = row.SCH_PATTERNID)\n self.transitRoutes[routePattern].new_name = self.__cleanRouteName(row.CPT_AGENCYID + \"_\" + row.SCH_ROUTEDESIGNATOR[:(11 - 1 - len(row.CPT_AGENCYID))],used_route_names) #12 is the maximum name length\n self.transitRoutes[routePattern].agency = row.AGENCYNAME\n mode = -1\n for mode_row in mode_table:\n if row.CPT_AGENCYID == mode_row[\"CPT_AGENCYID\"] and row.CPT_MODE == mode_row[\"CPT_MODE\"]:\n if mode_row[\"SCH_ROUTEDESIGNATOR\"] != \"NA\":\n if row.SCH_ROUTEDESIGNATOR == mode_row[\"SCH_ROUTEDESIGNATOR\"]:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n break #this is as detailed as we can get\n else:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n self.transitRoutes[routePattern].mode = mode\n self.transitRoutes[routePattern].mode_group = Mode.getModeName(mode_group)\n # set headways\n if row.tod == 'EA':\n self.transitRoutes[routePattern].eaHeadway = row.headway\n elif row.tod == 'AM':\n self.transitRoutes[routePattern].amHeadway = row.headway\n elif row.tod == 'MD':\n self.transitRoutes[routePattern].mdHeadway = row.headway\n elif row.tod == 'PM':\n self.transitRoutes[routePattern].pmHeadway = row.headway\n elif row.tod == 'EV':\n self.transitRoutes[routePattern].evHeadway = row.headway\n conn.close()",
"def ip4_route(node):\n output = normalize_text(node.run('ip route')).splitlines()\n result = {}\n for line in output:\n columns = line.split(' ')\n route = result[columns[0]] = {}\n prev = None\n for column in columns:\n if prev == 'dev':\n route['dev'] = column\n if prev == 'via':\n route['via'] = column\n if prev == 'proto':\n route['proto'] = column\n if prev == 'metric':\n route['metric'] = column\n if prev == 'scope':\n route['scope'] = column\n prev = column\n\n return result",
"def routes(self) -> pulumi.Output[Sequence['outputs.RouteTableRoute']]:\n return pulumi.get(self, \"routes\")",
"def get_list():\n hash_map_list = model.hash_table.find()\n data = dict(success=True, hash_map_list=hash_map_list)\n return data",
"def populateTable(self):\n\n output_list = self.output_ports.split(', ')\n\n for i in output_list:\n values = i.split('-')\n nextHopPort = values[0]\n linkCost = values[1]\n destId = values[2]\n learnedFrom = 0 # As it was learned from ConfigFile\n row = routing_row.RoutingRow(nextHopPort, destId, linkCost, destId, learnedFrom)\n self.addToRoutingTable(row)",
"def routes(self) -> List[Tuple[int, bytes]]:\n raise NotImplementedError() # pragma: no cover",
"def mbta_route_list():\n f = open('complete_routes.txt', 'r')\n complete_routes = ast.literal_eval(f.read())\n\n #creates list of all route_ids in MBTA system\n subway_route_list = []\n for x in range(len(complete_routes['mode'])):\n if complete_routes['mode'][x]['mode_name'] == 'Subway':\n for y in range(len(complete_routes['mode'][x]['route'])):\n subway_route_list.append(complete_routes['mode'][x]['route'][y]['route_id'])\n\n #removes duplicates from list and returns\n return list(OrderedDict.fromkeys(subway_route_list))",
"def show_routes(self):\n routelist= [(handler.regex.pattern, handler.handler_class) for handler in self.handlers[0][1]]\n print(55*\"-\")\n print(\" Routing table (order matters) :\")\n print(55*\"-\")\n for elem in routelist:\n print('{0:<20} {1:<30} '.format(elem[0], str(elem[1])))",
"def __get_network_routes(self):\n routes = []\n\n gws = netifaces.gateways()\n for k in gws.keys():\n if k == 'default':\n continue\n\n\t for r in gws[k]:\n (ip,interface,is_gateway) = r\n\n gw_name = \"{0}\".format(netifaces.address_families[k])\n\n routes.append({\n gw_name : {\n 'ip_address' : ip,\n 'interface' : interface,\n\t\t\t 'default' : is_gateway\n }\n \n }\n )\n\n return routes",
"def addSNMPRoutes(self, routingtable):\n\n ipCidrRouteDest = \"\"\n ipCidrRouteNextHopAS = \"\"\n ipCidrRouteMetric1 = 0\n ipCidrRouteMetric2 = 0\n ipCidrRouteMetric3 = 0\n ipCidrRouteMetric4 = 0\n ipCidrRouteMetric5 = 0\n ipCidrRouteStatus = 0\n ipCidrRouteMask = \"\"\n ipCidrRouteTos = 0\n ipCidrRouteNextHop = \"\"\n ipCidrRouteIfIndex = 0\n ipCidrRouteType = 0\n ipCidrRouteProto = 0\n ipCidrRouteAge = 0\n ipCidrRouteInfo = 0\n\n for loop_rtIndex in routingtable:\n for ifAttr in routingtable[loop_rtIndex]:\n if ifAttr == 1:\n ipCidrRouteDest = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 10:\n ipCidrRouteNextHopAS = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 11:\n ipCidrRouteMetric1 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 12:\n ipCidrRouteMetric2 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 13:\n ipCidrRouteMetric3 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 14:\n ipCidrRouteMetric4 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 15:\n ipCidrRouteMetric5 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 16:\n ipCidrRouteStatus = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 2:\n ipCidrRouteMask = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 3:\n ipCidrRouteTos = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 4:\n ipCidrRouteNextHop = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 5:\n ipCidrRouteIfIndex = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 6:\n ipCidrRouteType = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 7:\n ipCidrRouteProto = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 8:\n ipCidrRouteAge = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 9:\n ipCidrRouteInfo = routingtable[loop_rtIndex][ifAttr]\n\n self.routingtable[loop_rtIndex] = device_routingtable( \\\n ipCidrRouteDest, ipCidrRouteNextHopAS, ipCidrRouteMetric1, \\\n ipCidrRouteMetric2, ipCidrRouteMetric3, ipCidrRouteMetric4, \\\n ipCidrRouteMetric5, ipCidrRouteStatus, ipCidrRouteMask, \\\n ipCidrRouteTos, ipCidrRouteNextHop, ipCidrRouteIfIndex, \\\n ipCidrRouteType, ipCidrRouteProto, ipCidrRouteAge, \\\n ipCidrRouteInfo)",
"def get_time_table(self,day):\n output = []\n for link in self.data[day]:\n df = self.data[link][day]\n for row in df:\n output.append({'actualtime_arr_from':row[0],'acutaltime_arr_to':row[1],\\\n 'routeid':row[2],'link':route})\n from operator import itemgetter\n return sorted(output, key=itemgetter('actualtime_arr_from'))",
"async def get_routes(self) -> Sequence[str]:\n results = []\n storage: BaseStorage = await self._context.inject(BaseStorage)\n async for record in storage.search_records(\n self.RECORD_TYPE, {\"to\": self._sender_verkey}\n ):\n results.append(record.value)\n return results",
"def getRoutes(self):\n pass",
"def data(self):\n data = collections.OrderedDict()\n\n # Pull over the existing blueprint data.\n if '$path' in self.blueprint or 'path' in self.blueprint:\n data['path'] = self.blueprint.get(\n '$path', self.blueprint.get('path'))\n\n tagged_keys = tuple(['{}@'.format(key)\n for key in COLLECTION_BLUEPRINT_KEYS])\n\n for key in sorted(self.blueprint.keys()):\n if key in COLLECTION_BLUEPRINT_KEYS or key.startswith(tagged_keys):\n data[key.lstrip('$')] = self.blueprint[key]\n\n data['routes'] = self.paths\n return data",
"def to_dict(self) -> Dict[int, List[int]]:\n\n sched_list = []\n for r in self.sched:\n for e in self.sched[r]:\n sched_list.append({\n \"event\": e[\"event\"].to_dict(),\n \"room\": r.to_dict(),\n \"color\": e[\"color\"]\n })\n return sched_list",
"def lookup_routes(self, daddr):\n outroutes = []\n for entry in self.routes:\n for varat in entry[\"varats\"]:\n ip = varat[\"network\"].split(\".\")\n netmask = varat[\"netmask\"].split(\".\")\n\n mask_bit = \"\".join([ format(int(quad), \"08b\") for quad in netmask ])\n num_ones = mask_bit.count(\"1\")\n ip_bin = \"\".join([ format(int(quad), \"08b\") for quad in ip ])\n ip_start = ip_bin[:num_ones]\n daddr_bin = \"\".join([ format(int(quad), \"08b\") for quad in daddr.split(\".\") ])\n if daddr_bin.startswith(ip_start):\n outroutes.append({\"peer\": entry[\"peer\"], \"us\": entry[\"us\"], \"ghoti\": num_ones, \"msg\": varat})\n\n #print(\"outroutessssssssssssssssssssss\", outroutes)\n return outroutes",
"def tables() -> dict[str, str]:\n return {\n \"land_use\": \"zone_id\",\n \"tours\": \"tour_id\",\n \"trips\": \"trip_id\",\n \"persons\": \"person_id\",\n \"households\": \"household_id\",\n }"
] | [
"0.77210844",
"0.6709546",
"0.64302474",
"0.63715637",
"0.623701",
"0.61796683",
"0.6155058",
"0.6075935",
"0.6040823",
"0.603392",
"0.6024642",
"0.59989035",
"0.59968525",
"0.5968736",
"0.5956476",
"0.5927441",
"0.5900913",
"0.5837161",
"0.5820069",
"0.58077735",
"0.5790471",
"0.57826936",
"0.57726437",
"0.5765567",
"0.574821",
"0.5731092",
"0.5728578",
"0.57201946",
"0.57189494",
"0.5703734"
] | 0.8206283 | 0 |
deploy all the routing entries in the routing table. | def deploy_routing_table(self):
for subnet, entry in self.tbl.items():
if entry.neighbor_port:
self.deploy_flow_entry(subnet=subnet, outport=entry.receive_port, dstport=entry.neighbor_port) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deploy():\n update_treesheets()\n restart_treesheets()",
"def post_route_table_create(self, resource_dict):\n pass",
"def __init__(self):\n self.routingTable = dict()",
"def deploy(self):\n self._switch.odlclient._request_json(self._table_path, method=\"post\", json={\n \"flow\": self._odl_inventory()\n })",
"def deploy():",
"def post_interface_route_table_create(self, resource_dict):\n pass",
"def setup_intervlan_host_routes(self):\n if self.routers:\n for src in self.host_information:\n src_host = self.host_information[src]['host']\n src_vlan = self.host_information[src]['vlan']\n src_ip = self.host_information[src]['ip']\n for dst in self.host_information:\n if src != dst:\n dst_host = self.host_information[dst]['host']\n dst_vlan = self.host_information[dst]['vlan']\n dst_ip = self.host_information[dst]['ip']\n if src_vlan != dst_vlan and self.is_routed_vlans(src_vlan, dst_vlan):\n src_faucet_vip = self.faucet_vips[src_vlan]\n dst_faucet_vip = self.faucet_vips[dst_vlan]\n self.add_host_route(src_host, dst_ip, src_faucet_vip.ip)\n self.add_host_route(dst_host, src_ip, dst_faucet_vip.ip)",
"def update_host_routes(self, config, cache):\n db = cache.get_or_create('host_routes', lambda: {})\n for net in config.networks:\n\n # For each subnet...\n for subnet in net.subnets:\n cidr = str(subnet.cidr)\n\n # determine the set of previously written routes for this cidr\n if cidr not in db:\n db[cidr] = set()\n\n current = db[cidr]\n\n # build a set of new routes for this cidr\n latest = set()\n for r in subnet.host_routes:\n latest.add((r.destination, r.next_hop))\n\n # If the set of previously written routes contains routes that\n # aren't defined in the new config, run commands to delete them\n for x in current - latest:\n if self._alter_route(net.interface.ifname, 'del', *x):\n current.remove(x)\n\n # If the new config contains routes that aren't defined in the\n # set of previously written routes, run commands to add them\n for x in latest - current:\n if self._alter_route(net.interface.ifname, 'add', *x):\n current.add(x)\n\n if not current:\n del db[cidr]\n\n cache.set('host_routes', db)",
"def _link_route_table():\n if dry:\n print(\"Would link the VPC and subnet in the route table.\")\n return True\n\n vpc = _existing.vpc\n sub = _existing.sub\n igw = _existing.igw\n rt = [x for x in vpc.route_tables.all()]\n if len(rt) == 0:\n print('No route table have been created alongside the VPC. Not sure what to do here.')\n for r in rt:\n print('Linking sub {s} in route table {r}.'.format(\n s=sub.id,\n r=r.id\n ))\n r.associate_with_subnet(SubnetId=sub.id)\n _tag_resource(r)\n\n r.create_route(\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId=igw.id,\n #InstanceId='string',\n #NetworkInterfaceId='string',\n #VpcPeeringConnectionId='string'\n )",
"def set_routing(self, rinfo):\n\n self.routing = [ self.Routing(*r) for r in rinfo ]",
"def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()",
"def deploy(self):\n step = 10\n for i in range(0, self.x, step): \n for j in range(0, self.y, step):\n self._place_nodes(i,j, step, max_nodes = 3)",
"def send(self, name):\n router = self.routers[name]\n for neighbour in router.neighbours:\n neighbour = self.routers[neighbour]\n neighbour.receive_routing_table(router)",
"def create_ig_route(self, config):\n for vpc_id, vpc_config in config.iteritems():\n for route in vpc_config[\"RouteTables\"]:\n resource = self.ec2.RouteTable(route[\"RouteTableId\"])\n for route in resource.routes:\n route_exists = False\n for ig in vpc_config[\"InternetGateways\"]:\n route_exists = False\n if ig[\"InternetGatewayId\"] == route[\"GatewayId\"]:\n route_exists = True\n break\n if not route_exists:\n resource.create_route(\n DestinationCidrBlock=\"0.0.0.0/0\",\n GatewayId=ig[\"InternetGatewayId\"],\n )",
"def add_routes(self):\n pass",
"def test_routing_tables(self):\n\n # Validate the following:\n # 1. Create a new public IP range and dedicate to a account\n # 2. Acquire a IP from new public range\n # 3. Create a firewall rule to open up the port, so that IP is associated with network\n # 5. Login to VR and verify routing tables, there should be Table_eth3\n # 6. Delete firewall rule, since its last IP, routing table Table_eth3 should be deleted\n\n self.services[\"extrapubliciprange\"][\"zoneid\"] = self.services[\"zoneid\"]\n self.public_ip_range = PublicIpRange.create(\n self.apiclient,\n self.services[\"extrapubliciprange\"]\n )\n self.cleanup.append(self.public_ip_range)\n\n logger.debug(\"Dedicating Public IP range to the account\");\n dedicate_public_ip_range_response = PublicIpRange.dedicate(\n self.apiclient,\n self.public_ip_range.vlan.id,\n account=self.account.name,\n domainid=self.account.domainid\n )\n ip_address = PublicIPAddress.create(\n self.apiclient,\n self.account.name,\n self.zone.id,\n self.account.domainid,\n self.services[\"virtual_machine\"]\n )\n self.cleanup.append(ip_address)\n\n # Check if VM is in Running state before creating NAT and firewall rules\n vm_response = VirtualMachine.list(\n self.apiclient,\n id=self.virtual_machine.id\n )\n\n self.assertEqual(\n isinstance(vm_response, list),\n True,\n \"Check list VM returns a valid list\"\n )\n\n self.assertNotEqual(\n len(vm_response),\n 0,\n \"Check Port Forwarding Rule is created\"\n )\n self.assertEqual(\n vm_response[0].state,\n 'Running',\n \"VM state should be Running before creating Firewall rule.\"\n )\n\n # Open up firewall port for SSH, this will associate IP with VR\n firewall_rule = FireWallRule.create(\n self.apiclient,\n ipaddressid=ip_address.ipaddress.id,\n protocol=self.services[\"natrule\"][\"protocol\"],\n cidrlist=['0.0.0.0/0'],\n startport=self.services[\"natrule\"][\"publicport\"],\n endport=self.services[\"natrule\"][\"publicport\"]\n )\n self.cleanup.append(firewall_rule)\n\n # Get the router details associated with account\n routers = list_routers(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid,\n )\n router = routers[0]\n\n if (self.hypervisor.lower() == 'vmware'\n or self.hypervisor.lower() == 'hyperv'):\n result = get_process_status(\n self.apiclient.connection.mgtSvr,\n 22,\n self.apiclient.connection.user,\n self.apiclient.connection.passwd,\n router.linklocalip,\n 'ip route list table Table_eth3',\n hypervisor=self.hypervisor\n )\n else:\n hosts = list_hosts(\n self.apiclient,\n id=router.hostid,\n )\n self.assertEqual(\n isinstance(hosts, list),\n True,\n \"Check for list hosts response return valid data\"\n )\n host = hosts[0]\n host.user = self.hostConfig['username']\n host.passwd = self.hostConfig['password']\n try:\n result = get_process_status(\n host.ipaddress,\n 22,\n host.user,\n host.passwd,\n router.linklocalip,\n 'ip route list table Table_eth3'\n )\n except KeyError:\n self.skipTest(\n \"Provide a marvin config file with host\\\n credentials to run %s\" %\n self._testMethodName)\n\n logger.debug(\"ip route list table Table_eth3: %s\" % result)\n public_range_gateway = self.services[\"publiciprange\"][\"gateway\"]\n default_route_rule = \"default via \" + public_range_gateway + \" dev eth3 proto static\"\n logger.debug(\"default route result: %s\" % str(result[0]))\n self.assertEqual(\n default_route_rule,\n str(result[0]),\n \"Check default route table entry for public ip range\"\n )\n\n res = str(result)\n self.assertEqual(\n res.count(\"throw\") == 2,\n True,\n 
\"Check routing rules to throw rest of the traffic. Count shoule be Atleast 2 for the control and guest traffic \"\n )\n\n firewall_rule.delete(self.apiclient)\n self.cleanup.remove(firewall_rule)\n\n if (self.hypervisor.lower() == 'vmware'\n or self.hypervisor.lower() == 'hyperv'):\n result = get_process_status(\n self.apiclient.connection.mgtSvr,\n 22,\n self.apiclient.connection.user,\n self.apiclient.connection.passwd,\n router.linklocalip,\n 'ip route list table Table_eth3',\n hypervisor=self.hypervisor\n )\n else:\n hosts = list_hosts(\n self.apiclient,\n id=router.hostid,\n )\n self.assertEqual(\n isinstance(hosts, list),\n True,\n \"Check for list hosts response return valid data\"\n )\n host = hosts[0]\n host.user = self.hostConfig['username']\n host.passwd = self.hostConfig['password']\n try:\n result = get_process_status(\n host.ipaddress,\n 22,\n host.user,\n host.passwd,\n router.linklocalip,\n 'ip route list table Table_eth3'\n )\n except KeyError:\n self.skipTest(\n \"Provide a marvin config file with host\\\n credentials to run %s\" %\n self._testMethodName)\n\n logger.debug(\"ip route list table Table_eth3: %s\" % result)\n res = str(result)\n self.assertEqual(\n res.count(\"default via\"),\n 0,\n \"Check to ensure there should not be any default rule\"\n )\n\n self.assertEqual(\n res.count(\"throw\"),\n 0,\n \"Check to ensure there should not be any throw rule\"\n )",
"def populateTable(self):\n\n output_list = self.output_ports.split(', ')\n\n for i in output_list:\n values = i.split('-')\n nextHopPort = values[0]\n linkCost = values[1]\n destId = values[2]\n learnedFrom = 0 # As it was learned from ConfigFile\n row = routing_row.RoutingRow(nextHopPort, destId, linkCost, destId, learnedFrom)\n self.addToRoutingTable(row)",
"def addSNMPRoutes(self, routingtable):\n\n ipCidrRouteDest = \"\"\n ipCidrRouteNextHopAS = \"\"\n ipCidrRouteMetric1 = 0\n ipCidrRouteMetric2 = 0\n ipCidrRouteMetric3 = 0\n ipCidrRouteMetric4 = 0\n ipCidrRouteMetric5 = 0\n ipCidrRouteStatus = 0\n ipCidrRouteMask = \"\"\n ipCidrRouteTos = 0\n ipCidrRouteNextHop = \"\"\n ipCidrRouteIfIndex = 0\n ipCidrRouteType = 0\n ipCidrRouteProto = 0\n ipCidrRouteAge = 0\n ipCidrRouteInfo = 0\n\n for loop_rtIndex in routingtable:\n for ifAttr in routingtable[loop_rtIndex]:\n if ifAttr == 1:\n ipCidrRouteDest = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 10:\n ipCidrRouteNextHopAS = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 11:\n ipCidrRouteMetric1 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 12:\n ipCidrRouteMetric2 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 13:\n ipCidrRouteMetric3 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 14:\n ipCidrRouteMetric4 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 15:\n ipCidrRouteMetric5 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 16:\n ipCidrRouteStatus = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 2:\n ipCidrRouteMask = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 3:\n ipCidrRouteTos = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 4:\n ipCidrRouteNextHop = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 5:\n ipCidrRouteIfIndex = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 6:\n ipCidrRouteType = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 7:\n ipCidrRouteProto = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 8:\n ipCidrRouteAge = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 9:\n ipCidrRouteInfo = routingtable[loop_rtIndex][ifAttr]\n\n self.routingtable[loop_rtIndex] = device_routingtable( \\\n ipCidrRouteDest, ipCidrRouteNextHopAS, ipCidrRouteMetric1, \\\n ipCidrRouteMetric2, ipCidrRouteMetric3, ipCidrRouteMetric4, \\\n ipCidrRouteMetric5, ipCidrRouteStatus, ipCidrRouteMask, \\\n ipCidrRouteTos, ipCidrRouteNextHop, ipCidrRouteIfIndex, \\\n ipCidrRouteType, ipCidrRouteProto, ipCidrRouteAge, \\\n ipCidrRouteInfo)",
"def pre_interface_route_table_create(self, resource_dict):\n pass",
"def deploy(self, topology):\n print \"ABC - Deployer.deploy()\"",
"def pre_route_table_create(self, resource_dict):\n pass",
"def post_route_table_update(self, resource_id, resource_dict):\n pass",
"def post_interface_route_table_update(self, resource_id, resource_dict):\n pass",
"def deploy(self):\n self.loadseasoning()\n for key in self.config:\n print(\"Deploying \" + key + \" formula...\")\n self.cloneformula(key, self.config[key]['reponame'], \\\n self.config[key]['url'], self.config[key]['branch'])",
"def _generate_table(self):\n for i in xrange(32):\n dest = [0]\n gw = [0]\n self._table.append(\n {'destination': dest, 'gateway': gw}\n )",
"def all_routing(G, switch_nodes, table_file_name):\n\n table = OrderedDict({})\n for s in switch_nodes:\n for d in switch_nodes:\n if s != d:\n routing(G, s, d, table)\n\n with open(table_file_name, 'w') as file:\n file.write(json.dumps(table))\n\n return table",
"def deploy():\n\n require('environment', provided_by=env.environments)\n update_source()\n update_requirements()\n mgmt('syncdb', '--migrate')\n restart_supervisor()",
"def setup_nodes(request, node_id):\n servicemap = node.servicemap\n print \"Servicemap is: \"\n pprint(servicemap)\n if node_id == 'all' or node_id in node.inv_table.keys():\n for route in servicemap:\n node_id = route['id']\n addresses = node.inv_table[node_id]\n for addr in addresses:\n # Do the initial setup\n url = 'http://' + addr + '/setup'\n node_routes_json = json.dumps(route['next_hops'])\n print \"NODE ROUTES JSON: \", str(node_routes_json)\n d = treq.post(url, data=node_routes_json)\n d.addCallback(node.ack_response)\n\n # Set attributes\n attributes = route['attr']\n for stat_key, stat_val in attributes.iteritems():\n attr_url = 'http://' + addr + '/' + str(stat_key) + '/' + str(stat_val)\n d = treq.post(attr_url)\n d.addCallback(node.ack_response)\n # TODO: Logic is a bit broken here because if we're only setting up one node_id, we still iterate through the entire list\n return \"OK\"\n else:\n print \"Skipping non-matched URL.\"",
"def all_routing_tree(G, tors, table_file_name):\n \n table = OrderedDict({})\n for s in G.nodes():\n table[s] = OrderedDict({})\n for s in tors:\n for d in tors:\n if s != d:\n routing(G, s, d, table)\n\n with open(table_file_name, 'w') as file:\n file.write(json.dumps(table))\n return table",
"def _post_deploy(self, desired_config, default_route_domain):\n LOGGER.debug(\"Perform post-deploy service tasks...\")\n self._bigip.refresh_ltm()\n\n # Delete/update nodes (no creation)\n LOGGER.debug(\"Post-process nodes.\")\n existing = self._bigip.get_nodes()\n desired = self._desired_nodes(default_route_domain)\n (update_nodes, delete_nodes) = \\\n self._get_resource_tasks(existing, desired)[1:3]\n self._update_resources(update_nodes)\n self._delete_resources(delete_nodes)\n\n # Delete extraneous virtual addresses\n LOGGER.debug(\"Remove superfluous virtual addresses.\")\n desired = desired_config.get('virtual_addresses', dict())\n (referenced, unreferenced) = (\n self._bigip.get_virtual_address_references()\n )\n delete_vaddrs = self._get_resource_tasks(unreferenced, desired)[2]\n self._delete_resources(delete_vaddrs)\n\n # Get the set of virtual addresses that are created by virtuals\n # but not in the set of desired virtual addresses.\n update_vaddrs = list()\n auto_created = self._get_resource_tasks(referenced, desired)[2]\n for vaddr in auto_created:\n if vaddr.data['enabled'] == \"no\":\n vaddr.data['enabled'] = \"yes\"\n update_vaddrs.append(vaddr)\n\n self._update_resources(update_vaddrs)"
] | [
"0.5980628",
"0.5798021",
"0.56709176",
"0.5667072",
"0.5665464",
"0.5662688",
"0.5594924",
"0.55850637",
"0.55674374",
"0.5528769",
"0.5450715",
"0.54399043",
"0.54344666",
"0.542846",
"0.54066753",
"0.5389441",
"0.53138244",
"0.53026205",
"0.53018546",
"0.5285111",
"0.5276249",
"0.5274693",
"0.5245685",
"0.52309084",
"0.5220139",
"0.5214083",
"0.51689655",
"0.5160661",
"0.51444876",
"0.514279"
] | 0.8103183 | 0 |
return port_no by matching the destination IP address with the subnets. | def find_outport_by_ip(self, dst_ip):
for port_no, port in self.ports.items():
if port.gateway and dst_ip in port.gateway.ipv4_subnet:
return port_no
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_outport_by_subnet(self, subnet):\n for port_no, port in self.ports.items():\n if port.gateway and port.gateway.ipv4_subnet == subnet:\n return port_no\n return None",
"def get_port(self, dst_ip, access_table):\r\n if access_table:\r\n if isinstance(access_table.values()[0], tuple):\r\n for key in access_table.keys():\r\n if dst_ip == access_table[key][0]: # Use the IP address only, not the MAC address. (hmc)\r\n dst_port = key[1]\r\n return dst_port\r\n return None",
"def check(self, ip, port):\r\n ip = struct.unpack(\">I\", socket.inet_aton(ip))[0]\r\n if (ip & self.netmask) == self.ip:\r\n if self.port_low <= port and port <= self.port_high:\r\n return self.match\r\n return -1",
"def check_routable(self, from_subnets: List[Subnet], to_subnets: List[Subnet]) -> dict:\n # check what ports from subnets allow to any to subnets\n ports = {} # port: (to_subnet, from_subnet)\n for from_subnet in from_subnets:\n for to_subnet in to_subnets:\n # check if traffic from subnet is stopped by to subnet nacl\n if from_subnet.name in to_subnet.nacls:\n if 'ICMP' not in ports:\n ports['ICMP'] = (from_subnet.cidr, to_subnet.cidr)\n if 'all' in to_subnet.nacls[from_subnet.name]['in']:\n # if all ports accepted in then set ports to all and we are done\n return {'all': (from_subnet.cidr, to_subnet.cidr)}\n elif 'None' in to_subnet.nacls[from_subnet.name]['in']:\n # If you don't have access to Enteprise network, you can't act on Operational Host\n # TODO refactor this hacky fix\n permission = self.check_for_enterprise_sessions()\n ports = {'all': (from_subnet.cidr, to_subnet.cidr)} if permission else {}\n return ports\n \n else:\n # we only add the ports in rules to our accepted ports\n for rule in to_subnet.nacls[from_subnet.name]['in']:\n if rule['PortRange'] is int and rule['PortRange'] not in ports:\n ports[rule[\"PortRange\"]] = (from_subnet.cidr, to_subnet.cidr)\n else:\n for p in range(rule[\"PortRange\"][0], rule[\"PortRange\"][1]):\n if p not in ports:\n ports[p] = (from_subnet.cidr, to_subnet.cidr)\n elif 'all' in to_subnet.nacls:\n if 'ICMP' not in ports:\n ports['ICMP'] = (from_subnet.cidr, to_subnet.cidr)\n # if all ports accepted out then use inbound rules only\n if 'all' in to_subnet.nacls['all']['in']:\n # if all ports accepted in then set ports to all and we are done\n return {'all': (from_subnet.cidr, to_subnet.cidr)}\n else:\n # we only add the ports in rules to our accepted ports\n for rule in to_subnet.nacls['all']['in']:\n if rule['PortRange'] is int and rule['PortRange'] not in ports:\n ports[rule[\"PortRange\"]] = (from_subnet.cidr, to_subnet.cidr)\n else:\n for p in range(rule[\"PortRange\"][0], rule[\"PortRange\"][1]):\n if p not in ports:\n ports[p] = (from_subnet.cidr, to_subnet.cidr)\n else:\n # this means that traffic cannot reach move between these 2 subnets\n continue\n\n return ports",
"def _get_candidate_port(self):\n range_size = self.upper_port - self.lower_port\n if range_size == 0:\n return 0\n return random.randint(self.lower_port, self.upper_port)",
"def __find_port(self, url):\n match = self.__REGEX_PORT.search(url)\n if match:\n port_num = match.group(0).split(':')[1]\n return port_num\n return None",
"def find(self,port):\n\tif self.portlist == []:\n\t\treturn -1\n\tif isinstance(port,Port):\n\t\tport = (int(port.GetPortNbr()), port.GetPortWay())\n\tmatch = ( (int(self.portlist[0].GetPortNbr()), self.portlist[0].GetPortWay()) == port )\n\ti = 0\n\twhile ( not match and i<len(self.portlist)-1 ):\n\t\ti += 1\n\t\tmatch = ( (int(self.portlist[i].GetPortNbr()), self.portlist[i].GetPortWay()) == port )\n\tif match:\n\t\treturn i\n\telse:\n\t\treturn -1",
"def port(self) -> int:",
"def _check_ip_port_split(self):\n if self._type == \"A\":\n formatted_value = self._value.split(':')\n self._ip = formatted_value[0]\n self._port = int(formatted_value[1])",
"def parse_remote_port(self, reply):\r\n\r\n remote_port = re.findall(\"^REMOTE (TCP|UDP) ([0-9]+)$\", reply)\r\n if not len(remote_port):\r\n remote_port = 0\r\n else:\r\n remote_port = int(remote_port[0][1])\r\n if remote_port < 1 or remote_port > 65535:\r\n remote_port = 0\r\n return remote_port",
"def get_next_hop(self, dpid, dstIP):\n\n if self.nb_api is None:\n self.nb_api = api_nb.NbApi.get_instance(False)\n\n dstIP = str(dstIP)\n\n # TODO: THIS IS JUST A STUB: Use Database for this\n dpid = str(dpid)\n if self.USE_CACHE and dpid in self.cache_logical_router_by_dpid.keys():\n lrouter = self.cache_logical_router_by_dpid[dpid]\n else:\n lrouter = self.nb_api.get(l3.LogicalRouter(id=dpid))\n if dpid == '1':\n if not self.ip_matches_network(SUBNET1, dstIP) and not self.ip_matches_network(SUBNET2, dstIP):\n rport = lrouter.ports[1]\n if self.USE_CACHE:\n nexthop_router = self.cache_logical_router_by_dpid[\"2\"]\n else:\n nexthop_router = self.nb_api.get(l3.LogicalRouter(id=\"2\"))\n nh_port = nexthop_router.ports[0]\n return rport.port_no, rport.mac, nh_port.mac\n else:\n # network is directly connected to switch\n # host mac is learned during arp request.\n return None, None, None\n\n elif dpid == '2':\n if self.ip_matches_network(SUBNET3, dstIP) or self.ip_matches_network(SUBNET4, dstIP):\n rport = lrouter.ports[1] # second port\n if self.USE_CACHE:\n nexthop_router = self.cache_logical_router_by_dpid[\"3\"]\n else:\n nexthop_router = self.nb_api.get(l3.LogicalRouter(id=\"3\"))\n nh_port = nexthop_router.ports[0]\n return rport.port_no, rport.mac, nh_port.mac\n elif self.ip_matches_network(SUBNET1, dstIP) or self.ip_matches_network(SUBNET2, dstIP):\n rport = lrouter.ports[0] # first port\n if self.USE_CACHE:\n nexthop_router = self.cache_logical_router_by_dpid[\"1\"]\n else:\n nexthop_router = self.nb_api.get(l3.LogicalRouter(id=\"1\"))\n nh_port = nexthop_router.ports[1] # second port\n return rport.port_no, rport.mac, nh_port.mac\n elif dpid == '3':\n if not self.ip_matches_network(SUBNET3, dstIP) and not self.ip_matches_network(SUBNET4, dstIP):\n rport = lrouter.ports[0]\n if self.USE_CACHE:\n nexthop_router = self.cache_logical_router_by_dpid[\"2\"]\n else:\n nexthop_router = self.nb_api.get(l3.LogicalRouter(id=\"2\"))\n nh_port = nexthop_router.ports[1]\n return rport.port_no, rport.mac, nh_port.mac\n else:\n # host mac adress\n # Learned by ARP\n return None, None, None\n else:\n print \"Datapath {} not supported. Cannot return nexthop information!\"\n return None, None, None",
"def getValidPort(self, count=4):\n port = self.getAlphaNumeric(count, 'digits')\n if int(port) > 65535:\n print(\"65535\")\n return self.getAlphaNumeric(count, 'digits')",
"def Port(self) -> int:",
"def _find_open_port(worker_ip: str, local_listen_port: int, ports_to_skip: Iterable[int]) -> int:\n max_tries = 1000\n found_port = False\n for i in range(max_tries):\n out_port = local_listen_port + i\n if out_port in ports_to_skip:\n continue\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((worker_ip, out_port))\n found_port = True\n break\n # if unavailable, you'll get OSError: Address already in use\n except OSError:\n continue\n if not found_port:\n msg = \"LightGBM tried %s:%d-%d and could not create a connection. Try setting local_listen_port to a different value.\"\n raise RuntimeError(msg % (worker_ip, local_listen_port, out_port))\n return out_port",
"def get_port_by_ip(cfg_facts, ipaddr):\n if ':' in ipaddr:\n iptype = \"ipv6\"\n else:\n iptype = \"ipv4\"\n\n intf = {}\n intf.update(cfg_facts.get('INTERFACE', {}))\n if \"PORTCHANNEL_INTERFACE\" in cfg_facts:\n intf.update(cfg_facts['PORTCHANNEL_INTERFACE'])\n for a_intf in intf:\n for addrs in intf[a_intf]:\n intf_ip = addrs.split('/')\n if iptype == 'ipv6' and ':' in intf_ip[0] and intf_ip[0].lower() == ipaddr.lower():\n return a_intf\n elif iptype == 'ipv4' and ':' not in intf_ip[0] and intf_ip[0] == ipaddr:\n return a_intf\n\n raise Exception(\"Dod not find port for IP %s\" % ipaddr)",
"def _find_host_port(ports: Dict[str, Any], container_port: int) -> str:\n mappings = ports.get('{}/tcp'.format(container_port), [])\n for mapping in mappings:\n if mapping['HostIp'] == '0.0.0.0':\n return mapping['HostPort']\n else:\n raise ValueError(\n 'No HostPort found for ContainerPort={} (all port mappings: {})'\n .format(container_port, ports))",
"def get_available_portoffset(target=\"localhost\"):\n target_ip = socket.gethostbyname(target)\n for portoffset in range(10000, 61000, 1000):\n i = portoffset + 873\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((target_ip, i))\n sock.close()\n if result != 0:\n logger.debug(\"port open {0}\".format(portoffset))\n return portoffset\n return None",
"def port_number(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port_number\")",
"def find_ports(destination):\n output_ports = set()\n if isinstance(destination, collections.Iterable):\n for device in destination:\n # ports leading to device\n ports_towards_device = self.forwarding_table.get(\n device, self.ports)\n output_ports.update(ports_towards_device)\n else:\n output_ports.update(\n self.forwarding_table.get(destination, self.ports))\n return output_ports",
"def identify_remote_router(remote_address):\n global DATA\n port = remote_address[1]\n for every_router in DATA[\"neighbor\"]:\n if every_router[2] is port:\n return every_router[0]",
"def test_port_create_with_segment_subnets(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'])\n res = self.deserialize(self.fmt, response)\n # Don't allocate IPs in this case because we didn't give binding info\n self.assertEqual(0, len(res['port']['fixed_ips']))",
"def get_port_counts(ssh):\r\n cmd02='netstat -na'\r\n retry_number=3\r\n try:\r\n while True:\r\n if retry_number == 0:\r\n logger.writeLog(\"get port counts fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd02)\r\n data02=(stdout.read().decode('gbk').strip().replace(' ','').replace('\\t','').replace('\\r','').replace('\\n',''))\r\n print(data02)\r\n if data02 == \"\":\r\n retry_number -= 1\r\n logger.writeLog(\"port counts data is null\",level='error')\r\n continue\r\n else:\r\n pattern=re.compile('1.*?:22',re.S)\r\n match_list=re.findall(pattern,data02)\r\n print(match_list)\r\n port_count=len(match_list)\r\n logger.writeLog(\"get port counts success\",level='info')\r\n print(\"port connected counts:\",port_count)\r\n return port_count\r\n break\r\n except:\r\n logger.writeLog(\"get port counts error\",level='error')\r\n return None",
"def find_unused_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n sock.bind(('127.0.0.1', 0))\n sock.listen(socket.SOMAXCONN)\n ipaddr, port = sock.getsockname()\n sock.close()\n return port",
"def _getusableport():\r\n port_found = False\r\n port_min = 63000\r\n port_max = 63150\r\n port_iter = port_min\r\n local_addr = getmyip()\r\n\r\n while not port_found:\r\n if port_iter > port_max:\r\n raise Exception(\"Network restriction error! Unable to find a free port!\")\r\n try:\r\n udp_test_socket = recvmess(local_addr, port_iter, _dummy_function)\r\n stopcomm(udp_test_socket)\r\n port_found = True\r\n except Exception, e:\r\n port_iter += 1\r\n\r\n return port_iter",
"def xtest_find_port(self):\n line, head = self._get_line()\n p1, p2, p3, p4 = self.box1.ports()\n\n head.pos = 110, 50\n port = self.tool.find_port(line, head, self.box1)\n self.assertEquals(p1, port)\n\n head.pos = 140, 60\n port = self.tool.find_port(line, head, self.box1)\n self.assertEquals(p2, port)\n\n head.pos = 110, 95\n port = self.tool.find_port(line, head, self.box1)\n self.assertEquals(p3, port)\n\n head.pos = 100, 55\n port = self.tool.find_port(line, head, self.box1)\n self.assertEquals(p4, port)",
"def _validate_port_range(self) -> Tuple[int, int]:\n\n lower_port = upper_port = 0\n port_range = self.port_range\n try:\n port_ranges = port_range.split(\"..\")\n\n lower_port = int(port_ranges[0])\n upper_port = int(port_ranges[1])\n\n port_range_size = upper_port - lower_port\n if port_range_size != 0:\n if port_range_size < min_port_range_size:\n self.log_and_raise(ValueError(f\"Port range validation failed for range: '{port_range}'. \"\n f\"Range size must be at least {min_port_range_size} as specified by \"\n \"env EG_MIN_PORT_RANGE_SIZE\"))\n\n # According to RFC 793, port is a 16-bit unsigned int. Which means the port\n # numbers must be in the range (0, 65535). However, within that range,\n # ports 0 - 1023 are called \"well-known ports\" and are typically reserved for\n # specific purposes. For example, 0 is reserved for random port assignment,\n # 80 is used for HTTP, 443 for TLS/SSL, 25 for SMTP, etc. But, there is\n # flexibility as one can choose any port with the aforementioned protocols.\n # Ports 1024 - 49151 are called \"user or registered ports\" that are bound to\n # services running on the server listening to client connections. And, ports\n # 49152 - 65535 are called \"dynamic or ephemeral ports\". A TCP connection\n # has two endpoints. Each endpoint consists of an IP address and a port number.\n # And, each connection is made up of a 4-tuple consisting of -- client-IP,\n # client-port, server-IP, and server-port. A service runs on a server with a\n # specific IP and is bound to a specific \"user or registered port\" that is\n # advertised for clients to connect. So, when a client connects to a service\n # running on a server, three out of 4-tuple - client-IP, client-port, server-IP -\n # are already known. To be able to serve multiple clients concurrently, the\n # server's IP stack assigns an ephemeral port for the connection to complete\n # the 4-tuple.\n #\n # In case of JEG, we will accept ports in the range 1024 - 65535 as these days\n # admins use dedicated hosts for individual services.\n def validate_port(port: int) -> None:\n if port < 1024 or port > 65535:\n self.log_and_raise(ValueError(f\"Invalid port range '{port_range}' specified. \"\n \"Range for valid port numbers is (1024, 65535).\"))\n validate_port(lower_port)\n validate_port(upper_port)\n except IndexError as ie:\n self.log_and_raise(RuntimeError(f\"Port range validation failed for range: '{port_range}'.\"), chained=ie)\n\n return lower_port, upper_port",
"def create_port_forward_rule(self, ipaddressid, protocol, virtualmachineid,\n privateport, privateendport,\n publicport, publicendport): \n params = {'command':'createPortForwardingRule',\n 'ipaddressid':ipaddressid,\n 'protocol':protocol,\n 'privateport':privateport,\n 'privateendport':privateendport,\n 'publicport':publicport,\n 'publicendport':publicendport,\n 'virtualmachineid':virtualmachineid,\n 'openfirewall':False} \n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['createportforwardingruleresponse']['jobid']\n self.logger.debug('Start job - createPortForwardingRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)",
"def find_port(addr, user):\n home = pwd.getpwuid(os.getuid()).pw_dir\n for name in os.listdir('%s/.ssh/' % home):\n if name.startswith('unixpipe_%s@%s_' % (user, addr,)):\n return int(name.split('_')[2])",
"def destination_port_range(self) -> Optional[pulumi.Input['TrafficMirrorFilterRuleDestinationPortRangeArgs']]:\n return pulumi.get(self, \"destination_port_range\")",
"def destination_port_range(self) -> Optional[pulumi.Input['TrafficMirrorFilterRuleDestinationPortRangeArgs']]:\n return pulumi.get(self, \"destination_port_range\")"
] | [
"0.6941393",
"0.65439576",
"0.6128386",
"0.61070126",
"0.60062206",
"0.59506077",
"0.5934577",
"0.5844289",
"0.58083206",
"0.58003646",
"0.5789853",
"0.5707216",
"0.56426615",
"0.564023",
"0.55956894",
"0.5579187",
"0.5557291",
"0.5545042",
"0.55236137",
"0.5491499",
"0.5473711",
"0.5384334",
"0.5377425",
"0.5374015",
"0.53348666",
"0.53316975",
"0.5309259",
"0.5295351",
"0.5294963",
"0.5294963"
] | 0.6856934 | 1 |
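The negatives in the record above repeatedly hand-roll free-port discovery with retry loops. A minimal sketch of the simplest standard-library variant (the function name is illustrative, not taken from the record): binding to port 0 asks the OS for any unused ephemeral port.

    import socket

    def find_unused_port() -> int:
        # Bind to port 0 so the OS assigns a free ephemeral port,
        # then report that port number; the socket is closed on exit.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.bind(("127.0.0.1", 0))
            return sock.getsockname()[1]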
Delete fn file if it exists | def delete_file(input_fn):
if os.path.isfile(input_fn):
os.remove(input_fn) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def rm_file_if_exists(f):\n with contextlib.suppress(FileNotFoundError):\n os.remove(f)",
"def delete_file(filename):\n if os.path.isfile(filename):\n return os.remove(filename)",
"def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def _delete(filename):\n return os.remove(filename)",
"def filedelete(fname):\n\n if os.path.exists(fname):\n try:\n if os.path.isdir(fname):\n # delete folder\n shutil.rmtree(fname)\n return\n else:\n # delete file\n os.remove(fname)\n return\n except:\n return\n else:\n return",
"def remove_file_if_exists(filename):\n try:\n os.remove(filename)\n except OSError as exc:\n if exc.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise",
"def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def rm_file(filename):\n try:\n os.unlink(filename)\n except FileNotFoundError:\n pass",
"def rm_file(file_):\n Path(file_).unlink(missing_ok=True)",
"def remove_file(fpath):\n logger.debug(\"remove file [%s]\" % fpath)\n try:\n os.unlink(fpath)\n return 0\n except:\n logger.info(\"can't delete [%s]\" % fpath)\n return 1",
"def delete(self, filename):\n pass",
"def _removeFile(self, filename):\n try:\n #delete the output file\n os.remove(filename)\n except:\n #print (\"Failed to remove the file: \" + filename)\n pass",
"def safe_delete(filename):\r\n try:\r\n os.unlink(filename)\r\n except OSError as e:\r\n if e.errno != errno.ENOENT:\r\n raise",
"def delete_file_if_exists(self, file_path):\r\n # noinspection PyBroadException,PyPep8\r\n try:\r\n os.remove(file_path)\r\n except:\r\n pass",
"def remove_file(fp):\n print(f\"{fp} is duplicated, removing.\")\n rm_result = fp.unlink()\n\n return rm_result",
"def remove_file(self):\n if self.file_exists:\n os.remove(self.file_name)",
"def delete_file(path):\n return files.delete_file(path)",
"def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)",
"def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass",
"def safe_delete(self, filename):\n try:\n os.remove(filename)\n except OSError:\n pass",
"def deleteSingleFile(filename):\n os.popen('rm {}'.format(filename))",
"def remove_filepath_if_exists(filepath):\n if os.path.isfile(filepath):\n log.info(f'\\tdeleting existing file: {filepath}')\n os.remove(filepath)",
"def cleanup(fname):\n if os.path.isfile(fname):\n try:\n os.remove(fname)\n print \"Cleaned up\", fname\n except OSError:\n print \"Failed to clean up\", fname",
"def delete_temp_file(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e",
"def check_and_delete(file_loc):\n if os.path.isfile(file_loc):\n os.remove(file_loc)",
"def remove( filename ):\n try:\n os.remove( filename )\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e",
"def remove_file(filename, verbose=True):\r\n if verbose:\r\n LOG.info('Deleting file %s' % os.path.relpath(filename, BASE_DIR))\r\n if not os.path.exists(filename):\r\n LOG.warn(\"File does not exist: %s\" % os.path.relpath(filename, BASE_DIR))\r\n else:\r\n os.remove(filename)"
] | [
"0.77830786",
"0.7718093",
"0.7718093",
"0.7579628",
"0.75730056",
"0.75378186",
"0.7521336",
"0.74646246",
"0.73274463",
"0.7292511",
"0.7247649",
"0.72340125",
"0.7233349",
"0.71774656",
"0.7140424",
"0.71367246",
"0.713443",
"0.7099989",
"0.7091202",
"0.70755124",
"0.7072506",
"0.7067576",
"0.7067535",
"0.70652676",
"0.7041645",
"0.7027996",
"0.6997032",
"0.69751334",
"0.6970521",
"0.6969895"
] | 0.81810206 | 0 |
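The record above pairs the "delete file if it exists" query with a check-then-remove implementation; several of its negatives use race-free variants instead. A minimal sketch of both, assuming only the standard library (Path.unlink(missing_ok=True) needs Python 3.8+):

    import contextlib
    import os
    from pathlib import Path

    def delete_file(input_fn):
        # Check-then-remove, as in the positive document (subject to a
        # time-of-check/time-of-use race if another process removes the file first).
        if os.path.isfile(input_fn):
            os.remove(input_fn)

    def delete_file_race_free(input_fn):
        # Attempt removal and swallow only the "file not found" case.
        with contextlib.suppress(FileNotFoundError):
            os.remove(input_fn)

    # pathlib equivalent: Path(input_fn).unlink(missing_ok=True)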
Initializes this loss scale optimizer. | def __init__(self, optimizer, loss_scale):
if not isinstance(optimizer, optimizer_v2.OptimizerV2):
raise ValueError('"optimizer" must be an instance of OptimizerV2, but '
'got: %s' % optimizer)
if optimizer.clipnorm is not None:
raise ValueError('LossScaleOptimizer does not support wrapping '
'optimizers with a clipnorm. Optimizer %s has clipnorm '
'%s' % (optimizer, optimizer.clipnorm))
if optimizer.clipvalue is not None:
raise ValueError('LossScaleOptimizer does not support wrapping '
'optimizers with a clipvalue. Optimizer %s has '
'clipvalue %s' % (optimizer, optimizer.clipvalue))
self.clipnorm = None
self.clipvalue = None
self._optimizer = optimizer
self._loss_scale = keras_loss_scale_module.get(loss_scale)
if self._loss_scale is None:
raise ValueError('loss_scale cannot be None.')
for weight in loss_scale_module.get_loss_scale_weights(self._loss_scale):
# We cannot call `track_variable` in the LossScale class itself, because a
# file outside of Keras cannot depend on a Keras file. Calling it here
# instead is OK, because a variable only needs to be tracked if used with
# a Keras class, and the only way to use LossScale with a Keras class is
# through the LossScaleOptimizer.
backend.track_variable(weight)
self._track_trackable(self._optimizer, 'base_optimizer')
self._track_trackable(self._loss_scale, 'loss_scale')
# Needed because the superclass's __getattribute__ checks this.
self._hyper = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_loss_and_optimizer(self):\n self.criterion = CrossEntropyLoss()\n self.optimizer = Adam(self.model.parameters(), lr=self.hyper_parameters['lr'])",
"def __init__(self, **kwargs):\n\n super().__init__(**kwargs)\n self.criterion = self._initLoss()",
"def __init__(self, **kwargs):\n\n super().__init__(**kwargs)\n self.criterion = self._initLoss()",
"def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)",
"def initialize_optimization(self):\n\n if self.FLAGS.optimizer == \"Adam\" :\n self.solver = tf.train.AdamOptimizer(\n learning_rate = self.learning_rate,\n beta1 = self.FLAGS.beta1,\n beta2 = self.FLAGS.beta2)\n else:\n print(\"ERROR: Cannot handle optimizer type {}!!!\".format(self.FLAGS.optimizer))\n raise RuntimeError\n \n # batch normalization in tensorflow requires this extra dependency\n # this is required to update the moving mean and moving variance variables\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n self.update = self.solver.minimize(self.loss, global_step=self.global_step)",
"def __init__(self, initializer, scale=1):\n self.scale = normalize_tuple(scale, 2, \"scale\")\n self.initializer = initializer",
"def __init__(self, scale: Union[float, torch.Tensor]):\n\n super().__init__()\n self._scale = scale",
"def __init__(self, scales, downscale, weights=None, train_loss = 'smoothL1', test_loss='L1', mask=False):\n super(MultiScaleLoss, self).__init__()\n self.downscale = downscale\n self.mask = mask\n self.weights = torch.Tensor(scales).fill_(1).cuda() if weights is None else torch.Tensor(weights).cuda()\n assert(len(self.weights) == scales)\n if train_loss == 'smoothL1':\n self.train_loss = smoothL1\n elif train_loss == 'L1':\n self.train_loss = EPE\n else:\n raise NotImplementedError\n if type(test_loss) is str:\n\n if test_loss == 'L1':\n self.test_loss = nn.L1Loss()\n else:\n raise NotImplementedError\n else:\n self.test_loss = test_loss\n self.multiScales = [nn.AvgPool2d(self.downscale*(2**i), self.downscale*(2**i)) for i in range(scales)]\n\n print('self.multiScales: ', self.multiScales, ' self.downscale: ', self.downscale)",
"def __init__(self):\n self.layers = []\n self.best_loss = None",
"def __init__(self, **kwargs):\n super(VitisGlobalAveragePooling2D, self).__init__(**kwargs)\n self.rescale_factor = None",
"def set_loss_scale_status(self, loss_scale_number, init_loss_scale):\n self.loss_scale_number = loss_scale_number\n inner_list = [P._DynamicLossScale(layer=x) for x in range(loss_scale_number + 1)] # pylint: disable=W0212\n self.layer_loss_scale = tuple(inner_list)\n self.dynamic_loss_scale = ParameterTuple(Parameter(Tensor(1, mstype.float32),\n name='layer_loss_scale_{}'.format(x), requires_grad=False)\n for x in range(loss_scale_number + 2))\n if isinstance(init_loss_scale, list):\n for i, value in enumerate(init_loss_scale):\n self.dynamic_loss_scale[i + 1].set_data(value)\n else:\n for i in range(self.loss_scale_number):\n self.dynamic_loss_scale[i + 1].set_data(init_loss_scale)",
"def __init__(self) -> None:\n self.name = \"minmaxScaler\"\n self.min = 0\n self.max = 0",
"def __init__(self):\n super(SpectralConvergenceLoss, self).__init__()",
"def __init__(self,scale):\n self.scale = scale",
"def _initialize_trainer(self):\n self.cost = mse(0., 0.)\n for task_id in self.task_ids.keys():\n self.cost += self.model.get_layer(task_id + '-loss')\n\n opt = Optimizer(self.cost)\n self.optimizer = opt.get_adagrad(self.learning_rate)",
"def __init__(self,\n dtype=tf.float32,\n validate_args=False,\n name='conditional_scale'):\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n super(ConditionalScale, self).__init__(\n forward_min_event_ndims=0,\n is_constant_jacobian=True,\n validate_args=validate_args,\n dtype=dtype,\n parameters=parameters,\n name=name)",
"def __init__(self, scale=False):\n self.scale = scale",
"def initialize(self, problem, problem_data):\n self.problem_data = problem_data\n\n # Generate scaling functions for states, costates\n # constants, constraints, lagrange multipliers\n\n # Convert units to symbols\n self.units_sym = symbols(list(self.units))\n\n # Growing list TODO: Put inside utils\n # TODO: Automate the following sections\n\n # Scaling functions for constants\n # self.scale_func['const'] = {str(const): self.create_scale_fn(const.unit)\n # for const in problem.constants()}\n self.scale_func['const'] = {str(const): lambdify(self.units_sym,sympify2(const.unit))\n for const in problem.constants()}\n\n # Cost function used for scaling costates\n cost_used = [key for (key,val) in problem.cost.items() if val.expr is not '0']\n if len(cost_used) < 1:\n raise ValueError('At least one cost function must be specified as nonzero!')\n cost_unit = problem.cost[cost_used[0]].unit\n\n # Scaling functions for states & costates\n self.scale_func['states'] = {}\n self.scale_func['states'] = {str(state): self.create_scale_fn(state.unit)\n for state in problem.states()}\n self.scale_func['states'].update({ state.make_costate():\n self.create_scale_fn('('+cost_unit+')/('+state.unit+')')\n for state in problem.states()})\n\n # Scaling function for the independent variable\n # TODO: Fix hardcoding\n # self.scale_func['independent_var'] = lambdify(units_sym,sympify2(problem.indep_var().unit))\n self.scale_func['states']['tf'] = self.create_scale_fn(problem.indep_var().unit)\n\n self.scale_func['initial'] = self.scale_func['states']\n self.scale_func['terminal'] = self.scale_func['states']\n\n # Scaling functions for constraint multipliers and other parameters\n self.scale_func['parameters'] = {}\n indices = {}\n for c in problem.constraints():\n if c.type not in indices:\n indices[c.type] = 1 # initialize multiplier index\n\n mul_var = c.make_multiplier(indices[c.type])\n mul_unit = '('+cost_unit+')/('+c.unit+')'\n self.scale_func['parameters'][mul_var] = self.create_scale_fn(mul_unit)\n indices[c.type] += 1 # increment multiplier index",
"def init(self, constrain=True, rescale=True, **kwargs):\n # pylint: disable=W0201\n self.constrain = constrain\n self.rescale = rescale\n self.y_min = None\n self.y_max = None\n self.yhat_mean = None\n self.yhat_sd = None\n self.y_mean = None\n self.y_sd = None\n orig_init(self, **kwargs)",
"def __init__(self, scale, **kwargs):\n super(NormalNoise, self).__init__(**kwargs)\n self._scale = scale",
"def initialize(self):\n self.gc1.reset_parameters()\n self.gc2.reset_parameters()\n\n for s in self.scores:\n stdv = 1. / math.sqrt(s.size(1))\n s.data.uniform_(-stdv, stdv)\n for b in self.bias:\n # fill in b with postive value to make\n # score s closer to 1 at the beginning\n b.data.fill_(self.bias_init)\n\n for Dk in self.D_k:\n stdv = 1. / math.sqrt(Dk.size(1))\n Dk.data.uniform_(-stdv, stdv)\n\n for b in self.D_bias:\n b.data.fill_(0)",
"def __init__(self, scales=(2048, 1024, 512, 256, 128, 64)):\n super(MultiscaleSpectralLoss, self).__init__()\n self.scales = scales\n self.stfts = nn.ModuleList([\n Spectrogram(\n n_fft=scale,\n hop_length=(scale//4))\n for scale in scales\n ])\n self.l1s = nn.ModuleList([\n nn.L1Loss(reduction='mean')\n for scale in scales\n ])",
"def __init__(self, num_parameters=1, init=0.25):\n super(PReLU, self).__init__()\n self.num_parameters = num_parameters\n self.weight = Parameter(Tensor(num_parameters).fill_(init))",
"def __init__(self, input_dim=600+9, output_dim=1*3, dropout_prob=0., scale=3):\n super(F0_RNN_Scaled, self).__init__(input_dim=input_dim, output_dim=output_dim, dropout_prob=dropout_prob)\n self.scale = scale",
"def _initLoss(self):\n\n return torch.nn.MSELoss()",
"def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True",
"def initial_state(self, parameters = None):\n if parameters is None:\n parameters = self._get_static_parameters_or_die()\n scale_linear_op = self._initial_dist_scale_ctor(\n parameters.get('initial_dist_scale'))\n return Value(\n state=ed.MultivariateNormalLinearOperator(\n loc=0.0, scale=scale_linear_op))",
"def _initLoss(self):\n\n pass",
"def initialize(self, optimizer):\n limit = 1 / math.sqrt(self.input_shape[0])\n self.W = np.random.uniform(-limit, limit, (self.input_shape[0], self.units)) # TODO check limits\n self.b = np.zeros((1, self.units))\n\n self.W_optimizer = copy.copy(optimizer)\n self.b_optimizer = copy.copy(optimizer)",
"def __init__(self, args: Namespace):\n self.loss_key = args.loss_key\n super().__init__(args)"
] | [
"0.7157795",
"0.6916654",
"0.6916654",
"0.68659997",
"0.6758468",
"0.67563814",
"0.6621184",
"0.66053504",
"0.660172",
"0.6601449",
"0.6474571",
"0.6467551",
"0.64525384",
"0.6447518",
"0.64248365",
"0.641149",
"0.63619477",
"0.6337405",
"0.6334744",
"0.632274",
"0.63118184",
"0.63087827",
"0.6286836",
"0.62650704",
"0.62557334",
"0.624711",
"0.6236755",
"0.6221989",
"0.6220243",
"0.6214462"
] | 0.71536416 | 1 |
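The constructor in the record above belongs to the experimental Keras mixed-precision wrapper. A minimal usage sketch, assuming the tf.keras.mixed_precision.experimental API that matches this code (roughly TF 2.0-2.3; later releases expose it as tf.keras.mixed_precision.LossScaleOptimizer with a slightly different signature):

    import tensorflow as tf

    base_opt = tf.keras.optimizers.SGD(learning_rate=0.01)
    # Wrap the base optimizer; 'dynamic' builds a DynamicLossScale internally.
    opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
        base_opt, loss_scale='dynamic')

    # When passed to model.compile(optimizer=opt, ...), loss scaling is applied
    # automatically inside minimize()/get_gradients(); no further changes needed.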
The `LossScale` instance associated with this optimizer. | def loss_scale(self):
return self._loss_scale | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_loss_scale(self):\n return self._loss_scale",
"def get_scaled_loss(self, loss):\n loss_scale = self._loss_scale()\n if callable(loss):\n def new_loss():\n loss_val = loss()\n return loss_val * math_ops.cast(loss_scale, loss_val.dtype)\n return new_loss\n else:\n return loss * math_ops.cast(loss_scale, loss.dtype)",
"def scale(self):\n return self._scale",
"def scaling(self):\n return self.__scaling",
"def scaling(self):\n return self._scaling",
"def scaling(self):\n return self._scaling",
"def scale(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scale\")",
"def scale(self):\n return self.distribution.scale",
"def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")",
"def get_scale(self):\n\n if not hasattr(self, \"scale\"):\n raise NotImplementedError(\"\"\"All end-use subclasses of Color should define\n a get_scale method.\"\"\")\n\n return self.scale",
"def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")",
"def get_scale_op(self):\n\t\treturn self.variables.get('scale')",
"def getScale(self):\n return self.factor**self.turnOn",
"def scale(self):\n return self._gev_bijector.scale",
"def GetScale(self):\n ...",
"def scale(self):\n return self._a",
"def scale_parameter(self):\n return self._scale_parameter",
"def scale(self) -> Tuple[float, float]:\n return self._scale",
"def getScale(self):\n return _libsbml.Unit_getScale(self)",
"def __init__(self, optimizer, loss_scale):\n if not isinstance(optimizer, optimizer_v2.OptimizerV2):\n raise ValueError('\"optimizer\" must be an instance of OptimizerV2, but '\n 'got: %s' % optimizer)\n if optimizer.clipnorm is not None:\n raise ValueError('LossScaleOptimizer does not support wrapping '\n 'optimizers with a clipnorm. Optimizer %s has clipnorm '\n '%s' % (optimizer, optimizer.clipnorm))\n\n if optimizer.clipvalue is not None:\n raise ValueError('LossScaleOptimizer does not support wrapping '\n 'optimizers with a clipvalue. Optimizer %s has '\n 'clipvalue %s' % (optimizer, optimizer.clipvalue))\n\n self.clipnorm = None\n self.clipvalue = None\n\n self._optimizer = optimizer\n self._loss_scale = keras_loss_scale_module.get(loss_scale)\n if self._loss_scale is None:\n raise ValueError('loss_scale cannot be None.')\n for weight in loss_scale_module.get_loss_scale_weights(self._loss_scale):\n # We cannot call `track_variable` in the LossScale class itself, because a\n # file outside of Keras cannot depend on a Keras file. Calling it here\n # instead is OK, because a variable only needs to be tracked if used with\n # a Keras class, and the only way to use LossScale with a Keras class is\n # through the LossScaleOptimizer.\n backend.track_variable(weight)\n self._track_trackable(self._optimizer, 'base_optimizer')\n self._track_trackable(self._loss_scale, 'loss_scale')\n\n # Needed because the superclass's __getattribute__ checks this.\n self._hyper = {}",
"def scaling_object(self):\n return self.__scaling_object",
"def loss(self) -> KernelLoss:\n return self._loss",
"def scaling_adjustment(self):\n return self._scaling_adjustment",
"def get_loss(self):\n raise NotImplementedError",
"def update_loss_scale_status(self, layer, update_ratio):\n layer = layer + 1\n new_loss_scale = self.dynamic_loss_scale[layer] * update_ratio\n P.Assign()(self.dynamic_loss_scale[layer], new_loss_scale)\n return new_loss_scale",
"def loss(self):\n return self._loss",
"def scale_value(self):\n return self._scale_value[2]",
"def scale(self):\n return self.scale_factor / CONSTANTS.AU",
"def overlay_scale(self):\n return self._overlay_scale",
"def loss(self):\n return la.norm(self.resids) / self.normX"
] | [
"0.793493",
"0.73162746",
"0.68573815",
"0.6839494",
"0.67812747",
"0.67812747",
"0.67776465",
"0.6770828",
"0.67481637",
"0.6745338",
"0.6738262",
"0.67050755",
"0.6683727",
"0.6681704",
"0.667356",
"0.66186064",
"0.65862453",
"0.6572908",
"0.6491323",
"0.64412814",
"0.63570654",
"0.6313236",
"0.63082516",
"0.62915784",
"0.6269884",
"0.622585",
"0.6219371",
"0.6168536",
"0.6163269",
"0.61543876"
] | 0.8009351 | 0 |
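The property in the record above just hands back the wrapped LossScale object. A small sketch of reading the live value, assuming the `opt` wrapper from the previous example; LossScale instances are callable and return the current scale as a tensor (the document's own get_scaled_loss relies on this via self._loss_scale()):

    # `opt` is the LossScaleOptimizer built above with loss_scale='dynamic'.
    current_scale = opt.loss_scale()   # tf.Tensor holding the current scale
    print(float(current_scale))        # e.g. 32768.0 for a fresh dynamic scale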
Scales the loss by the loss scale. This method is only needed if you compute gradients manually, e.g. with `tf.GradientTape`. In that case, call this method to scale the loss before passing the loss to `tf.GradientTape`. If you use `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss scaling is automatically applied and this method is unneeded. If this method is called, `get_unscaled_gradients` should also be called. See the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for an example. | def get_scaled_loss(self, loss):
loss_scale = self._loss_scale()
if callable(loss):
def new_loss():
loss_val = loss()
return loss_val * math_ops.cast(loss_scale, loss_val.dtype)
return new_loss
else:
return loss * math_ops.cast(loss_scale, loss.dtype) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loss_scale(self):\n return self._loss_scale",
"def get_loss_scale(self):\n return self._loss_scale",
"def supervised_cost_scale(\n scale, loss_supervised, output_noise_labelled, labelled_target\n):\n cost_supervised = loss_supervised.forward(output_noise_labelled, labelled_target)\n\n cost_supervised *= scale\n return cost_supervised",
"def backprop(self, loss: torch.FloatTensor):\n if self.scaler is not None:\n self.scaler.scale(loss).backward()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()",
"def mmd_gradient_scale(self, x, s_x):\r\n jaco_sfn = jacobian_squared_frobenius_norm(s_x, x, do_summary=self.do_summary)\r\n dis_loss_scale = 1.0 / (self.penalty_weight * tf.reduce_mean(jaco_sfn) + 1.0)\r\n\r\n return dis_loss_scale",
"def gradients_with_loss_scaling(loss, loss_scale):\n\n grads = [(grad[0] / loss_scale,grad[1]) for grad in\n tf.train.AdamOptimizer(learning_rate=learning_rate,epsilon=1e-4).\n compute_gradients(loss * loss_scale,colocate_gradients_with_ops=True)]\n return grads",
"def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")",
"def update_loss_scale_status(self, layer, update_ratio):\n layer = layer + 1\n new_loss_scale = self.dynamic_loss_scale[layer] * update_ratio\n P.Assign()(self.dynamic_loss_scale[layer], new_loss_scale)\n return new_loss_scale",
"def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)",
"def scale(step_size: float) -> GradientTransformation:\n return _scale(step_size=step_size, already_flattened=False)",
"def Scale(*args, **kwargs):\n return _gdi_.GraphicsContext_Scale(*args, **kwargs)",
"def grad_scale(x, multiplier):\n return GradScale(multiplier)(x)",
"def calc_scale(alpha, targets, preds, gamma):\n return alpha * tf.pow(tf.abs(targets - tf.nn.sigmoid(preds)), gamma)",
"def update(self, new_scale=None):\n if not self._enabled:\n return\n\n _scale, _growth_tracker = self._check_scale_growth_tracker(\"update\")\n\n if new_scale is not None:\n # Accept a new user-defined scale.\n if isinstance(new_scale, float):\n self._scale.fill_(new_scale) # type: ignore[union-attr]\n else:\n reason = \"new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False.\"\n assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]\n assert new_scale.numel() == 1, reason\n assert new_scale.requires_grad is False, reason\n self._scale.copy_(new_scale) # type: ignore[union-attr]\n else:\n # Consume shared inf/nan data collected from optimizers to update the scale.\n # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.\n found_infs = [\n found_inf.to(device=_scale.device, non_blocking=True)\n for state in self._per_optimizer_states.values()\n for found_inf in state[\"found_inf_per_device\"].values()\n ]\n\n assert len(found_infs) > 0, \"No inf checks were recorded prior to update.\"\n\n found_inf_combined = found_infs[0]\n\n # Update across all model parallel instances.\n torch.distributed.all_reduce(\n found_inf_combined, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()\n )\n\n if len(found_infs) > 1:\n for i in range(1, len(found_infs)):\n found_inf = found_infs[i]\n # Update across all model parallel instances.\n torch.distributed.all_reduce(\n found_inf, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()\n )\n found_inf_combined += found_inf\n\n if found_inf_combined > 0:\n self._hysteresis_tracker -= 1\n if self._hysteresis_tracker <= 0:\n # When hysteresis becomes zero, follow the native grad scale update rule.\n # Increase scale and reset growth tracker\n torch._amp_update_scale_(\n _scale,\n _growth_tracker,\n found_inf_combined,\n self._growth_factor,\n self._backoff_factor,\n self._growth_interval,\n )\n else:\n # Only reset the growth tracker when hysteresis is larger than zero\n _growth_tracker.fill_(0.0)\n else:\n # When no inf found, follow the native grad scale update rule.\n # Increment growth_tracker, update scale when growth tracker reaches the interval, and\n # reset the hysteresis tracker.\n torch._amp_update_scale_(\n _scale,\n _growth_tracker,\n found_inf_combined,\n self._growth_factor,\n self._backoff_factor,\n self._growth_interval,\n )\n self._hysteresis_tracker = self.hysteresis\n\n # To prepare for next iteration, clear the data collected from optimizers this iteration.\n self._per_optimizer_states = defaultdict(torch.cuda.amp.grad_scaler._refresh_per_optimizer_state)",
"def patch_namespaced_scale_scale(self, body, namespace, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.patch_namespaced_scale_scale_with_http_info(body, namespace, name, **kwargs)\n else:\n (data) = self.patch_namespaced_scale_scale_with_http_info(body, namespace, name, **kwargs)\n return data",
"def scale_dimension(dim, scale):\n if isinstance(dim, tf.Tensor):\n return tf.cast((tf.to_float(dim) - 1.0) * scale + 1.0, dtype=tf.int32)\n else:\n return int((float(dim) - 1.0) * scale + 1.0)",
"def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")",
"def setScale(self, *args):\n return _libsbml.Unit_setScale(self, *args)",
"def scale(X, *, axis=..., with_mean=..., with_std=..., copy=...):\n ...",
"def get_scale(self):\n\n if not hasattr(self, \"scale\"):\n raise NotImplementedError(\"\"\"All end-use subclasses of Color should define\n a get_scale method.\"\"\")\n\n return self.scale",
"def get_scale_op(self):\n\t\treturn self.variables.get('scale')",
"def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)",
"def convert_scale(g, op, block):\n\n scale = op.attr(\"scale\")\n bias = op.attr(\"bias\")\n bias_after_scale = op.attr(\"bias_after_scale\")\n x = g.get_node(op.input(\"X\")[0])\n if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):\n out = x\n else:\n if np.isclose(bias, 0.0):\n out = x * _expr.const(np.array(scale).astype(\"float32\"))\n elif np.isclose(scale, 1.0):\n out = x + _expr.const(np.array(bias).astype(\"float32\"))\n else:\n if bias_after_scale:\n out = x * _expr.const(np.array(scale).astype(\"float32\")) + _expr.const(\n np.array(bias).astype(\"float32\")\n )\n else:\n out = (x + _expr.const(np.array(bias).astype(\"float32\"))) * _expr.const(\n np.array(scale).astype(\"float32\")\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def _scale(x, axis=None):\n x = _remove_baseline(x, axis=axis)\n x /= np.std(x, ddof=1, axis=axis, keepdims=True)\n return x",
"def scale(self, state, action):\n control_action = action[..., : self._true_dim_action[0]]\n scale = super().scale(state, control_action)\n\n return scale",
"def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res",
"def scale(self, data: np.ndarray):\n if self.scale_type == \"min_max\":\n scaled_data = (data - self.predictor_min) / (\n self.predictor_max - self.predictor_mean\n )\n elif self.scale_type == \"normalize\":\n scaled_data = (data - self.predictor_mean) / (\n self.predictor_max - self.predictor_min\n )\n elif self.scale_type == \"standardize\":\n scaled_data = (data - self.predictor_mean) / self.predictor_std\n elif self.scale_type == \"scale\":\n scaled_data = data - self.predictor_mean\n else:\n scaled_data = data\n return scaled_data",
"def update_axis_scale(self, scale, axis='left'):\n self.plt.getAxis(axis).setScale(scale=scale)",
"def scale(self):\n return self._scale",
"def scale(self) -> Tuple[float, float]:\n return self._scale"
] | [
"0.6739694",
"0.6595755",
"0.65169394",
"0.6198273",
"0.61731267",
"0.59716827",
"0.5964872",
"0.58355457",
"0.58057356",
"0.5729986",
"0.57219154",
"0.57206905",
"0.5635103",
"0.5605027",
"0.55879354",
"0.55739707",
"0.55709136",
"0.552222",
"0.55163234",
"0.5515871",
"0.5500191",
"0.54933894",
"0.54815346",
"0.54702187",
"0.54167664",
"0.5404479",
"0.5396361",
"0.53923494",
"0.5381769",
"0.53749305"
] | 0.7564824 | 0 |
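The docstring in the record above describes the manual-gradient pattern without showing it. A minimal sketch of that pattern, assuming the same experimental wrapper as above (model, x, y, and loss_fn are placeholders, not names from the record):

    import tensorflow as tf

    def train_step(opt, model, loss_fn, x, y):
        with tf.GradientTape() as tape:
            loss = loss_fn(y, model(x))
            # Scale the loss so small float16 gradients do not flush to zero.
            scaled_loss = opt.get_scaled_loss(loss)
        scaled_grads = tape.gradient(scaled_loss, model.trainable_variables)
        # Undo the scaling before applying the update.
        grads = opt.get_unscaled_gradients(scaled_grads)
        opt.apply_gradients(zip(grads, model.trainable_variables))
        return loss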
Multiply a (possibly sparse) gradient by the given scale factor. | def _multiply_gradient(gradient, scale):
scale = math_ops.cast(scale, gradient.dtype)
if isinstance(gradient, ops.IndexedSlices):
return ops.IndexedSlices(
gradient.values * scale,
gradient.indices,
dense_shape=gradient.dense_shape)
else:
return gradient * scale | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grad_scale(x, multiplier):\n return GradScale(multiplier)(x)",
"def scaling(mat, factor):\n\treturn mat / (mat + factor)",
"def scale(self, scale_factor: float) -> None:\n self.tensor[:, :3] *= scale_factor",
"def scale(self, factor):\n self.b = factor * self.b",
"def supervised_cost_scale(\n scale, loss_supervised, output_noise_labelled, labelled_target\n):\n cost_supervised = loss_supervised.forward(output_noise_labelled, labelled_target)\n\n cost_supervised *= scale\n return cost_supervised",
"def mmd_gradient_scale(self, x, s_x):\r\n jaco_sfn = jacobian_squared_frobenius_norm(s_x, x, do_summary=self.do_summary)\r\n dis_loss_scale = 1.0 / (self.penalty_weight * tf.reduce_mean(jaco_sfn) + 1.0)\r\n\r\n return dis_loss_scale",
"def scale(self, factor):\n return BSplineFunc(self.kvs, self.coeffs * factor)",
"def convert_scale(g, op, block):\n\n scale = op.attr(\"scale\")\n bias = op.attr(\"bias\")\n bias_after_scale = op.attr(\"bias_after_scale\")\n x = g.get_node(op.input(\"X\")[0])\n if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):\n out = x\n else:\n if np.isclose(bias, 0.0):\n out = x * _expr.const(np.array(scale).astype(\"float32\"))\n elif np.isclose(scale, 1.0):\n out = x + _expr.const(np.array(bias).astype(\"float32\"))\n else:\n if bias_after_scale:\n out = x * _expr.const(np.array(scale).astype(\"float32\")) + _expr.const(\n np.array(bias).astype(\"float32\")\n )\n else:\n out = (x + _expr.const(np.array(bias).astype(\"float32\"))) * _expr.const(\n np.array(scale).astype(\"float32\")\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def scale(self, factor):\n new = self.copy()\n new.d.clear()\n\n for val, prob in self.items():\n new.set(val * factor, prob)\n return new",
"def scale(self, scale):\n\t\tself._current_score *= scale",
"def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show",
"def scale(step_size: float) -> GradientTransformation:\n return _scale(step_size=step_size, already_flattened=False)",
"def scale(self, factors):\n if isinstance(factors, numbers.Number):\n factors = np.ones(self.dim) * factors;\n self.raw_wires.scale(factors);",
"def scale(self,factor):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y*factor for y in self.coord[x]])\n return self",
"def setScaling(factor=1.0):\n dislin.sclfac(factor)",
"def scale(self, scale_x: float, scale_y: float) -> None:\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y",
"def scale(inp, ab):\n\n return inp * ab[0] + ab[1]",
"def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])",
"def scale_operator(L, lmax, scale=1):\n I = sparse.identity(L.shape[0], format=L.format, dtype=L.dtype)\n L *= 2 * scale / lmax\n L -= I\n return L",
"def scale(self, sval: complex) -> None:\n self.coeff = self.coeff.astype(numpy.complex128) * sval",
"def scale(self, scale_factor: Union[float, Tuple[float, float]]):\n\n if isinstance(scale_factor, float):\n self.x *= scale_factor\n self.y *= scale_factor\n self.width *= scale_factor\n self.height *= scale_factor\n\n elif isinstance(scale_factor, tuple):\n scale_x, scale_y = scale_factor\n self.x *= scale_x\n self.y *= scale_y\n self.width *= scale_x\n self.height *= scale_y",
"def scale(inp, ab):\n\n return inp * ab[0] + ab[1]\n # pass",
"def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)",
"def scale(data, factor):\n\n if np.ndim(data) != 2: # only process one IV dataset at a time\n raise IndexError('Incorrect data format')\n\n if np.size(data, 0) < np.size(data, 1):\n data = data.T # make sure data is in columns\n\n # match data types for float multiplication/division\n new_data = data.copy().astype(float)\n\n new_data[:, 1] *= factor\n\n return new_data",
"def scale(self,\n factor_x: Scalar,\n factor_y: Optional[Scalar] = None) -> 'Multipoint[Scalar]':\n return self._context.scale_multipoint(\n self, factor_x, factor_x if factor_y is None else factor_y\n )",
"def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self",
"def gradient(self):\n return ScalingOperator(self.domain, 2.0)",
"def calc_scale(alpha, targets, preds, gamma):\n return alpha * tf.pow(tf.abs(targets - tf.nn.sigmoid(preds)), gamma)",
"def scale(requestContext, seriesList, factor):\n for series in seriesList:\n series.name = \"scale(%s,%g)\" % (series.name,float(factor))\n series.pathExpression = series.name\n for i,value in enumerate(series):\n series[i] = safeMul(value,factor)\n return seriesList",
"def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)"
] | [
"0.6945256",
"0.6364424",
"0.6349341",
"0.63197815",
"0.62594366",
"0.6207935",
"0.6151295",
"0.60449195",
"0.602039",
"0.5922737",
"0.5878117",
"0.5822671",
"0.580783",
"0.5759809",
"0.5734907",
"0.5721659",
"0.57112604",
"0.5709619",
"0.5707256",
"0.56965876",
"0.56932414",
"0.5672784",
"0.56550884",
"0.5640909",
"0.56372553",
"0.56306183",
"0.5627024",
"0.56244713",
"0.55747074",
"0.55621654"
] | 0.80634993 | 0 |
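The helper in the record above is internal (`ops.IndexedSlices` is `tf.IndexedSlices` in the public API). A small usage sketch of the same idea with public names, showing that only the sparse values are scaled while indices and dense_shape pass through untouched:

    import tensorflow as tf

    def multiply_gradient(gradient, scale):
        scale = tf.cast(scale, gradient.dtype)
        if isinstance(gradient, tf.IndexedSlices):
            # Rebuild the IndexedSlices around the scaled values.
            return tf.IndexedSlices(gradient.values * scale,
                                    gradient.indices,
                                    dense_shape=gradient.dense_shape)
        return gradient * scale

    sparse_grad = tf.IndexedSlices(values=tf.constant([[1.0, 2.0]]),
                                   indices=tf.constant([0], dtype=tf.int64),
                                   dense_shape=tf.constant([4, 2], dtype=tf.int64))
    half = multiply_gradient(sparse_grad, 0.5)
    print(half.values.numpy())   # [[0.5 1. ]]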
Copy energy model file. | def copy_model_file(src_dir, dst_dir, file_name, file_ext=None):
file_ext = 'osm' if file_ext is None else file_ext
src_file = src_dir.joinpath('in.{}'.format(file_ext))
dst_file = dst_dir.joinpath('{}.{}'.format(file_name, file_ext))
try:
shutil.copyfile(src_file, dst_file)
except FileNotFoundError:
print('''in.{} doesn't exist.'''.format(file_ext)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return",
"def applyEmevd(self, emevdName):\n emevdFileName = emevdName + '.emevd'\n emevdPathName = 'PTDE/'\n if (self.useDCX):\n emevdFileName = emevdName + '.emevd.dcx'\n emevdPathName = 'REMASTER/'\n\n self.createBackup('event/' + emevdFileName)\n \n if os.path.isfile('enemyRandomizerData/emevd/' + emevdPathName + emevdFileName):\n with open('event/' + emevdFileName, 'wb') as oldf:\n with open('enemyRandomizerData/emevd/' + emevdPathName + emevdFileName, 'rb') as modf:\n oldf.write(modf.read())\n print('copied new ' + emevdFileName)",
"def copyModels():\n\n # Create the local path if it doesn't exist\n createPath(opts.local_path)\n if not opts.local_path.endswith('/'): opts.local_path = opts.local_path + '/'\n\n sys.stdout.write(\"Copying the model files to the local path: %s ...\\n\\n\" % (opts.local_path))\n\n \"\"\"\n # Copy the phrase table - racing not possible\n (ptable_path, ptable) = os.path.split(opts.ruleFile)\n local_path = opts.local_path + os.path.basename(ptable_path)\n createPath(local_path)\n local_ptable = local_path + '/' + ptable\n if not os.path.exists(local_ptable) or not sameSize(opts.ruleFile, local_ptable):\n copyFile(opts.ruleFile, local_ptable, False)\n \"\"\"\n\n # Copy the language model - race condition possible\n (lmodel_path, lmodel) = os.path.split(opts.lmFile)\n local_lmodel = opts.local_path + lmodel\n if not os.path.exists(local_lmodel) or not sameSize(opts.lmFile, local_lmodel):\n copyFile(opts.lmFile, local_lmodel, True)\n\n # Point the models to the local copies\n #opts.ruleFile = local_ptable\n opts.lmFile = local_lmodel",
"def file_copy_form_bcdbfs(self, path, dest):\n source_file = self._file_model.get_by_name(name=path)[0]\n if self.is_dir(dest):\n dest = j.sal.fs.joinPaths(dest, j.sal.fs.getBaseName(path))\n dest_file = self.file_create_empty(dest)\n if source_file.blocks:\n dest_file.blocks = source_file.blocks\n elif source_file.content:\n dest_file.content = source_file.content\n\n dest_file.save()\n return dest_file",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def subtract_model(self, outfile, del_script=True):\n os.system('cp -r {} {}'.format(self.ms, outfile)) \n ct.subtract_model(outfile, delete=del_script)",
"def copy(location):\n\tcopyData = settings.getDataFile()\n\tcopyFileLocation = os.path.abspath(location)\n\tcopy = {\"copyLocation\": copyFileLocation}\n\tdataFile = open(copyData, \"wb\")\n\tpickle.dump(copy, dataFile)\n\tspeech.speak(location + \" copied successfully!\")\n\tspeech.speak(\"Tip: use 'hallie paste' to paste this file.\")",
"def _save_model(self):\n with open(self.filepath, 'wb') as file:\n pickle.dump(self.cmodel, file)",
"def _generate_model(self, specs, experiment = None, filename = 'dist/app/Model.hs'):\n with open(filename, \"w\") as file:\n self._write_model(file, specs, experiment = experiment)",
"def _copy_file ( self, source, dest ):\n return",
"def model(self, new_model):\n if self.locations.empty:\n raise RuntimeError(\"Cannot create a model until locations exist\")\n writer = ModelWriter(self, self.dismod_file)\n new_model.write(writer)\n writer.close()",
"def copy_mx_model_to(model_path, model_epoch, output_folder):\n target_path = os.path.join(output_folder, os.path.basename(model_path))\n logger.info(\"Copying image model from {} to {}\".format(model_path,\n target_path))\n suffix = ['-symbol.json', '-%04d.params' % (model_epoch,)]\n for s in suffix:\n copyfile(model_path + s, target_path + s)\n return target_path",
"def copy(self, src_path: str, tgt_path: str) -> None:",
"def fileCopyToMorph():\r\n print(str(self.copyFilePath))\r\n print(str(self.morphPath))\r\n \"\"\"copyPath = self.createDir + self.name + \"-\" + self.method\r\n print(str(copyPath))\r\n \r\n os.system(copyPath)\"\"\"\r\n os.system(self.copyFilePath)\r\n print(\"Burada sorun yok\")",
"def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")",
"def ImportModelPart(self):\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Importing model part.\")\n problem_path = os.getcwd()\n input_filename = self.settings[\"model_import_settings\"][\"input_filename\"].GetString()\n if self.is_restarted():\n self.get_restart_utility().LoadRestart()\n elif(self.settings[\"model_import_settings\"][\"input_type\"].GetString() == \"mdpa\"):\n # Import model part from mdpa file.\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Reading model part from file: \" + os.path.join(problem_path, input_filename) + \".mdpa\")\n KratosMultiphysics.ModelPartIO(input_filename).ReadModelPart(self.main_model_part)\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Finished reading model part from mdpa file.\")\n self.PrepareModelPartForSolver()\n else:\n raise Exception(\"Other model part input options are not yet implemented.\")\n KratosMultiphysics.Logger.PrintInfo(\"ModelPart\", self.main_model_part)\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]:: \", \"Finished importing model part.\")",
"def save_model(self, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n logger.info('Saving model')\n dst_config_file = os.path.join(output_dir, self.CONFIG_FILE)\n if self.fullpath_input_configfile != dst_config_file:\n shutil.copy(self.fullpath_input_configfile, dst_config_file)\n\n pickle.dump(self.word_det_rfc,\n open(os.path.join(output_dir, self.WORD_DET_RFC), 'wb'))\n pickle.dump(self.reg_coeffs, open(\n os.path.join(output_dir, self.REGRESSION_PARAMS), 'wb'))",
"def deepcopy(self):\n return ModelFile(self._key)",
"def write_equipment_file(self, model, **kwargs):\n output_file = self.output_path + \"/equipment.txt\"\n\n with open(output_file, \"w\") as f:\n\n # Header\n f.write(\"[GENERAL]\\n\")\n current_date = datetime.now().strftime(\"%B %d, %Y at %H:%M:%S\")\n f.write(\"DATE={}\\n\".format(current_date))\n f.write(\"CYME_VERSION=8.02\\n\")\n f.write(\"\\n[SI]\\n\")\n\n # Substations\n #\n if len(self.substations) > 0:\n f.write(\"\\n[SUBSTATION]\\n\")\n f.write(\n \"FORMAT_SUBSTATION=ID,MVA,KVLL,KVLLdesired,R1,X1,R0,X0,R2,X2,PhaseAngle,MVA_1,MVA_2,MVA_3,MVA_4,Conn,PrimaryEquivalentType,SubEqVal1,SubEqVal2,SubEqVal3,SubEqVal4,SubPrimaryLLVoltage,SecondaryFaultReactance,TxfoConnection,HarmonicEnveloppe,BackgroundHarmonicVoltage,BaseMVA,ImpedanceUnit,BranchID_1,PrimProtDevID_1,PrimProtDevNum_1,TransformerID_1,TransformerNum_1,SubXs_1,SecProtDevID_1,SecProtDevNum_1,BranchStatus_1,BranchID_2,PrimProtDevID_2,PrimProtDevNum_2,TransformerID_2,TransformerNum_2,SubXs_2,SecProtDevID_2,SecProtDevNum_2,BranchStatus_2,BranchID_3,PrimProtDevID_3,PrimProtDevNum_3,TransformerID_3,TransformerNum_3,SubXs_3,SecProtDevID_3,SecProtDevNum_3,BranchStatus_3,BranchID_4,PrimProtDevID_4,PrimProtDevNum_4,TransformerID_4,TransformerNum_4,SubXs_4,SecProtDevID_4,SecProtDevNum_4,BranchStatus_4,BranchID_5,PrimProtDevID_5,PrimProtDevNum_5,TransformerID_5,TransformerNum_5,SubXs_5,SecProtDevID_5,SecProtDevNum_5,BranchStatus_5,FailRate,TmpFailRate,MajorRepairTime,\"\n )\n f.write(\n \"MinorRepairTime,MajorFailureProportion,SymbolID,Favorite,Flags,Comments\\n\"\n )\n\n for sub in self.substations:\n if \"sub_ID\" in sub:\n f.write(sub[\"sub_ID\"] + \",\")\n if \"MVA\" in sub:\n f.write(sub[\"MVA\"] + \",\")\n else:\n f.write(\",\")\n if \"KVLL\" in sub:\n # NOTE: Setting the voltage to 1.05pu at the feeder head is raw coded here\n # TODO: Come up with a less dirty way to have 1.05pu at the substation\n f.write(\n \"{a},{b},\".format(\n a=sub[\"KVLL\"], b=float(sub[\"KVLL\"]) * 1.00\n )\n ) # *1.05))\n else:\n f.write(\",,\")\n #\n # TODO: automatically detect if default or real values should be used for source impedance\n #\n if \"R1\" in sub:\n f.write(sub[\"R1\"] + \",\")\n else:\n f.write(\"DEFAULT,\")\n if \"X1\" in sub:\n f.write(sub[\"X1\"] + \",\")\n else:\n f.write(\"DEFAULT,\")\n if \"R0\" in sub:\n f.write(sub[\"R0\"] + \",\")\n else:\n f.write(\"DEFAULT,\")\n if \"X0\" in sub:\n f.write(sub[\"X0\"] + \",\")\n else:\n f.write(\"DEFAULT,\")\n if \"R2\" in sub:\n f.write(sub[\"R2\"] + \",\")\n elif \"R0\" in sub:\n f.write(sub[\"R0\"] + \",\")\n else:\n f.write(\"DEFAULT,\")\n if \"X2\" in sub:\n f.write(sub[\"X2\"] + \",\")\n elif \"X0\" in sub:\n f.write(sub[\"X0\"] + \",\")\n else:\n f.write(\"DEFAULT,\")\n if \"phase_angle\" in sub:\n f.write(sub[\"phase_angle\"] + \",\")\n else:\n f.write(\",\")\n\n f.write(\n \",,,,,,,,,,,,,,,,0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\"\n )\n f.write(\"\\n\")\n\n # Switches\n #\n # Writing default values for switches\n #\n f.write(\"\\n[SWITCH]\\n\")\n f.write(\n \"FORMAT_SWITCH=ID,Amps,Amps_1,Amps_2,Amps_3,Amps_4,KVLL,Reversible,FailRate,TmpFailRate,MajorRepairTime,MinorRepairTime,MajorFailureProportion,StuckProbability,SwitchTime,SymbolOpenID,SymbolCloseID,SinglePhaseLocking,RemoteControlled,Automated,Comments\\n\"\n )\n f.write(\n \"DEFAULT,100.000000,100.000000,100.000000,100.000000,100.000000,25.000000,0,,,,,,,,0,0,0,0,0,\\n\"\n )\n for ID, data in self.switchcodes.items():\n f.write(str(ID) + \",\")\n f.write(data)\n f.write(\"\\n\")\n\n # Fuses\n #\n # Writing 
default values for fuses\n #\n f.write(\"\\n[FUSE]\\n\")\n f.write(\n \"FORMAT_FUSE=ID,Amps,Amps_1,Amps_2,Amps_3,Amps_4,KVLL,Reversible,InterruptingRating,FailRate,TmpFailRate,MajorRepairTime,MinorRepairTime,MajorFailureProportion,StuckProbability,SwitchTime,SymbolOpenID,SymbolCloseID,SinglePhaseLocking,Comments,Manufacturer,Model,TCCRating\\n\"\n )\n f.write(\n \"DEFAULT,100.000000,100.000000,100.000000,100.000000,100.000000,25.000000,0,600.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,0,,,,\\n\"\n )\n for ID, data in self.fusecodes.items():\n f.write(str(ID) + \",\")\n f.write(data)\n f.write(\"\\n\")\n\n # Reclosers\n #\n # Writing default values for reclosers\n #\n f.write(\"\\n[RECLOSER]\\n\")\n f.write(\n \"FORMAT_RECLOSER=ID,Amps,Amps_1,Amps_2,Amps_3,Amps_4,KVLL,Reversible,InterruptingRating,FailRate,TmpFailRate,MajorRepairTime,MinorRepairTime,MajorFailureProportion,StuckProbability,SwitchTime,SymbolOpenID,SymbolCloseID,SinglePhaseLocking,SinglePhaseTripping,RemoteControlled,Automated,Comments,RecloserType,ControlType,Model\\n\"\n )\n f.write(\n \"DEFAULT,100.000000,100.000000,100.000000,100.000000,100.000000,25.000000,0,600.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,0,0,0,0,,1,,\\n\"\n )\n for ID, data in self.reclosercodes.items():\n f.write(str(ID) + \",\")\n f.write(data)\n f.write(\"\\n\")\n\n # Breakers\n #\n # Writing default values for breakers\n #\n f.write(\"\\n[BREAKER]\\n\")\n f.write(\n \"FORMAT_BREAKER=ID,Amps,Amps_1,Amps_2,Amps_3,Amps_4,KVLL,Reversible,InterruptingRating,FailRate,TmpFailRate,MajorRepairTime,MinorRepairTime,MajorFailureProportion,StuckProbability,SwitchTime,SymbolOpenID,SymbolCloseID,SinglePhaseLocking,SinglePhaseTripping,RemoteControlled,Automated,Comments\\n\"\n )\n f.write(\n \"DEFAULT,100.000000,100.000000,100.000000,100.000000,100.000000,25.000000,0,600.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,0,0,0,0,\\n\"\n )\n for ID, data in self.breakercodes.items():\n f.write(str(ID) + \",\")\n f.write(data)\n f.write(\"\\n\")\n\n # Cables\n #\n f.write(\"\\n[CABLE]\\n\")\n f.write(\n \"FORMAT_CABLE=ID,R1,R0,X1,X0,B1,B0,Amps,CableType,UserDefinedImpedances,Frequency,Temperature\\n\"\n )\n f.write(\n \"DEFAULT,0.040399,0.055400,0.035900,0.018200,0.000000,0.000000,447.000000,0,1,60.000000,25.000000\\n\"\n )\n for ID, data in self.cablecodes.items():\n f.write(str(ID))\n for key in [\"R1\", \"R0\", \"X1\", \"X0\", \"B1\", \"B0\", \"amps\", \"cabletype\"]:\n if key in data:\n f.write(\",\" + str(data[key]))\n else:\n f.write(\",\")\n f.write(\",1,60.0000,25.00000\\n\")\n\n # Lines\n #\n if len(self.linecodes_overhead) > 0:\n f.write(\"\\n[LINE UNBALANCED]\\n\")\n f.write(\n \"FORMAT_LINEUNBALANCED=ID,Ra,Rb,Rc,Xa,Xb,Xc,Ba,Bb,Bc,MutualResistanceAB,MutualResistanceBC,MutualResistanceCA,MutualReactanceAB,MutualReactanceBC,MutualReactanceCA,MutualShuntSusceptanceAB,MutualShuntSusceptanceBC,MutualShuntSusceptanceCA,CondID_A,CondID_B,CondID_C,CondID_N1,CondID_N2,SpacingID,AmpsA,AmpsB,AmpsC,UserDefinedImpedances,Transposed\\n\"\n )\n\n for ID, data in self.linecodes_overhead.items():\n f.write(str(ID))\n for key in [\n \"RA\",\n \"RB\",\n \"RC\",\n \"XA\",\n \"XB\",\n \"XC\",\n \"Ba\",\n \"Bb\",\n \"Bc\",\n \"MutualResistanceAB\",\n \"MutualResistanceBC\",\n \"MutualResistanceCA\",\n \"MutualReactanceAB\",\n \"MutualReactanceBC\",\n \"MutualReactanceCA\",\n \"MutualShuntSusceptanceAB\",\n \"MutualShuntSusceptanceBC\",\n \"MutualShuntSusceptanceCA\",\n \"CondID_A\",\n 
\"CondID_B\",\n \"CondID_C\",\n \"CondID_N1\",\n \"CondID_N2\",\n \"SpacingID\",\n \"AmpsA\",\n \"AmpsB\",\n \"AmpsC\",\n \"UserDefinedImpedances\",\n ]:\n if key in data:\n f.write(\",\" + str(data[key]))\n else:\n if key in [\n \"CondID_A\",\n \"CondID_B\",\n \"CondID_C\",\n \"CondID_N1\",\n \"CondID_N2\",\n \"SpacingID\",\n ]:\n f.write(\"NONE,\")\n else:\n f.write(\",0\")\n f.write(\",0\\n\")\n\n # Conductors\n #\n f.write(\"\\n[CONDUCTOR]\\n\")\n f.write(\"FORMAT_CONDUCTOR=ID,Diameter,GMR,R25,Amps,WithstandRating\\n\")\n f.write(\"DEFAULT,1.000001,1.000001,0.7,2000.000000,2000.000000\\n\")\n if len(self.conductors) > 0:\n for ID, data in self.conductors.items():\n if ID == \"DEFAULT\":\n continue\n f.write(ID)\n f.write(data)\n f.write(\"\\n\")\n\n # Spacing table\n #\n f.write(\"\\n[SPACING TABLE FOR LINE]\\n\")\n f.write(\n \"FORMAT_SPACINGTABLEFORLINE=ID,GMDPh-Ph,GMDPh-N,AvgPhCondHeight,AvgNeutralHeight,PosOfCond1_X,PosOfCond1_Y,PosOfCond2_X,PosOfCond2_Y,PosOfCond3_X,PosOfCond3_Y,PosOfNeutralCond_X,PosOfNeutralCond_Y,PosOfNeutralCond_N2_X,PosOfNeutralCond_N2_Y,BundleDistance,NBPhasesPerCircuit,NBConductorsPerPhase,NBNeutrals,TowerType,DistanceA,DistanceB,DistanceC,DistanceD,DistanceE,ConductorStatusN1,ConductorStatusN2,FootingResistanceN1,FootingResistanceN2,TowerSpanN1,TowerSpanN2,Favorite,Flags,Comments\\n\"\n )\n f.write(\n \"DEFAULT,,,,,-0.609600,10.058400,0.000000,8.839200,0.609600,10.058400,0.000000,11.277600,,,0.010000,3,1,1,0,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,1.000000,1.000000,300.000000,300.000000,0,0,\\n\"\n )\n\n f.write(\n \"N_ABOVE_1PH,,,,,0.000000,9.601200,,,,,0.000000,10.363200,,,0.010000,1,1,1,0,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,1.000000,1.000000,300.000000,300.000000,0,0,\\n\"\n )\n f.write(\n \"N_ABOVE_2PH,,,,,-1.127760,9.601200,1.127760,9.601200,,,0.000000,10.363200,,,0.010000,2,1,1,0,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,1.000000,1.000000,300.000000,300.000000,0,0,\\n\"\n )\n f.write(\n \"N_ABOVE_3PH,,,,,-1.127760,9.601200,0.000000,9.601200,1.127760,9.601200,0.000000,10.363200,,,0.010000,3,1,1,0,0.000000,0.000000,0.000000,0.000000,0.000000,0,0,1.000000,1.000000,300.000000,300.000000,0,0,\\n\"\n )\n\n # TODO\n # Add the user-defined spacing tables here\n\n # Capacitors\n #\n if len(self.capcodes) > 0:\n f.write(\"\\n[SHUNT CAPACITOR]\\n\")\n f.write(\n \"FORMAT_SHUNTCAPACITOR=ID,KVAR,KV,CostForFixedBank,CostForSwitchedBank,Type\\n\"\n )\n\n for ID, data in self.capcodes.items():\n f.write(\"capacitor_\" + str(ID) + \",\")\n f.write(data.strip(\",\"))\n f.write(\",0,0,0\")\n f.write(\"\\n\")\n\n # Two winding transformers\n #\n if len(self.two_windings_trans_codes) > 0:\n f.write(\"\\n[TRANSFORMER]\\n\")\n f.write(\n \"FORMAT_TRANSFORMER=ID,Type,KVA,VoltageUnit,KVLLprim,KVLLsec,Z1,Z0,XR,XR0,Conn,WindingType,NoLoadLosses,PhaseShift,IsLTC\\n\"\n )\n\n for ID, data in self.two_windings_trans_codes.items():\n f.write(\"transformer_\" + str(ID) + \",\")\n f.write(data.strip(\",\"))\n f.write(\"\\n\")\n\n # Three winding transformers\n #\n if len(self.three_windings_trans_codes) > 0:\n f.write(\"\\n[THREE WINDING TRANSFORMER]\\n\")\n f.write(\n 
\"FORMAT_THREEWINDINGTRANSFORMER=ID,PrimaryRatedCapacity,PrimaryVoltage,PrimaryConnection,PrimaryToSecondaryZ1,PrimaryToSecondaryZ0,PrimaryToSecondaryXR1,PrimaryToSecondaryXR0,PrimaryToTertiaryZ1,PrimaryToTertiaryZ0,PrimaryToTertiaryXR1,PrimaryToTertiaryXR0,SecondaryToTertiaryZ1,SecondaryToTertiaryZ0,SecondaryToTertiaryXR1,SecondaryToTertiaryXR0,SecondaryCapacityLimit1,SecondaryCapacityLimit2,TertiaryCapacityLimit1,TertiaryCapacityLimit2,TertiaryConnection,NoLoadLosses\\n\"\n )\n for ID, data in self.three_windings_trans_codes.items():\n f.write(\"3_wdg_transformer_\" + str(ID) + \",\")\n f.write(data.strip(\",\"))\n f.write(\"\\n\")\n\n # Regulators\n #\n if len(self.reg_codes) > 0:\n f.write(\"\\n[REGULATOR]\\n\")\n f.write(\n \"FORMAT_REGULATOR=ID,KVA,Bandwidth,CT,PT,Type,KVLN,MaxBuck,MaxBoost,Taps,Reversible\\n\"\n )\n\n for ID, data in self.reg_codes.items():\n f.write(\"regulator_\" + str(ID) + \",\")\n f.write(data.strip(\",\"))\n f.write(\"\\n\")\n\n if len(self.irradiance_profiles) > 0:\n f.write(\"\\n[INSOLATION MODEL] \\n\")\n f.write(\"FORMAT_INSOLATIONMODEL=ID,FromFile,FileName\\n\")\n for i in self.irradiance_profiles:\n f.write(\n \"{label},1,{loc}\".format(\n label=i, loc=self.irradiance_profiles[i]\n )\n )\n f.write(\"\\n\")\n\n if len(self.bess_codes) > 0:\n f.write(\"\\n[BESS] \\n\")\n f.write(\n \"FORMAT_BESS=ID,RatedStorageEnergy,MaxChargingPower,MaxDischargingPower,ChargeEfficiency,DischargeEfficiency\\n\"\n )\n for value in self.bess_codes:\n f.write(self.bess_codes[value] + \",\" + value + \"\\n\")\n f.write(\"\\n\")",
"def model_path(self, shared_tmpdir, _root):\n new_file = shared_tmpdir.joinpath(\"test_random.mdl\")\n shutil.copy(\n _root.joinpath(\"more-tests/random/test_random.mdl\"),\n new_file\n )\n return new_file",
"def readmodel(model = 'dominguez'):\n ebl_file_path = os.path.join(os.path.split(__file__)[0],'data/')\n\n if model == 'kneiske':\n file_name = join(ebl_file_path , 'ebl_nuFnu_tanja.dat')\n elif model == 'franceschini':\n file_name = join(ebl_file_path , 'ebl_franceschini.dat')\n elif model == 'dominguez':\n file_name = join(ebl_file_path , 'ebl_dominguez11.out')\n elif model == 'dominguez-upper':\n file_name = join(ebl_file_path , 'ebl_upper_uncertainties_dominguez11.out')\n elif model == 'dominguez-lower':\n file_name = join(ebl_file_path , 'ebl_lower_uncertainties_dominguez11.out')\n elif model == 'inoue':\n file_name = join(ebl_file_path , 'EBL_z_0_baseline.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_baseline.dat')\n elif model == 'inoue-low-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_low_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_low_pop3.dat')\n elif model == 'inoue-up-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_up_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_up_pop3.dat')\n elif model == 'gilmore':\n file_name = join(ebl_file_path , 'eblflux_fiducial.dat')\n elif model == 'gilmore-fixed':\n file_name = join(ebl_file_path , 'eblflux_fixed.dat')\n elif model == 'cuba':\n file_name = join(ebl_file_path , 'CUBA_UVB.dat')\n elif model == 'finke':\n file_name = join(ebl_file_path , 'ebl_modelC_Finke.txt')\n else:\n raise ValueError(\"Unknown EBL model chosen!\")\n\n data = np.loadtxt(file_name)\n if model.find('inoue') >= 0:\n z = np.array([0.])\n #z = data[0,1:]\n #nuInu = data[:,1]\n lmu = data[:,0]\n nuInu = np.array([data[:,1]]).T\n raise ValueError('Inoue models not correctly implemented at the moment, choose another model')\n\n elif model.find('gilmore') >= 0:\n z = data[0,1:]\n lmu = data[1:,0] * 1e-4 # convert from Angstrom to micro meter\n nuInu = data[1:,1:] \n nuInu[nuInu == 0.] = 1e-20 * np.ones(np.sum(nuInu == 0.))\n \n # convert from ergs/s/cm^2/Ang/sr to nW/m^2/sr\n nuInu = (nuInu.T * data[1:,0]).T * 1e4 * 1e-7 * 1e9 \n\n elif model == 'cuba':\n z = data[0,1:-1]\n lmu = data[1:,0] * 1e-4\n nuInu = data[1:,1:-1]\n\n # replace zeros by 1e-40\n idx = np.where(data[1:,1:-1] == 0.)\n nuInu[idx] = np.ones(np.sum(nuInu == 0.)) * 1e-20\n\n # in erg / cm^2 / s / sr\n nuInu = (nuInu.T * c.c.value / (lmu * 1e-6)).T \n nuInu *= 1e6 # in nW / m^2 / sr\n\n # check where lmu is not strictly increasing\n idx = np.where(np.diff(lmu) == 0.)\n for i in idx[0]:\n lmu[i+1] = (lmu[i + 2] + lmu[i]) / 2.\n\n else:\n z = data[0,1:]\n lmu = data[1:,0]\n nuInu = data[1:,1:]\n if model == 'finke': \n lmu = lmu[::-1] * 1e-4\n nuInu = nuInu[::-1]\n\n return EBL(z,lmu,nuInu, model = model)",
"def copyFile(self, *args):\n return _libSALOME_LifeCycleCORBA.SALOME_LifeCycleCORBA_copyFile(self, *args)",
"def copyTwr(self):\n # this is executing during write_input, so curdir is run_dir\n shutil.copyfile(os.path.join(self.fst_dir,self.twr_file), self.twr_file)",
"def copy_file(self, filename):\n shutil.copyfile(os.path.join('testdata', filename),\n os.path.join(self.tmpdir, filename))",
"def export_model(model, name):\n\tpath = \"data/{}/\".format(name)\n\tfilename = \"{}.model\".format(name)\n\tif os.path.isdir(path):\n\t\tprint(\"model already exists\")\n\t\treturn\n\telse:\n\t\tos.mkdir(path)\n\t\tjoblib.dump(model, path + filename)",
"def save_xml(self, filename):\n if \".xml\" not in filename:\n filename = filename + \".xml\"\n\n shutil.copyfile(self.env.model_file, filename)",
"def copy_source(self, filename, new_filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n new_file_path = os.path.join(self.storage_path, new_filename)\n shutil.copyfile(file_path, new_file_path)",
"def load_model(file_index):\n normal, abnormal, all = read_in(file_index, 1, 2, 0.3)\n autoencoder = keras.models.load_model('Working_Data/ae_patient_' + str(file_index) + '_dim' + str(100) + '_model.h5')\n reconstructed = autoencoder.predict(all)\n reconstruction_save = \"Working_Data/reconstructed_cdae_10d_Idx\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstructed)"
] | [
"0.5939559",
"0.5896609",
"0.57205296",
"0.5705327",
"0.5664915",
"0.5664915",
"0.5664915",
"0.5561054",
"0.5485006",
"0.5483633",
"0.5475485",
"0.54704446",
"0.5431423",
"0.5431291",
"0.5385497",
"0.53754187",
"0.53675175",
"0.5362274",
"0.53395766",
"0.53362334",
"0.53218025",
"0.53034455",
"0.5296709",
"0.528386",
"0.5268591",
"0.5250458",
"0.5234133",
"0.5200141",
"0.52001345",
"0.519575"
] | 0.6624411 | 0 |
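
Each row in this dump pairs a list of 30 negative code snippets with a parallel list of 30 similarity scores, followed by the positive document's score and its rank. Below is a minimal sketch of how the two parallel lists could be inspected once a row is parsed into Python objects; the variable names and placeholder snippets are hypothetical, not part of the dataset:

# Placeholder row fields (hypothetical): in the real dump each list has 30 entries
# and the scores are stored as strings.
negatives = ["def copy_file(self, filename): ...", "def export_model(model, name): ..."]
negative_scores = ["0.5939559", "0.5896609"]

# Pair each negative with its score and sort descending, so the hardest
# (highest-scoring) negatives come first.
ranked = sorted(
    zip(negatives, (float(s) for s in negative_scores)),
    key=lambda pair: pair[1],
    reverse=True,
)
for snippet, score in ranked[:3]:
    print(f"{score:.4f}  {snippet.splitlines()[0][:60]}")
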
Creates an output file in the form of <topic>summary.txt with the results | def write_to_out(result_list, topic):
# Use formatted string to make the topic-specific output file name
out_filename = f"{topic}summary.txt"
# Using the with...as construct to open an output file in write mode
with open(out_filename, "w", encoding="utf-8") as out_file:
# For every list in the result_list
for list in result_list:
# The first element in the list is the url
url = list[0]
# The second element in the list is a list of references to
# the topic
mentions = list[1]
# Write the url and a new line to the output file
out_file.write(url + "\n")
# Iterate over all the references
for each_mention in mentions:
# Write each reference and new line
out_file.write(each_mention + "\n")
# Write out 70 lines of dashes to separate the different urls
out_file.write("---------------------------------------" +
"-------------------------------\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export(self, outdir, topic_docs = None):\n parsed_topics_fn = outdir+'/parsed_topics_'+self.name+'.csv'\n parsed_topics = self.parse_topics()\n with open(parsed_topics_fn, 'wb') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(['topic index', 'top words'])\n for i,t in enumerate(parsed_topics):\n writer.writerow([i]+t)\n if topic_docs is not None:\n doc_tops_fn = outdir+'/doc_topics_'+self.name+'.csv'\n pnos,texts = topic_docs\n doc_tops = self.doc_topics(texts)\n with open(doc_tops_fn, 'wb') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(['pno', 'top 10 topics'])\n for pno,dts in zip(pnos, doc_tops):\n writer.writerow([pno]+dts)\n visualize_fn = outdir+'/vis'+self.name+'.html'\n self.visualize(visualize_fn)",
"def create_summary_statistics(forward_accuracy, backward_accuracy, merged_accuracy):\n summary_statistics = open(f'summary_statistics.txt', 'a')\n summary_statistics.write(f'The forward model has an accuracy of: {forward_accuracy}\\n')\n summary_statistics.write(f'The backward model has an accuracy of: {backward_accuracy}\\n')\n summary_statistics.write(f'The merged model has an accuracy of: {merged_accuracy}\\n')\n summary_statistics.close()",
"def generate_summary(final_dictionary):\n otpt = open('multifind_summary.txt', 'w')\n for cat in final_dictionary:\n category_name = cat[0] + ': ' + str(len(cat[1])) + '\\n'\n otpt.write(category_name)\n for entry in cat[1]:\n otpt.write('\\t' + str(entry[0]) + '\\n')\n otpt.write('\\t\\tTotal Entries: %s\\n' % str(entry[1]))\n otpt.write('\\t\\tUnique Species: %s\\n' % str(entry[2]))\n count = 0\n for sp in entry[3]:\n if count < entry[2]-1:\n if count == 0:\n otpt.write('\\t\\tSpecies: ' + sp + ', ')\n else:\n otpt.write(sp + ', ')\n else:\n otpt.write(sp + '\\n')\n count += 1\n otpt.close()",
"def generate_simple_report(final_dictionary):\n otpt = open('multifind_simple_summary.txt', 'w')\n for cat in final_dictionary:\n category_name = cat[0]\n category_cont = str(len(cat[1]))\n otpt.write(category_name + ' ')\n otpt.write(category_cont + '\\n')\n otpt.close()",
"def write_output_summary(outfile, read_scores, args):\n\theader = ['sim_info_file', 'sim_sam_file', 'analysis_info_file', 'results_file', 'junc_type', 'score_type', \n\t\t\t 'true_positives', 'true_negatives', 'false_positives', 'false_negatives']\n\t\t\t \n\tfilenames = [args.sim_info, args.sim_sam, args.analysis_info, args.output]\n\ttypes = ['tp', 'tn', 'fp', 'fn']\n\t\t\t \n\twith open(args.output_summary, \"w\") as outfile:\n\t\toutfile.write(\"\\t\".join(header) + \"\\n\")\n\t\t\n\t\tfor score_type in read_scores:\n\t\t\tfor junc_type in read_scores[score_type]:\n\t\t\t\tif junc_type == 'discord':\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]/2) for type in types]\n\t\t\t\telse:\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]) for type in types]\n\t\t\t\tline = filenames + [junc_type, score_type] + scores\n\t\t\t\toutfile.write(\"\\t\".join(line) + \"\\n\")",
"def topic_content(self,W,output_file = \"topic_description.csv\"):\n\n\t\ttopic_top_probs = []\n\t\ttopic_top_words = []\n\n\t\ttt = self.tt_avg(False)\n\n\t\tfor t in xrange(self.K):\n\t\t\ttop_word_indices = tt[:,t].argsort()[-W:][::-1]\n\t\t\ttopic_top_probs.append(np.round(np.sort(tt[:,t])[-W:][::-1],3))\n\t\t\ttopic_top_words.append([self.token_key.keys()[self.token_key.values().index(i)] for i in top_word_indices])\n\n\t\twith codecs.open(output_file,\"w\",\"utf-8\") as f:\n\t\t\tfor t in xrange(self.K):\n\t\t\t\twords = ','.join(topic_top_words[t])\n\t\t\t\tprobs = ','.join([str(i) for i in topic_top_probs[t]])\n\t\t\t\tf.write(\"topic\" + str(t) + ',')\n\t\t\t\tf.write(\"%s\\n\" % words)\n\t\t\t\tf.write(\" \" + ',')\n\t\t\t\tf.write(\"%s\\n\" % probs)",
"def do_write(self, args):\n\t\tasplit = args.split(\" \")\n\t\tfname = asplit[0]\n\t\twhat = asplit[1]\n\n\t\tif what == \"summary\" or what == \"oldsummary\":\n\t\t\twith open(fname, 'w') as f:\n\t\t\t\tform = DresherInterface.summary_format if what == \"summary\" else DresherInterface.oldsummary_format\n\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\tf.write(x)\n\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t#for lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t#\tdw.writerow(dict(zip(form, [self.get_language_info(lang, x) for x in form])))\n\t\t\t\tfor lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\t\tf.write(str(self.get_language_info(lang, x)))\n\t\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tf.write(\"\\t\")\n\t\tif what == \"hierarchies\":\n\t\t\t# format: #vowels, langname, hierarchy, len(hier), #of marks, lfeats, inv, freq, \n\t\t\t# how many times each feat marked, the actual marks, vowel:feature set, unused features\n\t\t\t# take fname to be name of directory to write outfiles to\n\t\t\tif not os.path.exists(fname):\n\t\t\t\tos.mkdir(fname)\n\t\t\tfor lang in self.languages:\n\t\t\t\tnum_vowels = self.get_language_info(lang, \"linv\")\n\t\t\t\tname = lang.name\n\t\t\t\tnum_feats = self.get_language_info(lang, \"lfeats\")\n\t\t\t\tinv = self.get_language_info(lang, \"inv\")\n\t\t\t\tfreq = self.get_language_info(lang, \"freq\")\n\t\t\t\tinv_feats = lang.phone_feat_dict\n\t\t\t\twith open(os.path.join(fname,name.replace(\" \",\"\")+\".txt\"), 'w') as f:\n\t\t\t\t\tf.write(\"num_vowels\\tname\\thierarchy\\tlen_hier\\tnum_marks\\tnumfeats\\tinv\\tfreq\\tfeat_marks\\tinv_marks\\tinv_feats\\tunused_feats\\n\")\n\t\t\t\t\tfor h in lang.hierarchies:\n\t\t\t\t\t\tf.write(str(num_vowels))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(name)\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(h))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(len(h)))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tspec = SDA(lang._phones, lang._features, h)\n\t\t\t\t\t\tmarkedness = sum([x for phone in spec.keys() for x in spec[phone] if x == 1])\n\t\t\t\t\t\tf.write(str(markedness))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(num_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(freq))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tfeat_counts = {f:sum([spec[phone][i] for phone in spec.keys() if spec[phone][i] == 1]) for i, f in enumerate(h)}\n\t\t\t\t\t\tf.write(str(feat_counts))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(spec))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(list(set(lang._features)-set(h))))\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t# make sure all the threads that need to be finished have finished\n\t\t# using .join() on the appropriate groups of threads",
"def generateSummary(fn, allimages):\n\n # create necessary directories\n d = dirname(join(opts.root, fn))\n if not exists(d):\n os.makedirs(d)\n\n otext = u\"\"\n\n for i in allimages:\n l = i._filename\n l += ','\n if i._title:\n l += i._title\n # Make sure it's on a single line\n# print l\n otext += l.replace('\\n', ' ') + '\\n'\n\n # Write out file.\n try:\n afn = join(opts.root, fn)\n tfile = open(afn, \"w\")\n tfile.write(otext.encode(config.Coding))\n tfile.close()\n\n except IOError, e:\n print >> sys.stderr, \"Error: can't open file: %s\" % fn",
"def output(results):\n\n text_file = open(\"problem_1_B_output.txt\", \"w\")\n\n out = \"\"\n\n for i, line in enumerate(results):\n\n string = \"Sample {}: {}, posterior probability of {:.4f}\".format(i + 1,\n line[0],\n line[1])\n\n out += (string + \"\\n\")\n\n text_file.write(out)\n\n text_file.close()",
"def export_topics(self):\n\n # format as a list (for json output), then sort descending by textIDCount\n topics = [{'name': topic['name'], 'count': topic['count'],\n 'verbatims': list(topic['verbatims']), 'textIDs': list(topic['textIDs']),\n 'textIDCount': topic['textIDCount'], 'rank': topic['rank'],\n 'children': '' if 'children' not in topic else topic['children']}\n for topic_id, topic in self.topics.items()]\n topics = sorted(topics, key=lambda topic: topic['textIDCount'], reverse=True)\n\n for i, topic in enumerate(topics):\n # Note that 'rank' is from topic, not child.\n topic['children'] = [{'name': child['name'], 'count': child['count'], 'rank': topic['rank'],\n 'verbatims': list(child['verbatims']), 'textIDs': list(child['textIDs']),\n 'textIDCount': child['textIDCount']}\n for _, child in topic['children'].items()]\n\n topic['children'] = sorted(topic['children'], key=lambda lemma: lemma['textIDCount'], reverse=True)\n\n # If the subtopic count is greater than the topic count, than calc a multiplier to size each subtopic\n child_count = sum([child['textIDCount'] for child in topic['children']])\n child_count_multiplier = 1 if child_count < topic['textIDCount'] else topic['textIDCount'] / child_count\n\n for child in topic['children']:\n child['size'] = child['textIDCount'] * child_count_multiplier\n\n topic['size'] = topic['textIDCount'] - (child_count * child_count_multiplier)\n\n # Prune topics over max_topics (default ~40): we stopped calc'ing rank over the max_topics\n self.model_output[\"children\"] = [topic for topic in topics]\n\n # Build file name and save\n if self.data_date:\n date = datetime.strptime(self.data_date, \"%Y-%m-%d\").strftime('%d') # from YYYY-MM-DD to DD\n file_name = '{}-{}-Topics.txt'.format(self.corpus_name, date)\n else:\n file_name = '{}-Topics.txt'.format(self.corpus_name)\n\n with open(config.OUTPUT_DIR + file_name, 'w') as file:\n json.dump(self.model_output, file)",
"def writeTask1(resultFile, fmDocs, obsDocs, submission=False):\n\t\tfoutput = open(resultFile, \"w\")\n\t\t\t\t\n\t\tfor doc in sorted(fmDocs):\n\t\t\t#doc_1\tFamilyMember\tFather\tNA\n\t\t\tfor fm, sentence in fmDocs[doc]:\n\t\t\t\tfamilyMember, sideOfFamily = fm\n\t\t\t\tif submission:\n\t\t\t\t\tfoutput.write(\"{}\\tFamilyMember\\t{}\\t{}\\n\".format(doc, familyMember, sideOfFamily))\n\t\t\t\telse:\n\t\t\t\t\tfoutput.write(\"{}\\tFamilyMember\\t{}\\t{}\\t{}\\n\".format(doc, familyMember, sideOfFamily, sentence))\n\t\t\n\t\tfor doc in sorted(obsDocs):\n\t\t\t#doc_1\tObservation\tDisease\n\t\t\tfor disease, sentence in obsDocs[doc]:\n\t\t\t\tif submission:\n\t\t\t\t\tfoutput.write(\"{}\\tObservation\\t{}\\n\".format(doc, disease))\n\t\t\t\telse:\n\t\t\t\t\tfoutput.write(\"{}\\tObservation\\t{}\\t{}\\n\".format(doc, disease, sentence))\n\t\tfoutput.close()",
"def write_txt_summary(training_summary, summary_dir):\n summary_path = os.path.join(summary_dir, _SUMMARY_TXT)\n with tf.io.gfile.GFile(summary_path, 'wb') as f:\n logging.info('Training Summary: \\n%s', str(training_summary))\n f.write(json.dumps(training_summary, indent=4))",
"def write_topic(bag, output_file, topic_name, column_names):\n column_mapping = dict(zip(column_names, range(0, len(column_names))))\n\n \"\"\" Go through every message for a given topic, extract its data fields,\n and write it to the output file\n \"\"\"\n msg_count = 1\n for _, msg, _ in bag.read_messages(topics=topic_name):\n sys.stdout.write('\\t\\tWriting message %u%s' % (msg_count, \"\\r\"))\n msg_count += 1\n column_values = {}\n \"\"\" Build a dictionary of field names and their values. The field names\n match the column headers.\n \"\"\"\n find_field_value('', msg, column_values, column_mapping)\n \"\"\" write the discovered values out to the file \"\"\"\n write_topic_line(output_file, column_mapping, column_values)\n\n sys.stdout.write('\\t\\tProcessed %u messages\\n' % (msg_count - 1))",
"def output_results(results, summary, format=\"json\", outstream=sys.stdout):\n\n try:\n\n if format.upper() == \"JSON\":\n\n res_json = {\n \"summary\": summary,\n \"test_results\": results\n }\n\n json.dump(res_json, outstream, indent=4)\n\n elif format.upper() == \"TEXT\":\n\n outstream.write(\"***\\n\")\n outstream.write(\"TEST SUMMARY\\n\")\n outstream.write(\"------------\\n\")\n outstream.write(\"Tests passed: %i\\n\" % summary[\"passed\"])\n outstream.write(\"Tests failed: %i\\n\" % summary[\"failed\"])\n\n outstream.write(\"Success percentage : %.2f%%\\n\"\n % summary[\"success_percentage\"])\n outstream.write(\"Total elapsed time: %.3f seconds\\n\"\n % summary[\"total_elapsed_time\"])\n outstream.write(\"***\\n\")\n\n for res in results:\n\n outstream.write(\"%s\\n\" % res[\"name\"])\n outstream.write(\"\\tStatus:%s\\n\" % res[\"status\"])\n outstream.write(\"\\tElapsed time: %f\\n\" % res[\"elapsed_time\"])\n\n if(res[\"status\"] == \"FAILED\"):\n outstream.write(\"\\tError message: %s\\n\"\n % res[\"error_msg\"])\n\n except KeyError as e:\n print(str(e))",
"def create_file(self):\n for data_element in self.data:\n title = data_element['title']\n anchor = data_element['href']\n example = data_element['example']\n content = data_element['content']\n if example:\n abstract = '<section class=\"prog__container\">{}<br>{}</section>'.format(content, example)\n\n list_of_data = [\n title, # api title\n 'A', # type is article\n '', # no redirect data\n '', # ignore\n '', # no categories\n '', # ignore\n '', # no related topics\n '', # ignore\n '', # no external link\n '', # no disambiguation\n '', # images\n abstract, # abstract\n anchor # url to doc\n ]\n self.output_file.write('{}\\n'.format('\\t'.join(list_of_data)))",
"def _dump_text(output_dir, topic, text):\n topic_id = \"%s-%s\" % (topic, \"help\")\n filename = bzrlib.osutils.pathjoin(output_dir, topic_id + \".txt\")\n f = open(filename, \"w\")\n f.write(text.encode('utf-8'))\n f.close()\n return topic_id",
"def _write_keypoint_results(keypoint_results, gt_folder, pred_folder):",
"def summaryText(self):\n\n print('\\nReport Summary:\\n')\n for author in self.lowQuality.keys():\n if len(self.lowQuality[author]) > 0:\n print('Author: ' + author)\n print('---------------------')\n # do some sorting for readability\n files = []\n file2rating = {}\n for fileRating in self.lowQuality[author]:\n files.append(fileRating[1])\n file2rating[fileRating[1]] = fileRating[0]\n files.sort()\n for fileRating in files:\n print(file2rating[fileRating] + ' :: ' + fileRating)\n print('\\n\\n')",
"def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)",
"def print_topic_word_distribution(corpus, number_of_topics, topk, filepath):\n\tV = len(corpus.vocabulary) # size of vocabulary\n\tassert(topk < V)\n\tf = open(filepath, \"w\")\n\tfor k in range(number_of_topics):\n\t\tword_prob = corpus.topic_word_prob[k, ] # word probability given a topic\n\t\t# print word_prob\n\t\tword_index_prob = []\n\t\tfor i in range(V):\n\t\t\tword_index_prob.append([i,corpus.vocabulary[i],word_prob[i]])\n\t\tword_index_prob = sorted(word_index_prob, key=itemgetter(1), reverse=True) # sort by word count\n\t\twith open('word_index_prob.txt',\"a+\") as f2:\n\t\t\tf2.write(str(word_index_prob)+'\\n')\n\t\t\tf2.close()\n\t\tf.write(\"Topic #\" + str(k) + \":\\n\")\n\t\tfor i in range(topk):\n\t\t\tindex = word_index_prob[i][0]\n\t\t\tf.write(corpus.vocabulary[index] + \" \")\n\t\tf.write(\"\\n\")\n\tprint \"Written topic-word distribution to file: \" + filepath \n\tf.close()",
"def write_summary(self, text):\n summary = Path(self.path, self.folder + \"_summary.txt\")\n with open(summary, \"a\") as summary_file:\n summary_file.write(f\"{text}\\n\")\n shutil.copyfile(summary, summary.parent.parent / summary.name)",
"def write():\n #with st.spinner(\"Loading Dashboard ...\"):\n #ast.shared.components.title_awesome(\"\")\n\n st.title('arXiv - Analytics')\n st.text(\"\")\n if st.checkbox('Most similar words in w2v'):\n user_input = st.text_input(\"Topic (please enter up to two keywords)\", 'Machine Learning')\n user_input = user_input.lower().replace(\" \", \"_\")\n st.text(\"\")\n number_of_similar_words = st.slider('Select a modulus', 3, 50)\n plot_similar_words(model, user_input, number_of_similar_words)\n st.pyplot()\n st.text(\"\")\n if st.checkbox('Word Cloud'):\n cluster = st.slider('Select a cluster', 0, 49)\n word_cloud_kmeans(cluster)\n st.pyplot()",
"def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")",
"def _print_summary_counts(\n self, out_file, categories, result_events_by_status, extra_rows):\n\n # Get max length for category printed name\n category_with_max_printed_name = max(\n categories, key=lambda x: len(x[1]))\n max_category_name_length = len(category_with_max_printed_name[1])\n\n # If we are provided with extra rows, consider these row name lengths.\n if extra_rows is not None:\n for row in extra_rows:\n name_length = len(row[0])\n if name_length > max_category_name_length:\n max_category_name_length = name_length\n\n self._print_banner(out_file, \"Test Result Summary\")\n\n # Prepend extra rows\n if extra_rows is not None:\n for row in extra_rows:\n extra_label = \"{}:\".format(row[0]).ljust(\n max_category_name_length + 1)\n out_file.write(\"{} {:4}\\n\".format(extra_label, row[1]))\n\n for category in categories:\n result_status_id = category[0]\n result_label = \"{}:\".format(category[1]).ljust(\n max_category_name_length + 1)\n count = len(result_events_by_status[result_status_id])\n out_file.write(\"{} {:4}\\n\".format(\n result_label,\n count))",
"def _publish_results(self):\n\n doc = Document()\n date = get_stamp()\n\n labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)\n\n title = 'Mutual Information labels vs ' + self._experiment_name\n self.plot_save(title,\n self._mutual_info,\n self._baseline_mutual_info,\n 'Norm. mutual information',\n labels, date, self._docs_folder, doc)\n\n title = 'Weak classifier accuracy labels vs ' + self._experiment_name\n self.plot_save(title,\n self._classifier_accuracy,\n self._baseline_classifier_accuracy,\n 'Classifier accuracy',\n labels, date, self._docs_folder, doc) #, smoothing_size=3)\n\n title = 'average delta'\n f = plot_multiple_runs(\n self._different_steps[0], # here the X axes are identical\n self._average_delta,\n title=title,\n ylabel='log(delta)',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n title = 'average boosting duration'\n f = plot_multiple_runs(\n self._different_steps[0],\n self._average_boosting_dur,\n title=title,\n ylabel='duration',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n doc.write_file(path.join(self._docs_folder, to_safe_name(self._complete_name() + date + \".html\")))\n\n print('done')",
"def app():\n articles_list = get_popular_articles()\n authors_list = get_popular_authors()\n rates_list = get_days_rate()\n file = open('results.txt', 'w+')\n file.write('Most Popular Articles\\n')\n file.write('%s\\n' % articles_list)\n file.write('\\n')\n file.write('Most Popular Authors\\n')\n file.write('%s\\n' % authors_list)\n file.write('\\n')\n file.write('Days with error rate > 1%\\n')\n file.write('%s\\n' % rates_list)",
"def write_output():\n f = open(OUTPUT_FILE, 'w')\n for case_index, words in get_output():\n f.write('Case #%d: %s\\n' % (case_index, ' '.join(words)))\n f.close()",
"def save_topic_terms(self, topics, output_path):\n topic_terms_df = self.get_topic_terms_df(topics)\n topic_terms_df.to_string(output_path, index=False)",
"def generate_folder_summaries(final_dictionary):\n for cat in final_dictionary:\n if len(cat[1]) == 0:\n pass\n else:\n category_name = cat[0]\n summary_path = \"./%s/summary.txt\" % category_name\n with open(summary_path, 'w') as sumry:\n for match_file in cat[1]:\n sumry.write(match_file[0] + '\\n')",
"def write_result(file, result):\n name = result[0]\n dataset = result[1]\n file.write(name + \"\\n\")\n for data, values in dataset.items():\n file.write(\"%s %.3f %.3f \\n\" %(data, np.mean(values),np.std(values)))"
] | [
"0.7065068",
"0.67121065",
"0.667491",
"0.66702765",
"0.6493708",
"0.64164627",
"0.63807386",
"0.6348919",
"0.62307876",
"0.62293154",
"0.61001325",
"0.6076558",
"0.607312",
"0.60509425",
"0.60321623",
"0.6012794",
"0.59988433",
"0.595068",
"0.59492254",
"0.59465146",
"0.5946238",
"0.5924368",
"0.5921186",
"0.59210306",
"0.59097713",
"0.5897719",
"0.5897126",
"0.5810016",
"0.5798102",
"0.57899916"
] | 0.750176 | 0 |
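
A minimal usage sketch for the write_to_out function documented above. The URLs, mention strings, and topic below are made-up placeholder data; the only assumption taken from the code is that each element of result_list is a two-element list of [url, list_of_mentions]:

# Each element: [url, [mention strings referencing the topic]].
result_list = [
    ["https://example.com/page1", ["...python is widely used...", "...learn python fast..."]],
    ["https://example.com/page2", ["...a short python tutorial..."]],
]
write_to_out(result_list, "python")
# Writes pythonsummary.txt: each url on its own line, followed by its
# mentions, then a dashed separator line.
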
Analyzes the urls in the given file to find matches for the topic | def analyze_urls(filename, topic):
# Initialize an empty list. Note that I store my urls and references
# in a sort of strange way. Each element in result_list is a list of two
# elements, the first element being the url, and the second element
# being a list of all the references to the url
result_list = []
# Using the with...as construct to open the file in read mode
with open(filename, "r", encoding="utf-8") as files:
# Iterate over each line (each is a url)
for line in files:
# Use the try ... except construct
try:
# Try to open each url
with urllib.request.urlopen(line) as url_file:
# Read the page
page = url_file.read()
# Decode the page
decoded_page = page.decode("UTF-8")
# Regex expression to find the places which open
# with a > then have some stuff, then the topic, then
# close with a <
pattern = fr">[^<]*\b{topic}\b.*?<"
# Use the findall method from re to find all of the
# occurrences of pattern in decoded_page as a list
# The flags are IGNORECASE and DOTALL
my_list = re.findall(pattern, decoded_page,
re.IGNORECASE | re.DOTALL)
# If my_list is not empty
if my_list:
# Slice off the the closing and opening angle
# brackets using a list comprehension
new_list = [word[1:-1] for word in my_list]
# Append a new list of two elements to result_list,
# where the first element of the list is the url,
# and the second element of the list is the list of
# references
result_list.append([line, new_list])
# One possible error is the urllib.error.URLError
except urllib.error.URLError as url_err: # Catch the error
# Print a message, url, and the error
print("Error opening url:", line, url_err)
# Another possible error is the UnicodeDecodeError
except UnicodeDecodeError as dec_err: # Catch the error
# Print a message, and url
print("Error decoding url:", line)
# Print the error
print(dec_err)
# Except all other errors
except:
pass
# Return the result_list
return result_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_from_file():\r\n global default_input_path\r\n print \"JoomFind v 1.0\"\r\n print \"\\n\\nTrying to read URL(s) form \" + default_input_path + \" file...\\n\"\r\n try:\r\n if not default_input_path:\r\n f = open(\"urls.txt\")\r\n else:\r\n f=open(default_input_path)\r\n cwd=os.getcwd()\r\n file_path = cwd + path_slash + f.name\r\n\t# extracting url's to list from file\r\n start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',\"\\n\"]]\r\n if not start_urls:\r\n print \"File is empty. Add some URL(s) first.\\n\"\r\n f.close()\r\n return 0\r\n except:\r\n print \"File not found. Make sure it exists.\\n\"\r\n return 0\r\n #print start_urls\r\n \r\n num=str(len(start_urls))\r\n print \"Found \" + num + \" URL(s) on \" + time.asctime(time.localtime(time.time())) + \"\\n\"\r\n \r\n of=open(default_output_path,'a+')\r\n of.write(\"\\n\\n\\tScanning \" + num + \" URL(s) \")\r\n of.write(\"\\n\\n\\tDate\\Time : \" + time.asctime(time.localtime(time.time())) )\r\n of.write(\"\\n\\n\\tInput file path : \" + default_input_path + \"\\n\\n\")\r\n of.close()\r\n \r\n for url in start_urls:\r\n global provided_url\r\n provided_url=url\r\n print \"\\nWorking on URL \" + str(start_urls.index(url)+1) + \": \" + provided_url\r\n processing()\r\n print \"\\nAll done! Check '\" + default_output_path +\"' file for results.\\n\"",
"def read_urls(filename):\n # +++your code here+++\n result = []\n if not path_exists(filename):\n print 'Path ' + filename + ' doesn\\'t exist!'\n sys.exit(1)\n \n # get base url from the filename\n match = re.search(r'\\S*_(\\S*)', filename)\n host = 'http://' + match.group(1)\n \n # read file for urls\n file = open(filename, 'rU')\n for line in file:\n match = re.search(r'\\S*puzzle\\S*.jpg', line)\n if match:\n result.append(host + match.group())\n file.close()\n # sort the list and remove duplicates (-> set)\n return sorted(set(result), key=sortedFn)\n #return sorted(set(result))",
"def read_urls(filename):\n \n urls = []\n with open(filename, 'r') as f:\n for line in f:\n if 'puzzle' in line:\n match = re.search(r'GET\\s(.*)HTTP', line)\n url = match.group(1)\n urls.append(url.strip())\n sorted_urls = sorted(set(urls))\n for url in sorted_urls:\n print (url[-8:-4])\n return sorted_urls",
"def process_textfile(inf):\n list_of_urls_to_check = [line.rstrip() for line in inf.readlines()]\n return list_of_urls_to_check",
"def dorkScanner():\n pysearch.PySearch()\n openfile = open(\"sites.txt\", 'r')\n urls = openfile.read()\n openfile.close()\n return urls",
"def read_urls(filename):\n # +++your code here+++\n\n res=utility(filename)\n for i in res:\n \tprint i",
"def get_urls(links):\n\n temp_list=[]\n url_list=[]\n temp_list2=[]\n #Open the file where the url's are saved and copy the tuple values into an empty list\n z=open('dbdocs.txt','r')\n for line in z:\n temp_list.append(line)\n #print temp_list\n for x in temp_list:\n index=x.find(',')\n if index==-1:\n y=x.split(\" \",1)\n key=int(y[0])\n val=str(x[1]).replace('\\n','')\n url_list.append((key,val))\n else:\n #find the tab seperator between the key and the url, and\n #split them, in order to put in a list\n key=x[0:index-1]\n #print key\n value=str(x[index+3:len(x)-1])\n #print value\n temp_list2.append((int(key),value))\n #Find the url's of the links where the word was found\n for k in links:\n for i,j in temp_list2:\n #print j\n if i==k:\n url_list.append((i,j))\n break\n #print len(url_list)\n #print len(links)\n z.close()\n return url_list",
"def read_urls(filename):\n # Searches the file for any urls containing \"puzzle\", removing duplicates\n # and then sorting them by the word before .jpg\n with open(filename) as f:\n urls = set(re.split(r'(\\S+)', f.read()))\n urls = filter(lambda url: \"puzzle\" in url, urls)\n server = re.split('_', filename)[1]\n for i, url in enumerate(urls):\n urls[i] = 'https://' + server + '/' + url\n return sorted(urls, key=lambda x: re.findall(r'(\\w+).jpg', x))",
"def extract_urls_from_file(f, all_abns, links_existed):\n content = open(CURL_OUTPUT + f).read()\n soup = BeautifulSoup(content)\n\n fh = open(ALL_LINKS + 'all_links.txt', 'a')\n\n cnt = 0\n all_rows = soup.find_all('tr', {'class': 'rgRow'})\n for row in all_rows:\n all_cells = row.find_all('td')\n abn = all_cells[0].text\n if (abn in all_abns):\n link = all_cells[1].findChildren('a')[0]['href']\n if not link in links_existed:\n print(link)\n download_page(link, f, cnt)\n fh.write(link + '\\n')\n cnt = cnt + 1\n\n fh.close()",
"def read_urls(filename):\n with open(filename, 'r') as f:\n line = f.readline()\n pattern = \"GET\" + \"(.+?)\"+ \"jpg\"\n result = []\n\n while len(line) > 0:\n end_point = re.search(pattern, line)\n if end_point != None and end_point.group(0)[4:] not in result:\n if \"no_picture\" not in end_point.group(0)[4:]:\n result.append(end_point.group(0)[4:])\n line = f.readline()\n return sorted(result, key = lambda x: x.split(\"/\")[-1].split(\"-\")[-1])",
"def load_links(self) -> Tuple[List[str], List[str]]:\n\n with open(URL_FILE, 'r') as txt_file:\n lines = txt_file.read().split()\n\n urls = []\n for line in lines:\n urls.append(line.split(',')[0])\n \n return lines, urls",
"def process_links():\n from pymongo import Connection\n conn = Connection()\n db = conn['mchs']\n# db.drop_collection('svodki')\n coll = db['svodki']\n coll.ensure_index(\"url\")\n f = open('alllinks.csv', 'r')\n for l in f:\n parts = l.strip().split('\\t')\n if len(parts) < 4: continue\n year, month, day, url = parts\n o = coll.find_one({'url' : url})\n if o is not None: \n print url, 'passed'\n continue\n u = urllib2.urlopen(url)\n data = u.read()\n u.close()\n data = data.decode('cp1251')\n record = {'year' : int(year), 'month' : int(month), 'day' : int(day), 'url' : url, 'text' : data.encode('utf8')}\n coll.save(record)\n # MCHS site is badly designed and it could block us if we will download pages too often\n time.sleep(5)\n print url, 'processed'",
"def match_url(self, url):\n pass",
"def get_urls(inputfiles):\n urls = []\n scheme_rgx = re.compile(r'^https?://')\n for ifile in inputfiles:\n urls.append(ifile.read().splitlines())\n urls = set([n for l in urls for n in l])\n urls = list(filter(None, urls))\n for i in range(len(urls)):\n if not scheme_rgx.match(urls[i]):\n urls[i] = 'http://' + urls[i]\n return urls",
"def scan_links_from_url(url):\n\n\t#Get the url\n\thtml_io = StringIO.StringIO()\n\n\tcurl = pycurl.Curl()\n\tcurl.setopt(pycurl.URL, str(url))\n\tcurl.setopt(pycurl.WRITEFUNCTION, html_io.write)\n\tcurl.perform()\n\n\thtml = html_io.getvalue()\n\n\thtml_io.close()\n\tcurl.close()\n\n\t#Apply the regex expression and fetch all links from source\n\tregexp = re.compile(\"\"\"http\\:\\/\\/rapidshare\\.(?:com|de)\\/files\\/[\\d]*\\/.*?\\..*?[^\"\\s\\<\\>]*[^.,;'\">\\:\\s\\<\\>\\)\\]\\!]\"\"\")\n\n\treturn regexp.findall(html)",
"def monitor_urls():\n all_files = [f for f in listdir(\n settings.CSV_PATH) if isfile(join(settings.CSV_PATH, f))]\n if all_files:\n for url in get_url_data(settings.CSV_PATH + all_files[0]):\n check_status(url[0], url[1])",
"def find_target_urls(url_list):\n candidate_urls = []\n \n #iterate through urls\n for url in get_urls(get_clean_text(message_list)):\n #skip any urls from our 33mail mask domain\n if re.findall('33mail', url):\n pass\n #return everything else\n else:\n candidate_urls.append(url)\n return candidate_urls",
"def get_links_from_file( filename ):\n\twith open( filename, 'r') as f:\n\t\tfor url in f:\n\t\t\tyield url",
"def parse_links_regex(filename):\n # no idea why we use re...\n import re\n \n try:\n html = open(filename,\"r\")\n html_string = html.read()\n data = re.findall('<a.+href=[\\'|\\\"](.+)[\\'|\\\"].*?>(.+)</a>', html_string)\n\n dictionary = {}\n for (URL,link) in data:\n # handling the case where the link is already there.\n if link in dictionary:\n dictionary[link] += \" and \" + URL\n else:\n dictionary[link] = URL\n\n return dictionary\n \n\n except IOError:\n print (\"File {} does not exist\".format(filename))",
"def _search(self, log, progressbar):\n self._urls = []\n for filename in os.listdir(self._path):\n url = 'file:////' + filename\n self._urls.append(url)\n self._urls.sort()",
"def read_urls(filename, server_name='http://code.google.com/'):\n # Construct unique URLs from file as - http://code.google.com/<url from file>\n animal_list = []\n ordered_list = []\n src_file = open(filename, 'rU')\n for line in src_file :\n animal_path = re.search( 'GET\\s+/(.+jpg)', line )\n if animal_path is not None :\n if animal_path.group(1) not in animal_list :\n animal_list.append( animal_path.group(1) )\n ordered_list = sorted(animal_list,key=sort_img_name)\n # Used in in range loop to operate on ordered_list rather than shallow copy, e.g. for path in ordered_list\n for i in range(0, len(ordered_list), 1) :\n ordered_list[i] = server_name + ordered_list[i]\n return ordered_list",
"def run_linkfinder(self, text, urljoin_fnct):\n\n urls = set()\n # store the text in a separate file for Linkfinder\n tmp_filename_in = \"linkfinder_tmp_%d.in\" % random.randint(0, 2**32)\n with open(tmp_filename_in, \"w\") as f:\n f.write(text)\n\n # Run Linkfinder as subprocess and remove the input file thereafter\n linkfinder_out = \"\"\n try:\n linkfinder_out = subprocess.check_output([\"python3 LinkFinder/linkfinder.py -i \" +\n tmp_filename_in + \" -o cli 2>/dev/null\"], shell=True)\n linkfinder_out = linkfinder_out.decode()\n except subprocess.CalledProcessError:\n pass\n os.remove(tmp_filename_in)\n\n # process Linkfinder's output\n for line in linkfinder_out.split(\"\\n\"):\n if not line:\n continue\n line = line.strip()\n line = self.to_absolute_url(line, urljoin_fnct)\n # if configured, check if the discovered URL is valid and exists\n if self.config[\"check_linkfinder\"].lower() == \"true\":\n try:\n timeout = float(self.config[\"linkfinder_check_timeout\"])\n if str(requests.head(line, timeout=timeout).status_code) != \"404\":\n urls.add(line)\n except:\n pass\n else:\n urls.add(line)\n\n return urls",
"def find_urls(url):\n try:\n #sock = urllib2.urlopen(url)\n result = urlfetch.fetch(url)\n sock = result.content\n parser = URLParser()\n #print sock.read()\n parser.feed(sock.read())\n sock.close()\n parser.close()\n return parser.urls\n except: # This is to take care of links that are not valid.\n return []",
"def find(self):\n if self.start >= SMAX:\n return 4\n link = \"http://www.google.com/search?q={}&start={}\".format(self.qon[1], self.start)\n try:\n fobj = self.opener.open(link)\n except HTTPError:\n self.update(\"Google banned you.\", \"\")\n return 3\n except timeout:\n self.update(\"Timed out or Google banned you.\", \"\")\n return 3\n else:\n data = fobj.read() # google's source\n fobj.close()\n # find a relevant closest position to the link\n index1 = data.find(self.first)\n if index1 == -1: # no results in page or modified pattern\n return 1 # invalid source\n self.start += 1 # now do the increment\n index1 += len(self.first)\n index2 = data.find(self.second, index1)\n url = data[index1:index2]\n # edit url\n newurl = \"\"\n i = 0\n length = len(url)\n while i < length:\n if url[i] == \"%\":\n char = chr(int(url[i + 1] + url[i + 2], 16))\n i += 2\n else:\n char = url[i]\n newurl += char\n i += 1\n url = newurl\n # process it\n if url in self.seen: # link already visited\n return 2\n self.seen.add(url)\n upo = urlparse(url)\n self.update(\"Looking in %s...\" % upo.netloc, \"\")\n try:\n fobj = self.opener.open(url)\n except URLError:\n self.update(\"Invalid link.\", \"\")\n return 2\n except timeout:\n self.update(\"Timed out.\", \"\")\n return 3\n else:\n self.data = fobj.read()\n self.dataLen = len(self.data)\n fobj.close()\n return 0 # all fine",
"def parse_url_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'urls' in f:\n URL_FILES.append(f)\n PY_FILES.remove(f)",
"def fuzzySearch(urls, files, n, cutoff):\n mapping = {}\n for url in urls:\n matches = difflib.get_close_matches(url, files, n, cutoff)\n if matches:\n mapping[url] = matches\n return mapping",
"def fetch_web_cont(self):\n with open(self.input_file) as input_file:\n data = yaml.load(input_file, yaml.FullLoader)\n url_list = data.get(self.url_access)\n regex_list = data.get(self.regex_access)\n\n print('Fetching data:')\n\n for url in url_list:\n # This restores the same behavior as before.\n # Enabling certificate verification by default for stdlib http clients\n context = ssl._create_unverified_context()\n run_time = datetime.now().strftime(\"Date: %d-%m-%Y Time: %I:%M:%S:%f_%p\")\n start = time.perf_counter()\n web_resp = request.urlopen(url, context=context)\n respData = web_resp.read()\n resp_time = '%0.2f s' % (time.perf_counter() - start)\n\n for regex in regex_list:\n contents = re.findall(regex, str(respData))\n with open(self.output_file, 'a') as file:\n if not contents:\n print(run_time, ' | URL: ', url, '| content not found with this regex: ', regex,\n file=file)\n\n else:\n for content in contents:\n print(run_time, ' | URL: ', url, ' | Response Time: ', resp_time,\n url, ' | Contents: ', content, file=file)\n \n with open(self.output_file, 'a') as file:\n \n print('\\n#################################\\n', file=file)",
"def read_urls(file):\r\n with open(file, \"r+\") as url_file:\r\n url_list = url_file.readlines()\r\n return url_list",
"def extract_URLs(self, input_file_name):\n file = open(input_file_name, 'r')\n lines = []\n for line in file.readlines():\n # Don't add empty lines.\n if len(line.strip()) > 0:\n lines.append(line.strip())\n return lines",
"def crawl_all_by_file(file_path, verbose):\n holcrawl.imdb_crawl.crawl_by_file(file_path, verbose)\n holcrawl.metacritic_crawl.crawl_by_file(file_path, verbose)"
] | [
"0.7247138",
"0.7022",
"0.66191137",
"0.65766627",
"0.6553886",
"0.6445074",
"0.63741",
"0.63335973",
"0.63231295",
"0.6276514",
"0.6263442",
"0.6192583",
"0.61920995",
"0.6159629",
"0.61356217",
"0.61198586",
"0.6062683",
"0.6062513",
"0.60128546",
"0.5975132",
"0.5968954",
"0.5932175",
"0.5928068",
"0.59226257",
"0.5908619",
"0.59068686",
"0.590016",
"0.5897836",
"0.58473647",
"0.58459306"
] | 0.7395171 | 0 |
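
A minimal end-to-end sketch combining the two functions documented above, analyze_urls and write_to_out, assuming they live in one module. "urls.txt" is a hypothetical input file containing one URL per line, and "python" is an arbitrary topic; the imports listed are the module-level imports the functions themselves rely on:

import re
import urllib.error
import urllib.request

topic = "python"
# Returns a list of [url, [mentions]] pairs for pages that reference the topic.
results = analyze_urls("urls.txt", topic)
# Writes the matches to pythonsummary.txt.
write_to_out(results, topic)
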
Function adds two numbers and converts to binary. | def add_binary(a, b):
return bin(a + b)[2:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_binary(a,b):\n max_len = max(len(a),len(b))\n a = a + (max_len-len(a))*'0'\n b = b + (max_len-len(b))*'0'\n result = \"\" \n elde = 0 \n for i in range(max_len):\n a_i,b_i = int(a[i]),int(b[i])\n if (a_i + b_i+elde) == 2:\n elde = 1\n t = 0\n else:\n t = (a_i + b_i+elde)%2\n result += str(t)\n if (i == max_len-1) and elde:\n result+=\"1\"\n return result[::-1]",
"def addBinary(self, a: str, b: str) -> str:\n return self.bit_manipulation(a, b)",
"def add_bitwise(b1,b2):\n \n \n \n \n \n if b1 == \"\":\n \n return b2\n \n elif b2 == \"\":\n \n return b1\n \n elif b1 == \"\" and b2 == \"\":\n \n return \"\"\n \n elif b1 == \"1\" and b2 == \"1\":\n \n return \"10\"\n \n else: \n \n rest = add_bitwise(b1[:-1],b2[:-1])\n \n if len(b1) == len(b2): \n \n if b1[-1] == \"0\" and b2[-1] == \"0\":\n \n return rest + \"0\"\n \n elif b1[-1] == \"1\" and b2[-1] == \"0\":\n \n return rest + \"1\"\n \n elif b1[-1] == \"0\" and b2[-1] == \"1\":\n \n return rest + \"1\"\n \n \n elif b1[-1] == \"1\" and b2[-1] == \"1\" and len(b1) != 1 and len(b2) != 1:\n \n rest = add_bitwise(b1[:-1],b2[:-1])\n \n if rest == \"10\":\n \n rest = \"11\" \n \n elif rest == \"\":\n \n rest = \"10\"\n \n elif rest == \"1\":\n \n rest = \"10\"\n \n else: \n \n return \"1\" + rest \n \n return rest + \"0\"\n \n \n elif len(b1) > len(b2):\n \n b2_with_zeroes = \"0\"*(len(b1) - len(b2)) + b2\n \n return add_bitwise(b1,b2_with_zeroes) \n \n \n elif len(b2) > len(b1):\n \n b1_with_zeroes = \"0\"*(len(b2) - len(b1)) + b1\n \n return add_bitwise(b1_with_zeroes,b2)",
"def add_str_binary(num_one, num_two):\n\n max_len = max(len(num_one), len(num_two))\n v_num_one = list(map(int, list(num_one.zfill(max_len))))\n v_num_two = list(map(int, list(num_two.zfill(max_len))))\n sum_bits = [None] * max_len\n carry = 0\n\n for r_idx in reversed(range(max_len)):\n result = v_num_one[r_idx] ^ v_num_two[r_idx]\n if carry and not result:\n result = 1\n carry = 0\n\n if result and carry:\n result = 0\n else:\n carry = v_num_one[r_idx] & v_num_two[r_idx]\n\n sum_bits[r_idx] = result\n\n if carry:\n sum_bits.insert(0, carry)\n\n str_result = ''.join(map(str, sum_bits))\n assert str_result == bin(int(num_one, 2) + int(num_two, 2))[2:]\n return str_result",
"def convert_to_binary(num):\n return '{0:b}'.format(num)",
"def _add(a, b):\n\n # Todo: What if numbers have bigger length than 8\n a = _I2B(a, fixed_length=8)\n b = _I2B(b, fixed_length=8)\n return _B2I([i ^ j for i, j in zip(a, b)])",
"def binary(num):\n binary = \"\"\n \n while num > 0:\n bit = num%2\n binary = str(bit) + binary # on rajoute le bit au nombre en binaire mais à la fin parce que comme ça ça inverse l'ordre\n num = num//2\n\n return binary",
"def decimal_binary(num):\n\treturn \"{:08b}\".format(num)",
"def add(num1,num2):\n if(num2==0):\n return num1\n return add((num1^num2),(num1&num2)<<1)",
"def bit_manipulation(self, a: str, b: str) -> str:\n x, y = int(a, 2), int(b, 2)\n while y:\n answer = x ^ y\n carry = (x & y) << 1\n x, y = answer, carry\n return bin(x)[2:]",
"def binary_add(x, y):\n # Makes sure that the arrays have the same length.\n # Could be changed to padding on extra zeroes, if so desired.\n assert(len(x) == len(y))\n\n z = [0] * (len(x)+1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n # Makes sure that the array is a binary array.\n # Strictly speaking, not necessary. But nice.\n if i not in [0, 1]: return False\n if j not in [0, 1]: return False\n\n # if i and j are both 1 \n if i and j:\n z[a] += 0\n z[a+1] += 1\n # if only one of them is 1\n elif i or j:\n z[a] += 1\n # if they're both 0\n else: pass\n\n if z[a] == 2:\n z[a+1] += 1\n z[a] -= 2\n \n return z[::-1]",
"def num_to_binary(n):\n if n == 0:\n return ''\n elif n % 2 == 1:\n return num_to_binary(n // 2) + '1'\n else:\n return num_to_binary(n // 2) + '0'",
"def add_wo_carry(n1, n2):\n l1 = [int(x) for x in str(n1)]\n l2 = [int(x) for x in str(n2)] \n res1 = map(operator.add, l1, l2)\n res2 = [str(x)[-1] for x in res1]\n return \"\".join(res2)",
"def int2bin(n: int) -> str:",
"def makeBinary(self):\r\n\t\tls = 5.12 #limite superior\r\n\t\tli = -5.12 #limite inferior\r\n\t\tt = 14 # total de binarios\r\n\t\t\r\n\t\tcadena_bits = \"\"\r\n\t\tfor i in self.values:\r\n\t\t\tentero = (int) ( ( ( i - li ) * ( 2 ** t ) ) / ( ls - li ) )\r\n\t\t\t#print entero\r\n\t\t\tcadena_bits += \"{0:b}\".format(entero).zfill(14)\r\n\t\t\t\r\n\t\tself.cadenaBits = cadena_bits\r\n\t\treturn cadena_bits",
"def add(x, y):\n carry = \"\"\n result = \"\"\n carry = \"0\"\n for i in range(len(x) - 1, -1, -1):\n a = carry[0]\n b = x[i]\n c = y[i]\n\n if a == b and b == c and c == '0':\n result = \"0\" + result\n carry = \"0\"\n elif a == b and b == c and c == '1':\n result = \"1\" + result\n carry = \"1\"\n else:\n if a == '1' and b == c and c == '0':\n result = \"1\" + result\n carry = \"0\"\n elif a == '0' and b == '1' and c == '0':\n result = \"1\" + result\n carry = \"0\"\n elif a == '0' and b == '0' and c == '1':\n result = \"1\" + result\n carry = \"0\"\n elif a == '0' and b == '1' and c == '1':\n result = \"0\" + result\n carry = \"1\"\n elif a == '1' and b == '0' and c == '1':\n result = \"0\" + result\n carry = \"1\"\n elif a == '1' and b == '1' and c == '0':\n result = \"0\" + result\n carry = '1'\n return result",
"def intToBinary(x, N):\n return (\"{0:0\" + str(N) + \"b}\").format(x)",
"def sum(self, a, b):\n return int(a) + int(b)",
"def increment(b): \n if b == 11111111:\n return 00000000\n else:\n b = bin_to_dec(b)\n b = b + 1\n res = dec_to_bin (b)\n if len(res) == 8:\n return res\n else:\n c = 8 - len(res)\n return c*'0' + res",
"def bitwise_and(b1,b2):\n \n if b1 == \"\" and b2 == \"\":\n \n return \"\"\n \n elif b1 == \"\":\n \n return \"0\"*len(b2)\n \n elif b2 == \"\":\n \n return \"0\"*len(b1)\n \n \n else: \n \n rest = bitwise_and(b1[:-1],b2[:-1])\n \n if len(b1) == len(b2):\n \n if b1[-1] == \"0\" and b2[-1] == \"0\":\n \n return rest + \"0\"\n \n elif b1[-1] == \"1\" and b2[-1] == \"1\":\n \n return rest + \"1\"\n \n else: \n \n return rest + \"0\"\n \n elif len(b1) > len(b2):\n \n b2_with_zeroes = \"0\"*(len(b1) - len(b2)) + b2\n \n return bitwise_and(b1,b2_with_zeroes) \n \n \n elif len(b2) > len(b1):\n \n b1_with_zeroes = \"0\"*(len(b2) - len(b1)) + b1\n \n return bitwise_and(b1_with_zeroes,b2)",
"def addition(self, first_value, second_value):\n return bytes(first_value + second_value)",
"def int2bin(i):\n if i == 0: return \"0\"\n s = ''\n while i:\n if i & 1 == 1:\n s = \"1\" + s\n else:\n s = \"0\" + s\n i /= 2\n return s",
"def dec_to_bin(num):\n\n count = 0\n out = \"\"\n\n if num == 0 or num == 1:\n return num\n\n while (num > 1):\n rem = num % 2\n num = num / 2\n out = str(rem * 10**count) + out\n count += 1\n \n out = str(num) + out \n return out",
"def __add__(self, other: 'SInt') -> 'SInt':\r\n # Recoding the addition\r\n if type(other) != self.__class__ or len(self) != len(other):\r\n raise TypeError(\"Wrong type or length for other\")\r\n retenue = [0 for i in range(len(self) + 1)]\r\n new_bin = ''\r\n for i in range(len(self)):\r\n k = int(self.binaire[-(i + 1)]) + int(other.binaire[-(i + 1)]) + retenue[i]\r\n new_bin = ['0', '1', '0', '1'][k] + new_bin\r\n retenue[i + 1] = 1 if k > 1 else 0\r\n if self.signe == other.signe and retenue[-1] != retenue[-2]:\r\n raise OverflowError(\"The sum is over the bytes available\")\r\n H = self.__class__(self.nbBytes)\r\n H.binaire = new_bin\r\n return H",
"def to_binary_string(x):\n return \"{0:b}\".format(x)",
"def decimalToBinary(num):\r\n if num > 1:\r\n decimalToBinary(num // 2)\r\n print(num % 2, end='')",
"def flexibase_add(str1, str2, base1, base2):\n result = int_to_base(tmp, base1)\n return result",
"def int_to_binary(x, n=64):\n return format(x, 'b').zfill(n)",
"def decimal_to_binary(num):\n binary_res = \"\"\n while num >= 1:\n binary_char = num % BINARY_BASE\n num = math.floor(num / BINARY_BASE)\n binary_res += str(binary_char)\n if len(binary_res) < REGISTER_SIZE:\n binary_res += \"0\" * (REGISTER_SIZE - len(binary_res))\n return binary_res[::-1]",
"def _bin_backport(x):\n chars = []\n for n in range(7, -1, -1):\n y = x - 2**n\n if y >= 0:\n chars.append('1')\n x = y\n else:\n chars.append('0')\n return ''.join(chars)"
] | [
"0.80124867",
"0.7827019",
"0.76427984",
"0.7577933",
"0.7251505",
"0.70949155",
"0.70785266",
"0.69601315",
"0.6959739",
"0.69178003",
"0.6867724",
"0.6841246",
"0.6840538",
"0.68314004",
"0.67125916",
"0.67007583",
"0.6699635",
"0.669284",
"0.66489726",
"0.6640531",
"0.66307104",
"0.6622231",
"0.6615026",
"0.6599684",
"0.65880096",
"0.65812963",
"0.65668666",
"0.6545632",
"0.6542143",
"0.6531059"
] | 0.8312693 | 0 |
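
A few sanity checks for the add_binary one-liner documented above; the specific numbers are arbitrary. One caveat worth noting: bin() prefixes negative integers with "-0b", so slicing off the first two characters only yields a clean bit string when a + b is non-negative:

assert add_binary(1, 1) == "10"
assert add_binary(5, 3) == "1000"
assert add_binary(0, 0) == "0"
print(add_binary(19, 23))  # -> 101010
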
Initializes Hydra only if it is not already initialized. | def _ensure_hydra_initialized(
config_module: str, job_name: str = "fseval_hydra_utils"
) -> None:
gh = GlobalHydra()
if not gh.is_initialized():
initialize_config_module(
config_module=config_module, job_name=job_name, version_base="1.1"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize():\n manager.initialize()\n logs.exit_great_success()",
"def do_init(self):\n\n pass",
"def __init__(self):\n self._initialized = False\n self.init()",
"def __init__(self):\n self._initialized = False\n self.init()",
"def _manually_initialize(self) -> None:\n # XXX: maybe refactor, this is actually part of the public interface\n pass",
"def initialize():\n dislin.disini()",
"def init(self):\r\n self._parse_options(self._force_args)\r\n self._maybe_daemonize()\r\n self._setup_modules()\r\n self._state = self.INITIALIZED",
"def _real_initialize(self):\n pass",
"def _initialize(self):\n pass",
"def _initialize(self):\n pass",
"def _initialize(self):\n pass",
"def ensure_initialized(self):\n if not self._initialized:\n self.reload()",
"def initialize(self):\n if not self._ready:\n self._real_initialize()\n self._ready = True",
"def _lazy_init(self) -> None:\n # Initialize _is_root and setup streams. These steps would ideally\n # happen in __init__, but _is_root can only be determined after the\n # entire model hierarchy is setup, thus we run it lazily.\n if self._is_root is None:\n self._set_is_root()\n self._setup_output_hook_and_backward_opt_barrier_lists()\n\n if self._is_root and self.disable_reshard_on_root:\n # Don't free the full params for the outer-most (root) instance,\n # since those params will be needed immediately after for the\n # backward pass.\n self.reshard_after_forward = False",
"def __init__(self):\n self.initialized = False",
"def __init__(self):\n self.initialized = False",
"def init(self):\n return True",
"def init(self):\n self.is_init = True\n raise NotImplementedError",
"def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()",
"def _initialize(self):\n self.send_init_command()",
"def initialize():\n environment = Environment()\n environment.setup()",
"def init():\n pass",
"def init_host(self, host):\n LOG.debug(\"init_host\")",
"def _post_init(self):\n pass",
"def initialize(self, *args, **kwargs):\n self.initialized = True",
"def __init__(self):\n self.setup_called = False",
"def pre_init(self) -> None:\n self._check_and_set_network()\n self._check_and_apply_migrations()",
"def __init__(self):\r\n self.initialized = False",
"def __init__(self):\r\n self.initialized = False",
"def init():"
] | [
"0.6209268",
"0.61949795",
"0.6180044",
"0.6180044",
"0.61731255",
"0.6147392",
"0.61265355",
"0.6097938",
"0.6017801",
"0.6017801",
"0.6017801",
"0.59966564",
"0.5966444",
"0.59586257",
"0.5950315",
"0.5950315",
"0.5927798",
"0.5924597",
"0.5913495",
"0.58983785",
"0.58856213",
"0.58807755",
"0.5872287",
"0.5869598",
"0.5862944",
"0.58590394",
"0.5852261",
"0.58487797",
"0.58487797",
"0.5828151"
] | 0.71574265 | 0 |
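A brief usage note for the record above: because of the `is_initialized()` guard, the helper can be called repeatedly without Hydra raising an already-initialized error, after which the ordinary Compose API applies. The module and config names below are placeholders (assumptions), not values taken from the record:

```python
from hydra import compose

# Safe to call more than once; the second call is a no-op.
_ensure_hydra_initialized(config_module="my_package.conf")
_ensure_hydra_initialized(config_module="my_package.conf")

# Assumes a `config.yaml` exists inside the assumed config module.
cfg = compose(config_name="config")
print(cfg)
```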
Grabs the Hydra `ConfigLoader`. | def _get_config_loader(config_module: str) -> ConfigLoader:
_ensure_hydra_initialized(config_module)
gh = GlobalHydra()
cl = gh.config_loader()
return cl | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loader() -> ConfigLoader:\n return ConfigLoader.from_configuration_type(PackageType.AGENT)",
"def loader() -> ConfigLoader:\n return ConfigLoader.from_configuration_type(PackageType.AGENT)",
"def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))",
"def load_config(self):\n pass",
"def get_loader(self) -> BaseLoader:\n return FileSystemLoader(getattr(self, \"cwd\"))",
"def _ensure_hydra_initialized(\n config_module: str, job_name: str = \"fseval_hydra_utils\"\n) -> None:\n gh = GlobalHydra()\n if not gh.is_initialized():\n initialize_config_module(\n config_module=config_module, job_name=job_name, version_base=\"1.1\"\n )",
"def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))",
"def _loadExternalConfig():\n def cached_loadExternalConfig():\n import ConfigParser\n config = ConfigParser.ConfigParser()\n config.readfp(open('/etc/erp5.cfg'))\n return config \n\n cached_loadExternalConfig = CachingMethod(cached_loadExternalConfig,\n id='IntrospectionTool__loadExternalConfig',\n cache_factory='erp5_content_long')\n return cached_loadExternalConfig()",
"def setup_confighelper(self):\n self.cfghelper = cfgmodule.MCfgModule()\n self.cfghelper.load_configfiles(self.configname, self.get_pkgdirimp_config())",
"def load(loggingQueue, name): \n\tlogger = ThreadsafeLogger(loggingQueue, '{0}-{1}'.format(name, 'ConfigLoader'))\n\tthisConfig = {}\n\tconfigParser = configparser.ConfigParser()\n\n\tthisConfig = loadSecrets(thisConfig, logger, configParser)\n\tthisConfig = loadModule(thisConfig, logger, configParser)\n\treturn thisConfig",
"def __init__(self, load_config):\n super().__init__()\n self._load_config = load_config",
"def load(self, configs, container):\n pass;",
"def init_config() -> Config:\n ...",
"def loader(self):\n return self._loader",
"def loader(self):\n return self.loader_class()",
"def setup_config():\n global config\n config = modConfig.Config(cmdline.config)",
"def testLoadConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n self.assertEqual(\n os.path.basename(loader.taskHolders()[0].var('contextConfig')),\n 'config.hjson'\n )",
"def get_or_create_cli_config() -> Dict:\n try:\n return load_yaml(CLI_CONFIG_PATH)\n except FileNotFoundError:\n _init_cli_config()\n return load_yaml(CLI_CONFIG_PATH)",
"def load():\n global tinyConfig\n if not tinyConfig:\n tinyConfig = CmdArgs()\n return tinyConfig",
"def _loadconfig(self):\n\n # Get the Topology, from the topology layout file\n topo = {}\n with open(self._autoconfig_filename, \"r\") as stream:\n try:\n topo = yaml.load(stream)\n if \"metadata\" in topo:\n self._metadata = topo[\"metadata\"]\n except yaml.YAMLError as exc:\n raise RuntimeError(\n \"Couldn't read the Auto config file {}.\".format(\n self._autoconfig_filename, exc\n )\n )\n\n systemfile = self._rootdir + self._metadata[\"system_config_file\"]\n if self._clean is False and os.path.isfile(systemfile):\n with open(systemfile, \"r\") as sysstream:\n try:\n systopo = yaml.load(sysstream)\n if \"nodes\" in systopo:\n self._nodes = systopo[\"nodes\"]\n except yaml.YAMLError as sysexc:\n raise RuntimeError(\n \"Couldn't read the System config file {}.\".format(\n systemfile, sysexc\n )\n )\n else:\n # Get the nodes from Auto Config\n if \"nodes\" in topo:\n self._nodes = topo[\"nodes\"]\n\n # Set the root directory in all the nodes\n for i in self._nodes.items():\n node = i[1]\n node[\"rootdir\"] = self._rootdir",
"def confeditor_loader():\n return MQTTSubscribeDriverConfEditor()",
"def load_config(path_: str) -> Any:\n path = find_system(path_).path\n if path is None:\n raise ValueError(\"Can't find path {path_!r}\".format(path_=path_))\n loader: Callable[[Any], Any]\n if path.endswith('.yaml'):\n loader = yaml.safe_load\n elif path.endswith('.json'):\n loader = json.load\n else:\n raise ValueError('No known loader for {0}'.format(path))\n with open(path) as file_object:\n return loader(file_object)",
"def config():\n return _config",
"def loadConfigModule(name, options, tags):\n if isinstance(name, str):\n LOG.info('Loading %s', name)\n d = {}\n module = __import__(name[:-3], d, d)\n else:\n module = reload(name)\n onload = module.__dict__.get('onload')\n if callable(onload):\n try:\n onload(options, tags)\n except:\n LOG.fatal('Exception while loading %s', name)\n raise\n return module",
"def get_config_handler():\n global config_handler\n # This is to ensure that incase the initial setup\n # of config handler failed,\n return config_handler",
"def load_config():\n path = os.environ.get('WORKER_CONFIG')\n if not path:\n path = _get_default_config_path()\n\n mod_name, file_ext = os.path.splitext(os.path.split(path)[-1])\n config = imp.load_source(mod_name, path)\n return config",
"def config():",
"def config():",
"def get_config():\n return _config",
"def config():\n return Config()"
] | [
"0.67302746",
"0.67302746",
"0.63855314",
"0.6287435",
"0.61643356",
"0.5959769",
"0.59583026",
"0.5888783",
"0.58228844",
"0.5808321",
"0.57732725",
"0.5772872",
"0.5714202",
"0.56751454",
"0.56719464",
"0.56634206",
"0.5655696",
"0.5647409",
"0.56410396",
"0.56160206",
"0.5604981",
"0.5535522",
"0.55298793",
"0.5529378",
"0.5526169",
"0.55233973",
"0.55127245",
"0.55127245",
"0.5504043",
"0.5503755"
] | 0.8083242 | 0 |
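For the record above, one common use of the returned loader is enumerating the options registered under a config group; `get_group_options` is, to the best of my knowledge, part of Hydra's `ConfigLoader` interface. The module and group names here are placeholders, not taken from the record:

```python
# Placeholder names: "my_package.conf" and the "ranker" group are assumptions.
cl = _get_config_loader("my_package.conf")

# List the configs registered under a group (e.g. conf/ranker/*.yaml).
options = cl.get_group_options("ranker")
print(options)
```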
This function takes the routing vector and the neighbour's routing vector and updates the current vector. If there is no update, the second return value is False; otherwise it is True. vec1 = vector to update, vec2 = vector which is received, dist = distance between them | def update_table(vec1, vec2, dist):
flag = False
for router_to in range(len(vec1)):
if vec1[router_to] > vec2[router_to] + dist:
vec1[router_to] = vec2[router_to] + dist
flag = True
return vec1, flag | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def route_update(self, neighbor, dv_list):\n neighbor.is_killed = False\n neighbor.kill_timer = time.time()\n neighbor.dv_update(dv_list)\n # Iterate to see if new node is included in the graph.\n for name in neighbor.distance_vector:\n if name not in self.distance_vector:\n self.distance_vector[name] = Router.OtherRouter(float('Inf'), None)\n if self.update_dv():\n for name in self.neighbors:\n self.neighbors[name].update_ready = True\n self.neighbors[name].send_timer = time.time()",
"def update_dv(self):\n is_changed = False\n for name in self.distance_vector:\n smallest = float('Inf')\n smallest_neighbor = None\n for neighbor_name in self.neighbors:\n if self.neighbors[neighbor_name].is_killed:\n weight = float('Inf')\n else:\n weight = self.neighbors[neighbor_name].weight\n if name in self.neighbors[neighbor_name].distance_vector:\n candidate = self.neighbors[neighbor_name].distance_vector[name]\n candidate += weight\n if smallest > candidate:\n smallest = candidate\n smallest_neighbor = neighbor_name\n if self.distance_vector[name].cost != smallest and name != self.name_str:\n self.distance_vector[name].cost = smallest\n self.distance_vector[name].link = smallest_neighbor\n is_changed = True\n return is_changed",
"def update_route(self, vrpdata):\n self.distance = 0\n self.quantity = 0\n self.tourValid = False\n lastc = 0 # first entry is depot\n for c in self.route:\n self.distance += vrpdata.DistMatrix[lastc][c]\n self.quantity += vrpdata.CustDem[c]\n lastc = c\n self.distance += vrpdata.DistMatrix[lastc][0] # last entry is depot\n self.tourValid = (self.quantity <= vrpdata.MaxVehCap)",
"def savings2routes(self,r1,r2):\n newRoute = VRP_Route(r1.route+r2.route)\n newRoute.update_route(self.vrpdata) # compute distance, quantity for newRoute, check whether valid\n if newRoute.tourValid:\n return r1.distance + r2.distance - newRoute.distance\n return -1",
"def send_update(self, neighbor):\n message = 'ROUTE UPDATE'\n source = ':'.join([self.name[0], str(self.name[1])])\n dv = []\n for others in self.distance_vector:\n others_sep = others.split(':')\n dv.append(','.join([others_sep[0], others_sep[1], str(self.distance_vector[others].cost)]))\n dv = '\\n'.join(dv)\n to_send = '\\n'.join([message, source, dv])\n neighbor.sok.sendto(to_send, (neighbor.addr, neighbor.port))\n neighbor.send_timer = time.time()\n neighbor.update_ready = False",
"def distance_of_x_to_y(start_node, end_node):\n\n # adding the base router's d_vec to n_d_vec\n # will simplify the problem and we can then\n # remove the first conditional statement\n\n # when a router comes back to its parent\n # its distance will ultimately increase\n # and hence that branch will be ignored\n\n global DATA\n\n # neighbor ids will now change for every router\n all_neighbor_ids = [\n neighbor[0]\n for neighbor in DATA[\"n_d_vec\"][start_node]\n ]\n # if start_node is not present at that moment\n # in n_d-vec then an exception (Type Error)\n # will be raised after which we need to\n # return math.inf\n try:\n if end_node in all_neighbor_ids:\n return [\n every_neighbor[1]\n for every_neighbor in DATA[\"n_d_vec\"][start_node]\n if end_node is every_neighbor[0]\n ][0]\n else:\n # we need to handle going back\n # we can pass an initial router\n # from which the algorithm has\n # started and hence we can avoid\n # going back\n\n # we may do some memoization here\n # and hence don't do reevaluation every time\n return min(\n [\n distance_of_x_to_y(start_node, neighbor) +\n distance_of_x_to_y(neighbor, end_node)\n for neighbor in all_neighbor_ids\n ]\n )\n except TypeError as node_err:\n with PRINT_LOCK:\n print(\"the start node is node is not present\\\n at this moment in the n_d_vec \\n{}\"\\\n .format(node_err)\\\n )\n return math.inf",
"def update_neighbours(self, iteration, iterations, input_vector, bmu):\n\n t = iteration / iterations\n learning_rate = self.learning_rate(t)\n for node in self.codebook:\n influence = self.codebook.neighbourhood(node, bmu, t)\n node.update(learning_rate, influence, input_vector, bmu)",
"def __direction(self, vector, hits, coordinate):\n try:\n assert hits is not None\n # Button at the end to the vector\n next_x = coordinate[0]+vector[0]\n next_y = coordinate[1]+vector[1]\n next_coordinate = [next_x,next_y]\n # Check token and save it as new\n if self._board_snapshot[next_y][next_x] == self._player:\n # Add hit and continue if next token is of the players\n return self.__direction(vector, hits+1 ,next_coordinate)\n else:\n return hits\n # Out of bounds\n except IndexError:\n return hits",
"def updateGraphByEuclideanDistance(self, graph, neighborDistance):\r\n graph.adjacencyMatrix = np.matrix(np.zeros(graph.adjacencyMatrix.shape))\r\n for a1 in range(self.agentNum):\r\n for a2 in range(a1+1, self.agentNum):\r\n if np.linalg.norm(self.agentPos[:,a1] - self.agentPos[:,a2]) <= neighborDistance:\r\n graph.adjacencyMatrix[a1,a2] = 1\r\n graph.adjacencyMatrix[a2,a1] = 1\r\n else:\r\n graph.adjacencyMatrix[a1,a2] = 0\r\n graph.adjacencyMatrix[a2,a1] = 0\r\n \r\n assert (graph.adjacencyMatrix == graph.adjacencyMatrix.T).all()",
"def naiveGlobalRouting(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path.append(src_slot)\n\n curr = src_slot\n len_x = src_slot.getLenX()\n len_y = src_slot.getLenY()\n\n # first go in X direction\n x_diff = curr.getPositionX() - dst_slot.getPositionX()\n if x_diff:\n dir = 'LEFT' if x_diff > 0 else 'RIGHT'\n for i in range(int(abs(x_diff/len_x))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n\n y_diff = curr.getPositionY() - dst_slot.getPositionY()\n if y_diff:\n dir = 'DOWN' if y_diff > 0 else 'UP'\n for i in range(int(abs(y_diff/len_y))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n \n assert curr == dst_slot\n \n slot_path = slot_path[1:-1] # exclude the src and the dst\n logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))\n self.e_name2path[e.name] = slot_path",
"def __check_direction(self, vector, coordinate):\n inverse_vector = -vector[0], -vector[1]\n # Calculate hits to direction\n hits = self.__direction(vector,1,coordinate)\n if hits == 5:\n return True\n # After reaching the end, add hits towards the opposite direction\n hits = self.__direction(inverse_vector,hits,coordinate)\n if hits == 5:\n return True",
"def move(self,dvec):\n if (dvec.x + dvec.y) == 0:\n return 1\n if not ((dvec.x * dvec.y) == 0):\n print(\"vector cannot contain both x and y componenets\")\n return 0\n dvec = dvec + self.move_step(dvec)\n return self.move(dvec)",
"def transition(s, direction):\n new_pos = [sum(x) for x in zip(s, direction)] # sum up every element at same index of two lists\n if hit_wall(new_pos):\n return s\n else:\n return new_pos",
"def computeForces(self, neighbors=[]): #computing forces to drive the agents and avoid collisions \n if not self.atGoal:\n if self.entry_state % 2 == 0 and len(self.entrancex) > 0 and self.id != 4 : #checks if assigned curve is entry and switches to state 1 to follow entry bezier curve\n time2=0.5 # time used to calculate driving force \n self.local_goal = [self.entrancex[0], self.entrancey[0]] #assigning waypoint as goal\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2))) #calculating direction vector\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez) #driving force\n self.entrancex = np.delete(self.entrancex,0) #eliminating the used waypoints from the list \n self.entrancey = np.delete(self.entrancey,0) #eliminating the used waypoints from the list \n \n elif self.force_state == 1 and (abs(self.pos[0] - self.goal[0]) >400 or abs(self.pos[1] - self.goal[1]) >400): #checks if force-based navigation is assigned, switches to state 2\n self.F = (self.gvel-self.vel)/self.ksi #driving force\n for neighbor in neighbors:\n if neighbor.id != self.id: #and not neighbor.atGoal: \n distSq = (neighbor.pos-self.pos).dot(neighbor.pos-self.pos)\n #print(distSq, self.dhorSq)\n if distSq < self.dhorSq: # neighbor is inside the sensing radius\n tau = self.ttc(neighbor)\n #print(tau, self.timehor)\n if tau < self.timehor: # will the two agents collide in less than timehor?\n dir = self.pos + self.vel*tau - neighbor.pos - neighbor.vel*tau \n length = sqrt(dir.dot(dir))\n if length > 0:\n dir = dir/length # the direction of the force\n mag = (self.timehor - tau)/(tau + 1e-6) # the magnitude of the force\n self.F += mag*dir # add the force\n \n else: #state 3 - following the exit bezier curve\n time2=0.5 # time used to calculate driving force\n self.local_goal = [self.exitx[0], self.exity[0]]\n if abs(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos)))) >10: #to reach first point of exit curve from agents previous state position\n self.F = ((self.local_goal - self.pos)/(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos) )))*self.prefspeed)/self.ksi\n else:\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2)))\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez)\n #print(self.pos, self.local_goal)\n if len(self.exitx) > 1 :\n self.exitx = np.delete(self.exitx,0)\n self.exity = np.delete(self.exity,0)",
"def __add_neighbours(p1, p2, n1, n2, idx1, idx2, mapping=None, dist1=None,\n dist2=None, dist_tol=0, pairs=None):\n\n if mapping is None:\n mapping = np.zeros((p1.numnodes, p2.numnodes))\n\n for i in range(p1.numnodes):\n for j in range(p2.numnodes):\n weighted_freq, _ = compare_nodes(p1.nodes[i], p2.nodes[j])\n if weighted_freq > 0.0:\n mapping[i][j] = weighted_freq\n if dist1 is None:\n dist1 = distances(p1)\n dist1[p1.edges > 0] = p1.edges[p1.edges > 0]\n\n if dist2 is None:\n dist2 = distances(p2)\n dist2[p2.edges > 0] = p2.edges[p2.edges > 0]\n\n if pairs is None:\n pairs = []\n\n def is_compatible(pair1, pair2):\n if (pair1[0] == pair2[0]) or (pair1[1] == pair1[1]):\n return False\n if (p1.edges[pair1[0], pair2[0]] != 0) and \\\n (p2.edges[pair1[1], pair2[1]] != 0):\n dist_diff = math.fabs((p1.edges[pair1[0], pair2[0]] -\n p2.edges[pair1[1], pair2[1]]))\n if dist_diff <= dist_tol:\n return True\n else:\n return False\n else:\n return True\n\n for i1, i2 in zip(idx1, idx2):\n neighbours1 = []\n for node in reversed(np.where(p1.edges[i1, n1] > 0)[0]):\n neighbours1.append(n1[node])\n n1.remove(n1[node])\n\n neighbours2 = []\n for node in reversed(np.where(p2.edges[i2, n2] > 0)[0]):\n neighbours2.append(n2[node])\n n2.remove(n2[node])\n\n # find compatible neighbours\n for neigh1 in neighbours1:\n for neigh2 in neighbours2:\n if (mapping[neigh1, neigh2] > 0):\n dist_diff = np.abs(dist1[idx1, neigh1] -\n dist2[idx2, neigh2])\n\n # there is edge between nodes in at least one of phars\n connected = np.where(p1.edges[idx1, neigh1] +\n p2.edges[idx2, neigh2])\n max_cost = np.max(dist_diff[connected])\n if (max_cost <= dist_tol):\n pairs.append((neigh1, neigh2))\n\n score = np.sum(mapping[idx1, idx2])\n dist_diff = np.abs(dist1[idx1][:, idx1] - dist2[idx2][:, idx2])\n connected = np.where(p1.edges[idx1][:, idx1] + p2.edges[idx2][:, idx2])\n\n if len(connected[0]) > 0:\n assert np.max(dist_diff[connected]) <= dist_tol, \"cost too high\"\n cost = np.sum(dist_diff[connected]) / 2.0\n\n aln = [idx1[:], idx2[:]]\n\n for i, pair in enumerate(pairs):\n compatible = [p for p in pairs[i+1:] if is_compatible(pair, p)]\n\n s, c, (aln1, aln2) = __add_neighbours(p1, p2, n1[:], n2[:],\n idx1+[pair[0]], idx2+[pair[1]],\n mapping, dist1, dist2, dist_tol,\n compatible)\n\n if (s - c > score - cost) or (s - c == score - cost and s > score):\n score = s\n cost = c\n aln = [aln1[:], aln2[:]]\n\n return score, cost, aln",
"def _get_single_direction_neighbors(object_idx, ui_v_dist, ui_h_dist):\n neighbor_dict = {}\n vertical_dist = ui_v_dist[object_idx]\n horizontal_dist = ui_h_dist[object_idx]\n bottom_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] > 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n top_neighbors = np.array([\n idx for idx in range(len(vertical_dist)) if vertical_dist[idx] < 0 and\n abs(horizontal_dist[idx]) < config.NORM_HORIZONTAL_NEIGHBOR_MARGIN\n ])\n right_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] > 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n left_neighbors = np.array([\n idx for idx in range(len(horizontal_dist)) if horizontal_dist[idx] < 0 and\n abs(vertical_dist[idx]) < config.NORM_VERTICAL_NEIGHBOR_MARGIN\n ])\n\n if bottom_neighbors.size:\n neighbor_dict[NeighborContextDesc.TOP] = bottom_neighbors[np.argmin(\n vertical_dist[bottom_neighbors])]\n if top_neighbors.size:\n neighbor_dict[NeighborContextDesc.BOTTOM] = top_neighbors[np.argmax(\n vertical_dist[top_neighbors])]\n if right_neighbors.size:\n neighbor_dict[NeighborContextDesc.LEFT] = right_neighbors[np.argmin(\n horizontal_dist[right_neighbors])]\n if left_neighbors.size:\n neighbor_dict[NeighborContextDesc.RIGHT] = left_neighbors[np.argmax(\n horizontal_dist[left_neighbors])]\n\n return neighbor_dict",
"def _estimate_velocity_by_neigh(\n x_coords_metres, y_coords_metres, x_velocities_m_s01,\n y_velocities_m_s01, e_folding_radius_metres):\n\n if numpy.isnan(e_folding_radius_metres):\n neigh_radius_metres = numpy.inf\n else:\n neigh_radius_metres = 3 * e_folding_radius_metres\n\n orig_x_velocities_m_s01 = x_velocities_m_s01 + 0.\n orig_y_velocities_m_s01 = y_velocities_m_s01 + 0.\n\n nan_flags = numpy.logical_or(\n numpy.isnan(orig_x_velocities_m_s01),\n numpy.isnan(orig_y_velocities_m_s01)\n )\n nan_indices = numpy.where(nan_flags)[0]\n\n for this_index in nan_indices:\n if numpy.isnan(e_folding_radius_metres):\n these_neighbour_indices = numpy.where(numpy.invert(nan_flags))[0]\n if len(these_neighbour_indices) == 0:\n continue\n\n x_velocities_m_s01[this_index] = numpy.mean(\n orig_x_velocities_m_s01[these_neighbour_indices]\n )\n\n y_velocities_m_s01[this_index] = numpy.mean(\n orig_y_velocities_m_s01[these_neighbour_indices]\n )\n\n continue\n\n these_x_diffs_metres = numpy.absolute(\n x_coords_metres[this_index] - x_coords_metres)\n these_y_diffs_metres = numpy.absolute(\n y_coords_metres[this_index] - y_coords_metres)\n\n these_neighbour_flags = numpy.logical_and(\n these_x_diffs_metres <= neigh_radius_metres,\n these_y_diffs_metres <= neigh_radius_metres)\n\n these_neighbour_flags = numpy.logical_and(\n these_neighbour_flags, numpy.invert(nan_flags)\n )\n\n these_neighbour_indices = numpy.where(these_neighbour_flags)[0]\n if len(these_neighbour_indices) == 0:\n continue\n\n these_neighbour_dist_metres = numpy.sqrt(\n these_x_diffs_metres[these_neighbour_indices] ** 2 +\n these_y_diffs_metres[these_neighbour_indices] ** 2\n )\n\n these_neighbour_subindices = numpy.where(\n these_neighbour_dist_metres <= neigh_radius_metres\n )[0]\n if len(these_neighbour_subindices) == 0:\n continue\n\n these_neighbour_indices = these_neighbour_indices[\n these_neighbour_subindices]\n these_neighbour_dist_metres = these_neighbour_dist_metres[\n these_neighbour_subindices]\n\n these_weights = numpy.exp(\n -these_neighbour_dist_metres / e_folding_radius_metres\n )\n these_weights = these_weights / numpy.sum(these_weights)\n\n x_velocities_m_s01[this_index] = numpy.sum(\n these_weights * orig_x_velocities_m_s01[these_neighbour_indices]\n )\n\n y_velocities_m_s01[this_index] = numpy.sum(\n these_weights * orig_y_velocities_m_s01[these_neighbour_indices]\n )\n\n return x_velocities_m_s01, y_velocities_m_s01",
"def normalized_nearest_neighbors(flann, vecs2, K, checks=800):\n import vtool as vt\n if K == 0:\n (fx2_to_fx1, _fx2_to_dist_sqrd) = empty_neighbors(len(vecs2), 0)\n elif len(vecs2) == 0:\n (fx2_to_fx1, _fx2_to_dist_sqrd) = empty_neighbors(0, K)\n elif K > flann.get_indexed_shape()[0]:\n # Corner case, may be better to throw an assertion error\n raise MatchingError('not enough database features')\n #(fx2_to_fx1, _fx2_to_dist_sqrd) = empty_neighbors(len(vecs2), 0)\n else:\n fx2_to_fx1, _fx2_to_dist_sqrd = flann.nn_index(vecs2, num_neighbors=K, checks=checks)\n _fx2_to_dist = np.sqrt(_fx2_to_dist_sqrd.astype(np.float64))\n # normalized dist\n fx2_to_dist = np.divide(_fx2_to_dist, PSEUDO_MAX_DIST)\n fx2_to_fx1 = vt.atleast_nd(fx2_to_fx1, 2)\n fx2_to_dist = vt.atleast_nd(fx2_to_dist, 2)\n return fx2_to_fx1, fx2_to_dist",
"def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum",
"def advertise_route_to_neighbors(self, destination):\n distance_vector = self.hosts_to_ports[destination]\n self.handle_proper_packet(distance_vector.port, destination, distance_vector.latency, True)\n self.handle_poison_packet(distance_vector.port, destination)",
"def is_downstream(neighbour, cell, routing, direction_map):\n # Eliminate invalid cases\n if not is_valid_index(neighbour, routing.shape):\n return False\n cell_routing = routing[cell]\n # `neighbour` is downstream of `cell` if `cell`'s routing points at `neighbour`\n return vec_add(cell, direction_map[int(cell_routing)]) == neighbour",
"def add_neighbors(self, pos, distance, obstacles):\n \n neighbor_list = [(pos[0]-1,pos[1]), (pos[0]+1,pos[1]), \\\n (pos[0],pos[1]-1), (pos[0], pos[1]+1)]\n # Processing each neighbor.\n for (x,y) in neighbor_list:\n if x>=0 and y>=0 and x<self.M and y<self.N: # Out from boundary?\n if (x,y) not in obstacles:\n if (x,y) not in self.footprint: # Already in done list?\n new_distance = distance + 1 + self.heuristic_map[x,y]\n if (x,y) not in self.frontier.keys(): # A new candidate to add to frontier set.\n self.frontier.update({(x,y):new_distance})\n self.distance_map[x,y] = distance + 1\n self.camefrom_map[(x,y)] = pos\n elif new_distance < self.frontier[(x,y)]: # A short path reached this neighbor.\n self.frontier[(x,y)] = new_distance\n self.distance_map[x,y] = distance + 1\n self.camefrom_map[(x,y)] = pos",
"def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))",
"def does_path_intersect_obstacle_2d(self, obstacle, uav_point, waypoint):\n drone_point = uav_point[:-1]\n waypoint = waypoint[:-1]\n obstacle_point = obstacle.get_point()[:-1]\n\n waypoint_vector = np.subtract(waypoint, drone_point)\n obstacle_vector = np.subtract(obstacle_point, drone_point)\n obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector)\n rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector)\n rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector)\n\n # Uncomment for DEBUGGING ONLY\n print(\"Waypoint Vector: \" + str(waypoint_vector))\n print(\"Obstacle Vector: \" + str(obstacle_vector))\n print(\"Rejection Vector: \" + str(rejection_vector))\n print(\"Rejection Vector Magnitude: \" + str(rejection_vector_magnitude))\n print(\"Obstacle Radius: \" + str(obstacle.get_radius()))\n print(\"Distance From Obstacle: \" + str(VectorMath.get_vector_magnitude(np.subtract(uav_point, obstacle.get_point()))))\n\n if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector):\n return rejection_vector_magnitude < obstacle.get_radius()\n\n return False",
"def _update_valid_directions(self, valid_directions, velocity):\n # If not preventing backtracking, all open directions are valid\n if not self._prevent_backtracking:\n return\n axis = np.argmax(np.abs(velocity))\n direction = np.sign(velocity[axis])\n\n # If velocity is zero, all open directions are valid\n if direction == 0:\n return\n \n # If hit a wall and allow wall backtracking, all open directions are\n # valid\n can_continue = valid_directions[axis, int(0.5 * (1 + direction))]\n if not can_continue and self._allow_wall_backtracking:\n return\n # If not hit a wall and only turn at wall, then continue\n if can_continue and self._only_turn_at_wall:\n valid_directions.fill(0)\n valid_directions[axis, int(0.5 * (1 + direction))] = 1\n return\n\n # If none of the above conditions are true, prevent backtracking\n valid_directions[axis, int(0.5 * (1 - direction))] = False",
"def main(edges=[(0, 1, 3), (1, 3, 4), (2, 3, 3), (0, 2, 2) ], num=4):\n\n # initialize routers array\n routers = []\n for x in range(num):\n routers.append([1000] * num)\n routers[x][x] = 0\n \n # set distance to all neighbours \n for edge in edges:\n routers[edge[0]][edge[1]] = edge[2]\n routers[edge[1]][edge[0]] = edge[2]\n\n start_table = routers.copy()\n\n flag = True\n while flag:\n upflag = False\n for nbrs in edges:\n routers[nbrs[0]], up_flag1 = update_table(routers[nbrs[0]], routers[nbrs[1]], dist=nbrs[2])\n routers[nbrs[1]], up_flag2 = update_table(routers[nbrs[1]], routers[nbrs[0]], dist=nbrs[2])\n upflag = upflag or up_flag1 or up_flag2\n\n flag = upflag\n\n return start_table, routers",
"def eq(self, pos, vec1, vec2):\n leftobj = associate_comp(self.get_ground_vector('!Left:{}'.format(pos)), vec1)\n rightobj = associate_comp(self.get_ground_vector('!Right:{}'.format(pos)), vec2)\n result = normalize_comp(\n self.eq_weights @\n torch.cat([\n leftobj,\n rightobj,\n associate_comp(leftobj, rightobj),\n vec1,\n vec2,\n associate_comp(vec2, vec2),\n ]).reshape(-1, 2 * self.hrr_size)).reshape(2, self.hrr_size)\n\n return result",
"def eq(self, pos, vec1, vec2):\n leftobj = associate_comp(self.get_ground_vector('!Left:{}'.format(pos)), vec1)\n rightobj = associate_comp(self.get_ground_vector('!Right:{}'.format(pos)), vec2)\n result = normalize_comp(\n self.eq_weights @\n torch.cat([\n leftobj,\n rightobj,\n associate_comp(leftobj, rightobj),\n vec1,\n vec2,\n associate_comp(vec2, vec2),\n ]).reshape(-1, 2 * self.hrr_size)).reshape(2, self.hrr_size)\n\n return result",
"def linear_move(self, dist):\n\t\tglobal estop_flag, move_state\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\thalfway_flag = False\n\t\t\n\t\twith self.move_state_lock:\n\t\t\tstart_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z']\n\t\tcurrent_x = start_x\n\t\tcurrent_y = start_y\n\t\tcurrent_z = start_z\n\t\t#While the distance travelled is less than target distance\n\t\twhile math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist):\n\t\t\t#Check if the emergency stop flag is set, if so, break the current loop and reset velocity\t\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#If the distance goal is negative, move backward\n\t\t\t\tif dist < 0:\n\t\t\t\t\t#Send negative velocity\n\t\t\t\t\ttwist_msg = Twist()\n\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate\n\t\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance goal is positive, move forward\n\t\t\t\telif dist > 0:\n\t\t\t\t\t#Send positive velocity\n\t\t\t\t\ttwist_msg = Twist()\n\t\t\t\t\ttwist_msg.linear.x = riu.move_rate\n\t\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication\n\t\t\t\tif (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2\n\t\t\t\t\tand not halfway_flag):\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#update current_x, current_y, and current_z (using local variables to be thread safe)\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_x = move_state['x']\n\t\t\t\t\tcurrent_y = move_state['y']\n\t\t\t\t\tcurrent_z = move_state['z']\n\t\t\trospy.sleep(.2)\n\t\t\t\t\n\t\t#previously had while, finally block -> illegal syntax in python. Just moved to outside loop.\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval)",
"def determineNextMove(player_location, opponentLocation, coins):\n global route, currentcoin, meta_route, best_weight, best_path, coins_to_search, index\n if opponentLocation in coins_to_search:\n coins_to_search, meta_route, route = change_way(coins, opponentLocation, player_location)[:3]\n index = 0\n elif currentcoin == player_location: \n if len(route) != 0:\n old_dist = algo.dijkstra(mazeMap, player_location)[1][meta_route[index+1]]\n coins_to_search2, meta_route2, route2, new_dist = change_way(coins, opponentLocation, player_location)\n\n #dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n #coins_to_search = get_n_shortest(3, coins, player_location, dists_matrix)\n \t\n #ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n #for c in coins_to_search:\n #if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n # coins_to_search.remove(c)\n #break\n \t\t\n #best_weight = float(\"inf\")\n #best_path = []\n #exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n #meta_route2 = [player_location] + best_path\n #route2 = u.location_list_to_route(meta_route2, route_matrix)\n #new_dist = dist_matrix[player_location][meta_route2[1]]\n\t\t\n if len(route) == 0 or old_dist - new_dist > 3:\n route = route2\n meta_route = meta_route2 \n index = 0\n index += 1\n currentcoin = meta_route[index]\n #api.debug(route)\n return u.direction(player_location, route.pop(0))"
] | [
"0.63939047",
"0.6258217",
"0.6063154",
"0.56720173",
"0.5632161",
"0.5608573",
"0.5462357",
"0.5388178",
"0.5371082",
"0.53031",
"0.52890694",
"0.5269303",
"0.5231124",
"0.5221327",
"0.52091473",
"0.52045894",
"0.5196824",
"0.51870656",
"0.5162876",
"0.5161015",
"0.5159955",
"0.51559657",
"0.5151766",
"0.51495785",
"0.51351213",
"0.5121236",
"0.51121986",
"0.51121986",
"0.5111707",
"0.5110856"
] | 0.7336643 | 0 |
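The record above is a single relaxation step of distance-vector routing. A minimal driver (a sketch; the graph and the sentinel value 1000 for "unreachable" are illustrative, mirroring the candidate snippets) repeats the exchange across every link until no vector changes:

```python
INF = 1000  # sentinel for "unreachable", as in the candidates above

def run_distance_vector(num_routers, edges):
    # One distance vector per router; 0 to itself, INF to everyone else.
    tables = [[0 if i == j else INF for j in range(num_routers)]
              for i in range(num_routers)]
    for a, b, w in edges:            # direct neighbours
        tables[a][b] = tables[b][a] = w

    changed = True
    while changed:                   # iterate until no router updates
        changed = False
        for a, b, w in edges:        # each link exchanges vectors both ways
            tables[a], f1 = update_table(tables[a], tables[b], w)
            tables[b], f2 = update_table(tables[b], tables[a], w)
            changed = changed or f1 or f2
    return tables

# 4 routers, weighted links (0-1:3, 1-3:4, 2-3:3, 0-2:2)
print(run_distance_vector(4, [(0, 1, 3), (1, 3, 4), (2, 3, 3), (0, 2, 2)]))
# -> [[0, 3, 2, 5], [3, 0, 5, 4], [2, 5, 0, 3], [5, 4, 3, 0]]
```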
Build chat window, set widget positioning and event bindings | def build_window(self):
# Size config
self.root.geometry('{}x{}'.format(800, 450))
self.root.minsize(600, 400)
# create all of the main containers
self.left_frame = Frame(self.root, bg='red', width=150, height=450, pady=3)
self.right_frame = Frame(self.root, bg='blue', width=650, height=450, pady=3)
# layout all of the main containers
self.root.grid_rowconfigure(0, weight=1)
self.root.grid_columnconfigure(1, weight=1)
self.left_frame.grid(row=0,column=0,sticky='ns')
self.right_frame.grid(row=0,column=1,sticky='nswe')
# create all of the left containers
self.Username_Search_Frame = Frame(self.left_frame, bg='yellow', pady=3)
self.Username_Search_Frame.grid_rowconfigure(0, weight=1)
self.Username_Search_Frame.grid_columnconfigure(0, weight=1)
self.Username_label = Label(self.Username_Search_Frame, text=self.client.username)
self.Search_entry = Entry(self.Username_Search_Frame, text='Add people')
self.Search_entry.bind('<Return>', self.add_event)
self.Username_label.grid(row=0,column=0,sticky='nswe')
self.Search_entry.grid(row=1,column=0,sticky='nswe')
self.Show_Friend_request_Frame = Frame(self.left_frame, bg='red', pady=3)
self.Show_button = Button(self.Show_Friend_request_Frame, text='Chats')
self.Show_button.bind('<Button-1>', self.show_event)
self.Show_button_label = Label(self.Show_Friend_request_Frame, text='Chats')
self.Friend_request_button = Button(self.Show_Friend_request_Frame, text='Friend_request')
self.Friend_request_button.bind('<Button-1>', self.Friend_request_event)
self.Friend_request_button_label = Label(self.Show_Friend_request_Frame, text='Friend_request')
self.Show_button_label.pack(side=LEFT, fill=BOTH, expand=YES)
self.Friend_request_button.pack(side=LEFT, fill=BOTH, expand=YES)
self.logins_list_Frame = Frame(self.left_frame, bg='green', pady=3)
self.logins_list_Frame.grid_rowconfigure(0, weight=1)
self.logins_list_Frame.grid_columnconfigure(0, weight=1)
self.logins_list = Listbox(self.logins_list_Frame, selectmode=SINGLE, exportselection=False)
self.logins_list.bind('<<ListboxSelect>>', self.selected_login_event)
self.logins_list.pack(side=LEFT, fill=BOTH, expand=YES)
self.friend_request_list = Listbox(self.logins_list_Frame, selectmode=SINGLE, exportselection=False)
self.friend_request_list.bind('<<ListboxSelect>>', self.select_friend_request)
#self.friend_request_list.pack(side=LEFT, fill=BOTH, expand=YES)
self.Username_Search_Frame.grid(row=0,column=0,sticky='nswe')
self.Show_Friend_request_Frame.grid(row=1,column=0,sticky='nswe')
self.logins_list_Frame.grid(row=2,column=0,sticky='nswe')
self.left_frame.grid_rowconfigure(2, weight=1)
self.left_frame.grid_columnconfigure(0, weight=1)
# create all of the right containers
self.Target_name_frame = Frame(self.right_frame, bg='yellow', pady=3)
self.Target_name_frame.grid_rowconfigure(0, weight=1)
self.Target_name_frame.grid_columnconfigure(0, weight=1)
self.Target = Label(self.Target_name_frame, text='Target_name')
self.Target.grid(row=0,column=0,sticky='nswe')
self.Message_box_frame = Frame(self.right_frame, bg='black', pady=3)
self.message_list = Message_list(self.Message_box_frame)
self.message_list.show()
self.Entry_frame = Frame(self.right_frame, bg='grey', height=100, pady=3)
self.Entry_frame.grid_rowconfigure(0, weight=1)
self.Entry_frame.grid_columnconfigure(0, weight=1)
self.Entry = Text(self.Entry_frame)
self.Entry.bind('<Return>', self.send_entry_event)
self.Entry.grid(row=0,column=0,sticky='nswe')
self.Send_file_button = Button(self.right_frame, text='Send file')
self.Send_file_button.bind('<Button-1>', self.send_file_event)
self.Send_file_button.grid(row=3,column=0,sticky='nswe')
self.Target_name_frame.grid(row=0,column=0,sticky='nswe')
self.Message_box_frame.grid(row=1,column=0,sticky='nswe')
self.Entry_frame.grid(row=2,column=0,sticky='nswe')
self.right_frame.grid_rowconfigure(1, weight=1)
self.right_frame.grid_columnconfigure(0, weight=1)
self.right_frame.grid_rowconfigure(2, weight=4)
self.right_frame.grid_columnconfigure(0, weight=1)
self.root.protocol("WM_DELETE_WINDOW", self.on_closing_event) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def chat_window(window, chat_lines, write_box):\n for i in xrange(25):\n chat_lines[i] = Entry(Point(130,245-(i*9)),80)\n chat_lines[i].draw(window)\n chat_lines[i].setFill(\"white\")\n write_box.draw(window) # draw it to the window\n help(chat_lines)",
"def new_window_messages(self, button_see_all_msgs):\r\n # changing the button command to closing the window\r\n button_see_all_msgs.config(command=lambda: self.close_window(button_see_all_msgs))\r\n\r\n # creating the chat Tk object\r\n self.messages_window = Tk()\r\n self.messages_window.resizable(False, False)\r\n self.messages_window.config(bg=self.bg_color)\r\n self.messages_window.protocol(\"WM_DELETE_WINDOW\",\r\n lambda: self.close_window(button_see_all_msgs))\r\n\r\n chat_label = Label(self.messages_window, text=\"Hello \" + self.username +\r\n \"\\nHere are your messages\",\r\n bg=self.bg_color, font=self.title_font)\r\n chat_label.pack(padx=20, pady=10)\r\n chat_frame = Frame(self.messages_window)\r\n chat_frame.pack(padx=15, pady=15)\r\n scrollbar_chat = Scrollbar(chat_frame)\r\n scrollbar_chat.pack(side=RIGHT, fill=Y)\r\n text_chat = Text(chat_frame, width=30, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_chat.set)\r\n text_chat.pack()\r\n scrollbar_chat.config(command=text_chat.yview)\r\n for msg, encryption_data, sender_user in self.msg_list:\r\n text_chat.insert(END, \"from: \" + sender_user + \"\\n\")\r\n text_chat.insert(END, msg + \"\\n\\n\")\r\n text_chat.config(state=DISABLED)",
"def __init__(self, master, Client):\n super(Chat, self).__init__(master)\n self.grid()\n self.create_widgets()\n master.grid_propagate(True)\n master.grid_rowconfigure(0, weight = 1)\n master.grid_columnconfigure(0, weight = 1)\n self.Client = Client\n threading.Thread(target = self.listen).start()",
"def createWidgets(self):\r\n top = self.winfo_toplevel()\r\n top.rowconfigure(0, weight=1)\r\n top.columnconfigure(0, weight=1)\r\n self.rowconfigure(0, weight=1)\r\n self.columnconfigure(0, weight=1) \r\n\r\n self.button_quit = tk.Button(self, text='Quit', command=self.quit)\r\n self.button_quit.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)",
"def create_widgets(self):\n # self.var_spherical = IntVar()\n # self.var_3d = IntVar()\n # self.var_spatial_audio = IntVar()\n # self.button_open[\"command\"] = self.action_open\n # self.button_inject[\"command\"] = self.action_inject\n pass",
"def __set_properties(self): \n self.SetTitle(str(self.friend))\n self.SetSize((653, 467))\n self.chat_log.SetMinSize((635, 400))\n self.text_send.SetMinSize((635, -1))\n self.text_send.SetFocus()",
"def build_frames(self):\n self.cntrl_frame = tk.PanedWindow(self.root)\n self.cntrl_frame.pack(side = tk.TOP, padx = 1, pady = 1, fill = tk.Y)\n self.info_frame_1 = tk.PanedWindow(self.root)\n self.info_frame_1.pack(side = tk.TOP, padx = 1, pady = 2, fill = tk.Y)",
"def build_window(self):\n\n main_frame = tk.Frame(self.root)\n main_frame.pack(fill='both')\n\n self.open_machine_learner_window_button = tk.Button(main_frame, text=\"Open Machine Learner\")\n self.open_machine_learner_window_button.bind('<Button-1>', self.open_machine_learner_window)\n self.open_machine_learner_window_button.pack(side=\"left\")\n\n self.open_web_crawler_window_button = tk.Button(main_frame, text=\"Open Web Crawler\")\n self.open_web_crawler_window_button.bind('<Button-1>', self.open_web_crawler_window)\n self.open_web_crawler_window_button.pack(side=\"left\")\n\n self.open_webpage_classifier_window_button = tk.Button(main_frame, text=\"Open WebPage Classifier\")\n self.open_webpage_classifier_window_button.bind('<Button-1>', self.open_webpage_classifier_window)\n self.open_webpage_classifier_window_button.pack(side=\"left\")\n\n self.run_steady_state_genetic_button = tk.Button(main_frame, text=\"Run Steady State\")\n self.run_steady_state_genetic_button.bind('<Button-1>', self.run_steady_state)\n self.run_steady_state_genetic_button.pack(side=\"left\")\n\n # Protocol for closing window using 'x' button\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing_event)",
"def __init__(self, controller):\n self.controller = controller\n\n self.top = tkinter.Tk()\n self.top.title(\"Chatter\")\n\n self.input = tkinter.StringVar() # For the messages to be sent.\n self.input.set(\"\")\n\n \"\"\"Message box\"\"\"\n messages_frame = tkinter.Frame(self.top)\n scrollbar = tkinter.Scrollbar(messages_frame)\n self.msg_list = tkinter.Listbox(messages_frame, height=15, width=60, yscrollcommand=scrollbar.set)\n scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)\n self.msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\n self.msg_list.pack()\n messages_frame.pack()\n\n\n\n\n \"\"\"Input box and send button\"\"\"\n entry_field = tkinter.Entry(self.top, textvariable=self.input)\n entry_field.bind(\"<Return>\", self.controller.msg_to_send)\n entry_field.pack()\n send_button = tkinter.Button(self.top, text=\"Send\", command=lambda: self.controller.msg_to_send(self.input))\n send_button.pack()\n \"\"\"Send file button\"\"\"\n file_button = tkinter.Button(self.top, text=\"Send file\", command=lambda: self.controller.find_file())\n file_button.pack()\n\n \"\"\"On closing the window\"\"\"\n self.top.protocol(\"WM_DELETE_WINDOW\", self.controller.close)",
"def _init_widgets(self):\n # Container frame\n self.container = Frame(self)\n # Workspace block\n self.main_container = Frame(self.container)\n\n self.text = Label(self.main_container)\n self.text.config(text=\"PyEventLogViewer is a timeline-based tool used to simplify the way\\n\"\n \"a user can view and explore Windows EVTX files. To begin using this\\n\"\n \"software you must do the following:\\n\\n\"\n \"\\t1) File → New → 'Create a new project'\\n\"\n \"\\t2) Tools → Import Log File → 'Open a specified EVTX file'\\n\"\n \"\\t3) Explore the presented timeline.\\n\"\n \"\\t4) Double-click a specific record to view the XML data for that record.\\n\"\n \"\\t5) File → Export → 'Generate a CSV or HTML file for timeline presentation.'\\n\\n\"\n \"At this point, only System and Security EVTX files are parsable with this software.\")\n\n self.show_var = BooleanVar()\n self.show_check = Checkbutton(self.main_container, text=\"Don't Show on Startup\", variable=self.show_var)\n\n # Action block\n self.button_ok = Button(self.main_container, text='Ok', underline=0, command=self.callback_close)\n self.bind('<Return>', self.callback_close)\n self.bind('<Escape>', self.callback_close)\n\n # Focus on window - required for binds to work.\n self.focus_set()",
"def _make_message_frame( self, parent, default_scroll = True ):\n# color = \"black\" # this may need a bit of rework -- looks like not used\n #iframe = Tk.Frame( parent, width=300, height=800,\n # bg =\"blue\", relief = Tk.RAISED, borderwidth=1, )\n iframe = self\n\n # bframe is for the buttons on the left\n bframe = Tk.Frame( iframe, bg = \"white\", width=30 )\n # width=300, height=800, bg =\"blue\", relief=RAISED, borderwidth=1, )\n bframe.grid( row=0, column=0, sticky = Tk.N + Tk.S )\n\n text0 = Tk.Text( iframe , width=50, height=20 )\n\n s_text0 = Tk.Scrollbar( iframe )\n s_text0.grid( row=0, column=2, sticky = Tk.N + Tk.S )\n\n s_text0.config( command=text0.yview )\n text0.config( yscrollcommand=s_text0.set )\n\n text0.grid( row=0, column=1, sticky = Tk.N + Tk.S + Tk.E + Tk.W )\n\n self.msg_text = text0\n\n iframe.grid_columnconfigure( 1, weight=1 )\n iframe.grid_rowconfigure( 0, weight=1 )\n\n # now into the button frame bframe\n\n # spacer\n s_frame = Tk.Frame( bframe, bg =\"green\", height=20 ) # width=30 )\n s_frame.grid( row=0, column=0 )\n row_ix = 0\n\n # ---- Clear button\n b_clear = Tk.Button( bframe , width=10, height=2, text = \"Clear\" )\n b_clear.bind( \"<Button-1>\", self.do_clear_button )\n if self.gui_style:\n self.gui_style.style_button( b_clear )\n b_clear.grid( row=row_ix, column=0 )\n\n self.button_widgets.append( b_clear )\n row_ix += 1\n\n # ---- Copy selection\n a_widget = Tk.Button( bframe , width=10, height=2, text = \"Cop Selection\",\n command = self.copy_selection)\n # b_temp.bind( \"<Button-1>\", self.doButtonText )\n if self.gui_style:\n self.gui_style.style_button( a_widget )\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n row_ix += 1\n\n #-----\n a_widget = Tk.Button( bframe , width=10, height=2, text = \"Copy All\" )\n a_widget.bind( \"<Button-1>\", self.do_copy_button )\n if self.gui_style:\n self.gui_style.style_button( a_widget )\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n row_ix += 1\n\n # -------------\n self.cb_scroll_var = Tk.IntVar() # for check box in reciev frame\n a_widget = Tk.Checkbutton( bframe,\n width = 7,\n height = 2,\n text = \"A Scroll\",\n variable = self.cb_scroll_var,\n command = self.do_auto_scroll )\n\n a_widget.grid( row=row_ix, column=0 )\n self.button_widgets.append( a_widget )\n\n row_ix += 1\n self.cb_scroll_var.set( default_scroll ) # was AppGlobal.parameters.default_scroll )\n\n return iframe",
"def initGUI(self):\n\n\t\t# Set main frame's location \n\t\tself.grid(row=0, column=0, sticky=\"nsew\")\n\n\t\t# Set path entry frame and its location\n\t\tself.entryFrame = Frame(self, relief = RAISED, borderwidth = 1)\n\t\tself.entryFrame.pack(fill = BOTH, expand = False)\n\t\t# Make label\n\t\tif self.message:\n\t\t\tmessageLabel = Label(self.entryFrame, text = self.message, font=(\"Bradley\", 10))\n\t\t\tmessageLabel.pack(anchor=W, padx=0, pady=0)\n\n\t\t# Set path entry and its location\n\t\tself.filePathEntry = Entry(self.entryFrame, bd = 4, width = 50)\n\t\tself.filePathEntry.pack(side = LEFT, padx=2, pady=1)",
"def makeWidgets(self):\r\n self._frame = tk.Frame(self, relief=tk.RAISED, borderwidth=1)\r\n self._frame.pack(fill=tk.BOTH, expand=1)\r\n\r\n self.pack(fill=tk.BOTH, expand=1)\r\n\r\n self._frame._label1 = tk.Label(self._frame, text='----File Name----')\r\n self._frame._label1.pack(fill=tk.X, expand=tk.NO, pady=1, padx=2)\r\n self._frame._entry = tk.Entry(self._frame)\r\n self._frame._entry.pack(pady=2, padx=2)\r\n\r\n self._frame._label0 = tk.Label(self._frame, textvariable=self.timestr)\r\n self._setTime(self._elapsedtime)\r\n self._frame._label0.pack(fill=tk.X, expand=tk.NO, pady=3, padx=2)\r\n\r\n self._frame._label2 = tk.Label(self._frame, text='----Laps----')\r\n self._frame._label2.pack(fill=tk.X, expand=tk.NO, pady=4, padx=2)\r\n\r\n self._frame._scrollbar = tk.Scrollbar(self._frame, orient=tk.VERTICAL)\r\n self._frame._listbox = tk.Listbox(self._frame, selectmode=tk.EXTENDED, height=10,\r\n yscrollcommand=self._frame._scrollbar.set)\r\n self._frame._listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, pady=5, padx=2)\r\n self._frame._scrollbar.config(command=self._frame._listbox.yview)\r\n self._frame._scrollbar.pack(side=tk.RIGHT, fill=tk.Y)",
"def create_widgets( self ):",
"def initGUI(self):\n\n\t\t# Set window's title\n\t\tself.parent.title(\"Error Message\")\n\t\t# Creat frames that contain messages and buttons \n\t\tself.buttonFrame = Frame(self.parent)\n\t\tself.buttonFrame.pack(fill = BOTH, expand = True)\n\t\tmessageFrame = Frame(self.buttonFrame, borderwidth = 1)\n\t\tmessageFrame.pack(fill = BOTH, expand = True)\n\t\t# Creat buttons\n\t\tself.makeButtons()\n\t\t# Create and show an error message as an label\n\t\tvar = StringVar()\n\t\tlabel = Message(messageFrame, textvariable=var, relief=RAISED, width = 1000)\n\t\tvar.set(self.message)\n\t\tlabel.pack(fill = BOTH, expand = True)",
"def createWidgets(self):\n raise NotImplementedError",
"def build_window(self):\n\n form_frame = tk.Frame(self.window)\n form_frame.pack(fill='x')\n\n tk.Label(form_frame, text=\"Url\").grid(row=0, column=0, sticky='W')\n url = tk.StringVar()\n self.url_entry = tk.Entry(form_frame, textvariable=url)\n self.url_entry.grid(row=0, column=1, sticky='W')\n\n tk.Label(form_frame, text=\"Depth\").grid(row=1, column=0, sticky='W')\n depth = tk.IntVar()\n self.depth_entry = tk.Entry(form_frame, textvariable=depth)\n self.depth_entry.grid(row=1, column=1, sticky='W')\n\n self.submit_button = tk.Button(form_frame, text=\"Submit\")\n self.submit_button.bind('<Button-1>', self.send_entry_event)\n self.submit_button.grid(row=2, column=0, columnspan=1, sticky='W')\n\n url_list_frame = tk.Frame(self.window)\n url_list_frame.pack(fill='x')\n\n self.url_list = tk.Listbox(url_list_frame, selectmode=tk.SINGLE, font=self.font,\n exportselection=False)\n self.url_list.bind('<<ListboxSelect>>', self.selected_url_event)\n self.url_list.pack(fill=tk.BOTH, expand=tk.YES)",
"def create_widgets(self):\r\n self.create_containers()\r\n self.setup_containers()\r\n self.create_panel_widgets()\r\n self.setup_scrollbar()",
"def widgets(self):\r\n self.setWindowTitle(\"PyCrypt\")\r\n self.setMinimumSize(QSize(500, 500))\r\n self.setMaximumSize(QSize(500, 500))\r\n# Adding the sub def for widgets etc\r\n self.add_menus_and_status()\r\n self.add_buttons()",
"def create_widgets(self):",
"def __init__(self):\n self.stdscr = curses.initscr()\n self.client = None\n self.max_y, self.max_x = self.stdscr.getmaxyx()\n self.chat_container = curses.newwin(self.max_y - 2, self.max_x, 1, 0)\n self.chat_win = self.chat_container.subwin(self.max_y - 3, self.max_x - 4, 2, 2)\n self.prompt_win = curses.newwin(1, self.max_x, self.max_y - 1, 0)\n self.setup()",
"def send_messages(self):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username,\r\n font=self.title_font, bg=self.bg_color, height=2)\r\n user_label.pack(pady=10, padx=50)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=10)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n write_message = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n write_message.pack()\r\n scrollbar_msg.config(command=write_message.yview)\r\n button_speech_rec = Button(self.root, text=\"listen\\nto speech\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.create_speech_thread(write_message))\r\n button_speech_rec.pack(pady=10)\r\n button_send = Button(self.root, text=\"send\", font=self.text_font,\r\n height=2, width=20, command=lambda: self.send(write_message))\r\n button_send.pack(pady=10)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.choose_path)\r\n button_send.pack(pady=10)",
"def setup(self):\n curses.curs_set(1)\n curses.noecho()\n curses.cbreak()\n # Keypad disabled until scrolling properly implemented\n # self.stdscr.keypad(True)\n self.stdscr.clear()\n self.stdscr.addstr(\"SecureChat v{}\".format(__version__))\n self.chat_container.box()\n self.chat_win.addstr(\"Welcome to SecureChat!\")\n self.chat_win.scrollok(True)\n self.chat_win.setscrreg(0, self.max_y - 5)\n self.prompt_win.addstr(\"> \")\n self.refresh_all()",
"def create_widgets(self):\n self.info_input = tk.Label(self)\n self.info_input['text'] = 'Paste emails, one per line and click \"Encrypt\" to see results below:'\n self.info_input['pady'] = 10\n self.info_input.grid(row=0, columnspan=3)\n\n self.input = tk.Text(self)\n self.input['height'] = 10\n self.input.grid(row=1, columnspan=3)\n\n self.info_output = tk.Label(self)\n self.info_output['text'] = 'Encrypted mails:'\n self.info_output['pady'] = 10\n self.info_output.grid(row=2, columnspan=3)\n\n self.output = tk.Text(self)\n self.output['height'] = 10\n self.output.grid(row=3, columnspan=3)\n\n self.btn_submit = tk.Button(self)\n self.btn_submit['text'] = 'Encrypt'\n self.btn_submit['command'] = self.hash_input\n self.btn_submit.grid(row=4, column=0)\n\n self.btn_save_as = tk.Button(self)\n self.btn_save_as['text'] = 'Save to file'\n self.btn_save_as['command'] = self.save_to_file\n self.btn_save_as.grid(row=4, column=1)\n\n self.btn_quit = tk.Button(self)\n self.btn_quit['text'] = 'Quit'\n self.btn_quit['command'] = self.master.destroy\n self.btn_quit.grid(row=4, column=2)",
"def create_widgets(self):\n self.Hi = Button(self, text= \"hi\", fg=\"red\", command=self.say_hi)\n self.Hi.pack({\"side\": \"left\"})\n # quit with out () means return not call\n self.Quit = Button(self, text=\"Goodbye\", fg=\"blue\", command=self.quit)\n self.Quit.pack({\"side\": \"left\"})",
"def create_widgets(self):\n self.pack(fill=tk.BOTH, expand=True)\n self.top_frame = tk.Frame(self)\n self.top_frame.pack(fill=tk.X, expand=False)\n\n # Create obstacle button\n self.create_obstacle_button = tk.Button(\n self.top_frame,\n text=self.OBSTACLE_CREATION_INACTIVE_LABEL,\n command=self._toggle_creation_mode_cb\n )\n self.create_obstacle_button.pack(side=tk.LEFT)\n\n # Load button\n self.load_button = tk.Button(\n self.top_frame,\n text=self.LOAD_BUTTON_LABEL,\n command=self._load_button_cb\n )\n self.load_button.pack(side=tk.LEFT)\n\n # Export button\n export_button = tk.Button(\n self.top_frame,\n text=self.EXPORT_BUTTON_LABEL,\n command=self._export_button_cb\n )\n export_button.pack(side=tk.RIGHT)\n\n # Main canvas\n self.canvas = tk.Canvas(self, background='white')\n self.canvas.config(width=self.CANVAS_WIDTH, height=self.CANVAS_HEIGHT)\n self.canvas.bind('<ButtonRelease-1>', self._draw_line)\n self.canvas.pack(fill=tk.BOTH, expand=True)\n self.canvas.focus_set()",
"def create_widgets(self, root):\n\n self.widgets['Main Frame'] = Frame(self.widgets['Login Window'], borderwidth=20)\n self.widgets['Main Frame'].pack(expand=YES, fill=BOTH)\n\n Label(self.widgets['Main Frame'], text='Enter password:').pack(side=TOP, expand=YES)\n\n self.widgets['Input Frame'] = Frame(self.widgets['Main Frame'])\n self.widgets['Input Frame'].pack(side=TOP, expand=YES, fill=BOTH)\n\n self.widgets['Password Entry'] = Entry(self.widgets['Input Frame'], show=\"*\")\n self.widgets['Password Entry'].focus_set() # place cursor in entry\n self.widgets['Password Entry'].pack(side=LEFT, expand=YES)\n\n self.widgets['Enter Button'] = Button(self.widgets['Input Frame'], text=\"Enter\",\n command=lambda: self.verify_password_input(root))\n self.widgets['Enter Button'].pack(side=RIGHT, expand=YES)\n\n self.widgets['Login Window'].bind(\"<Return>\", lambda e: self.widgets['Enter Button'].invoke())\n\n self.widgets['Message'] = Label(self.widgets['Main Frame'], width=50, wraplength=300)",
"def build_window(self):\n # Size config\n self.window.geometry('750x500')\n self.window.minsize(600, 400)\n\n main_frame = tk.Frame(self.window)\n main_frame.pack(fill=\"both\")\n\n top_frame = tk.Frame(main_frame)\n top_frame.pack(side=\"top\", fill=\"x\")\n\n tk.Label(top_frame, text=\"Enter a URL to classify\").pack(side=\"top\")\n\n webpage_classifier_form_frame = tk.Frame(top_frame)\n webpage_classifier_form_frame.pack(side=\"top\")\n\n tk.Label(webpage_classifier_form_frame, text=\"URL\").pack(side=\"left\")\n url = tk.StringVar()\n self.url_entry = tk.Entry(webpage_classifier_form_frame, textvariable=url)\n self.url_entry.pack(side=\"right\")\n\n\n self.add_webpages_to_dataset_button = tk.Button(top_frame, text=\"Add Webpages to Dataset\")\n self.add_webpages_to_dataset_button.bind('<Button-1>', self.add_webpages_to_dataset)\n self.add_webpages_to_dataset_button.pack(side=\"bottom\")\n\n self.balance_dataset_button = tk.Button(top_frame, text=\"Balance Dataset\")\n self.balance_dataset_button.bind('<Button-1>', self.balance_dataset)\n self.balance_dataset_button.pack(side=\"bottom\")\n\n self.submit_button = tk.Button(top_frame, text=\"Scrape Site\")\n self.submit_button.bind('<Button-1>', self.scrape_site)\n self.submit_button.pack(side=\"bottom\")\n\n\n\n bottom_frame = tk.Frame(main_frame)\n bottom_frame.pack(side=\"bottom\", fill=\"x\")\n\n # ScrolledText widget for displaying messages\n self.messages_list = scrolledtext.ScrolledText(bottom_frame, wrap='word', font=self.font)\n self.messages_list.configure(state='disabled')\n self.messages_list.pack(fill=\"x\")",
"def __init__(self, user, passw, friend, *args, **style):\n style[\"style\"] = wx.DEFAULT_FRAME_STYLE\n wx.Frame.__init__(self, *args, **style)\n self.chat_log = wx.TextCtrl(self, -1, \"\", style=wx.TE_MULTILINE | wx.TE_READONLY)\n self.text_send = wx.TextCtrl(self, -1, \"\")\n self.user=user\n self.passw=passw\n self.friend=friend\n self.__set_properties()\n self.__do_layout()\n self.__set_chatlog(0)\n self.Bind(wx.EVT_TEXT_ENTER, self.text_e, self.text_send)\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n Publisher().subscribe(self.__set_chatlog, \"updatetext\")",
"def setup_gui(root_window, mqtt_client):\n frame = ttk.Frame(root_window, padding=35, relief='groove')\n frame.grid()\n\n speed_entry_box = ttk.Entry(frame)\n go_forward_button = ttk.Button(frame, text=\"Go forward\")\n follow_path_button = ttk.Button(frame, text='follow path')\n progress_bar = ttk.Progressbar(frame, length=200)\n path_by_color_button = ttk.Button(frame, text='path by color')\n find_nearest_object_button = ttk.Button(frame, text='find nearest object')\n\n progress_bar.grid()\n speed_entry_box.grid()\n go_forward_button.grid()\n follow_path_button.grid()\n path_by_color_button.grid()\n find_nearest_object_button.grid()\n\n go_forward_button['command'] = \\\n lambda: handle_go_forward(speed_entry_box, mqtt_client)\n\n follow_path_button['command'] = \\\n lambda: handle_follow_path(mqtt_client)\n\n path_by_color_button['command'] = \\\n lambda: handle_path_by_color(mqtt_client)\n\n find_nearest_object_button['command'] = \\\n lambda: handle_find_nearest(mqtt_client)\n\n return progress_bar"
] | [
"0.6898628",
"0.6870365",
"0.68484443",
"0.6702743",
"0.6519873",
"0.6475004",
"0.6413773",
"0.6409881",
"0.6383972",
"0.63573664",
"0.6306729",
"0.6292875",
"0.6283318",
"0.6264044",
"0.62494653",
"0.6241723",
"0.62238955",
"0.6218888",
"0.62177086",
"0.6195311",
"0.61767215",
"0.61725867",
"0.6159563",
"0.61535007",
"0.6144078",
"0.6125745",
"0.61192",
"0.6055976",
"0.60434306",
"0.6016322"
] | 0.7285587 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.